diff --git "a/5356.jsonl" "b/5356.jsonl" new file mode 100644--- /dev/null +++ "b/5356.jsonl" @@ -0,0 +1,1164 @@ +{"seq_id":"5492487587","text":"\"\"\"\nDocumentation tools cannot document the fortan shared object file\ndue. However, f2py auto-documents the entire interface.\n\nThis file processes that interface into a dummy-Python format,\n_fortran.py with all the same names and docstrings, however\nthis is a format that can be documented.\n\nfortran (python module)\n├─ fortran module (python class)\n│ ├─ subroutine (python class method)\n│ ├─ function (python class method)\n│ ├─ module variable (python class attribute)\n\"\"\"\n\n\nimport inspect\nfrom pathlib import Path\nfrom typing import Any, List, NamedTuple, Set, Union\n\nfrom process import fortran\n\n\nclass FortranModuleMember(NamedTuple):\n \"\"\"A container for data about a specific subroutine/function/module contained within a wrapped module.\n\n :name: the name of the function/subroutine/module variable this object holds data for\n :type name: str\n\n :docstring: the docstring (autogenerated by f2py) for this function/subroutine/module variable\n :type docstring: str\n \"\"\"\n\n name: str\n docstring: str\n\n\nclass FortranModuleVariable(NamedTuple):\n \"\"\"A container for data about a specific variable contained within a wrapped module.\n\n :name: the name of the function/subroutine/module variable this object holds data for\n :type name: str\n\n :docstring: the docstring (autogenerated by f2py) for this function/subroutine/module variable\n :type docstring: str\n\n :var_type: the type of this variable\n :type var_type: Any\n\n :value: the default value, if any, of this member\n :type value: Any\n \"\"\"\n\n name: str\n docstring: str\n value: Any = None\n var_type: Any = None\n module: Any = None\n\n\nclass FortranModule(NamedTuple):\n \"\"\"A container for data about a specific wrapped fortran module\n\n :name: the name of the module this object holds data for\n :type name: str\n\n :docstring: the docstring (autogenerated by f2py) for this module\n :type docstring: str\n\n :members: a list of members, ie subroutines/functions/module variables\n :type members: List[FortranModuleMember]\n \"\"\"\n\n name: str\n docstring: str\n members: List[FortranModuleMember]\n\n\ndef get_modules(ftrn) -> List[FortranModule]:\n \"\"\"Returns a list of modules in the wrapped Python-Fortran interface\n\n :param ftrn: top-level wrapped fortran interface (the name of the module output from f2py) e.g. 
f2py wrapping PROCESS uses `-m fortran` so this top-level interface is process.fortran\n :type ftrn: f2py-generated Python-Fortran interface\n\n :return classes: the list of modules\n :type classes: List[FortranModule]\n \"\"\"\n classes = []\n\n for name, module in inspect.getmembers(ftrn):\n if type(module) == type(fortran.main_module): # noqa: E721\n classes.append(\n FortranModule(\n name=name, docstring=module.__doc__, members=get_members(module)\n )\n )\n\n return classes\n\n\ndef get_members(\n fortran_module,\n) -> List[Union[FortranModuleMember, FortranModuleVariable]]:\n \"\"\"Returns a list of members (subroutine, function, module variables) of the module\n\n :param fortran_module: the Fortran module to get the members of\n :type fortran_module: a wrapped Fortran module\n\n :return members: a list of subroutines, functions, and variables of the fortran_module\n :type members: List[Union[FortranModuleMember, FortranModuleVariable]]\n \"\"\"\n members = []\n\n for name, member in inspect.getmembers(fortran_module):\n if name[0:2] == \"__\":\n continue\n\n if type(member) == type(fortran.main_module.inform): # noqa: E721\n docstring = member.__doc__\n if is_variable(member):\n members.append(\n FortranModuleVariable(\n name=name,\n docstring=docstring,\n value=member,\n var_type=type(member).__qualname__,\n module=type(member).__module__,\n )\n )\n else:\n members.append(FortranModuleMember(name=name, docstring=docstring))\n\n return members\n\n\ndef is_variable(member) -> bool:\n \"\"\"Checks if the member is a variable or not.\"\"\"\n # should likely be kept as a function since f2py's interface keeps changing\n return member.__doc__ is None\n\n\ndef create_module_signature(mod: FortranModule) -> str:\n \"\"\"Creates the signature for a wrapped fortran module, corresponding to one class.\n Manages the generation of import statements, variable signatures, and function signatures.\n \"\"\"\n\n docstring = f\"Abstract representation of the F2Py-generated wrapper around the {mod.name} module\"\n # f2py gives modules unhelpful docstrings, so a more accurate and concise docstring is\n # created for use in documentation\n\n functions: List[str] = [] # subroutines/functions\n variables: List[str] = [] # module variables\n imports: Set[str] = set() # import non-builtin types\n # set to avoid import duplication (at a class level)\n # duplication could still occur between classes, although\n # neither matter\n\n for i in mod.members:\n if isinstance(i, FortranModuleVariable):\n variables.append(create_variable_signature(i))\n imports.add(create_import(i))\n else:\n functions.append(create_function_signature(i))\n\n header = \"\\n\".join([i for i in imports if i]) + \"\\n\\n\"\n body = \"\\n\".join(variables) + \"\\n\\n\" + \"\\n\\n\".join(functions)\n\n string = f'{header}class {mod.name}:\\n\\t\"\"\"{docstring}\"\"\"\\n{body}'\n\n return string\n\n\ndef create_import(var: FortranModuleVariable) -> str:\n \"\"\"Creates the import statement for a var's type if not a builtin.\"\"\"\n # ignore builtins\n if var.module == \"builtins\":\n return\n\n return f\"from {var.module} import {var.var_type}\"\n\n\ndef create_variable_signature(var: FortranModuleVariable) -> str:\n \"\"\"Creates the signature (abstract code) for module variables.\n In its abstract representation, it is a class variable.\n \"\"\"\n base_string = [var.name]\n # if a type is declared, type hint as such\n if var.var_type is not None:\n base_string.append(f\": {var.var_type}\")\n # if a default is given, show as such\n if 
var.value is not None:\n base_string.append(f\" = {var.value}\")\n\n return \"\\t\" + \"\".join(base_string)\n\n\ndef create_function_signature(func: FortranModuleMember) -> str:\n \"\"\"Creates the signature (abstract code) for module functions/subroutines.\n In its abstract representation, it is a class method.\n \"\"\"\n # sometimes f2py puts odd characters in the docstring\n # this assert statement will show that this has happened\n # and explain a crash of the script\n assert all(ord(c) != 0 for c in func.docstring)\n\n docstring = func.docstring.replace(\"\\n\", \"\\n\\t\\t\\t\").strip(\"\\n\\t\")\n return f'\\t@classmethod\\n\\tdef {func.name}(cls, *args, **kwargs):\\n\\t\\t\"\"\"{docstring}\"\"\"\\n\\t\\tpass'\n\n\nif __name__ == \"__main__\":\n fortran_module_definitions = [\n create_module_signature(i) for i in get_modules(fortran)\n ]\n\n string = (\n '\"\"\"Abstract definitions of all wrapped modules, including automatically generated docstrings for subroutines/functions created by f2py\"\"\"\\n\\n'\n + \"\".join(fortran_module_definitions)\n )\n\n # write _fortran.py to the process package\n # so it will be autodocumented as if it\n # were a regular source file\n current_dir = Path(__file__).resolve().parent\n target_dir = current_dir / \"../process\"\n with open(target_dir / \"_fortran.py\", \"w\") as file:\n file.write(string)\n","repo_name":"ukaea/PROCESS","sub_path":"scripts/document_fortran_interface.py","file_name":"document_fortran_interface.py","file_ext":"py","file_size_in_byte":7657,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"96"} +{"seq_id":"21290717707","text":"\"\"\" https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/\n\"\"\"\nclass Solution:\n def buildTree(self, P: List[int], I: List[int]) -> Optional[TreeNode]:\n def dfs(P, I):\n if len(I)==0: return None\n val = P.pop(0)\n idx = I.index(val)\n node = TreeNode(val)\n node.left = dfs(P, I[:idx])\n node.right = dfs(P, I[idx+1:])\n return node\n \n return dfs(P, I)","repo_name":"824zzy/Leetcode","sub_path":"I_Searching/DFS/Tree/L0_105_Construc_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_name":"L0_105_Construc_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"96"} +{"seq_id":"18905416693","text":"import random\n\nnum_of_queens = 8\n\n#board = [ [0] * num_of_queens ] * num_of_queens\n#board = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]\nboard = [[0] * num_of_queens]\ni = 1\nwhile i < num_of_queens:\n\tboard.append([0] * num_of_queens)\n\ti += 1\nqueen_position = []\n\n\n\n#for line in board:\n#\tprint(line)\n\n\n# Place queens on board, one per row, one per column\n# Try randomly first\navail_row = []\navail_col = []\ni = 0\nwhile i < num_of_queens:\n\tavail_row.append(i)\n\tavail_col.append(i)\n\ti += 1\nwhile len(avail_row) > 0:\n\ta = random.randint(0, len(avail_row) - 1)\n\tb = random.randint(0, len(avail_col) - 1)\n\tqueen_position.append([avail_row[a], avail_col[b]])\n\tboard[avail_row[a]][avail_col[b]] = 1\n\tavail_row.pop(a)\n\tavail_col.pop(b)\n\nfor line in board:\n\tprint(line)\n\ni = 0\nwhile i < len(queen_position):\n\tj = i + 1\n\twhile j < len(queen_position):\n\t\ta = queen_position[i][0]\n\t\tb = queen_position[i][1]\n\t\twhile a > -1 and b > -1:\n\t\t\ta -= 1\n\t\t\tb -= 1\n\t\t\tif a == 
queen_position[j][0] and b == queen_position[j][1]:\n\t\t\t\tprint(\"Collision between \" + str(queen_position[i]) + \" and \" + str(queen_position[j]))\n\t\ta = queen_position[i][0]\n\t\tb = queen_position[i][1]\n\t\twhile a > -1 and b < num_of_queens + 1:\n\t\t\ta -= 1\n\t\t\tb += 1\n\t\t\tif a == queen_position[j][0] and b == queen_position[j][1]:\n\t\t\t\tprint(\"Collision between \" + str(queen_position[i]) + \" and \" + str(queen_position[j]))\n\t\ta = queen_position[i][0]\n\t\tb = queen_position[i][1]\n\t\twhile a < num_of_queens + 1 and b > -1:\n\t\t\ta += 1\n\t\t\tb -= 1\n\t\t\tif a == queen_position[j][0] and b == queen_position[j][1]:\n\t\t\t\tprint(\"Collision between \" + str(queen_position[i]) + \" and \" + str(queen_position[j]))\n\t\ta = queen_position[i][0]\n\t\tb = queen_position[i][1]\n\t\twhile a < num_of_queens + 1 and b < num_of_queens + 1:\n\t\t\ta += 1\n\t\t\tb += 1\n\t\t\tif a == queen_position[j][0] and b == queen_position[j][1]:\n\t\t\t\tprint(\"Collision between \" + str(queen_position[i]) + \" and \" + str(queen_position[j]))\n\t\tj += 1\n\ti += 1","repo_name":"jonorga/Queens-Problem","sub_path":"queens.py","file_name":"queens.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20278021525","text":"import collectd\nimport freebox_v5_status.freeboxstatus\nimport netaddr\n\n\nPLUGIN_NAME = 'freeboxv5'\nINTERVAL = 60 # seconds\n\n_fbx = None\n\ncollectd.info('Loading Python plugin:' + PLUGIN_NAME)\n\n\ndef init():\n \"\"\"\n Init of the freebox object\n \"\"\"\n global _fbx\n collectd.info('Initialization :' + PLUGIN_NAME)\n _fbx = freebox_v5_status.freeboxstatus.FreeboxStatus()\n\n\ndef configure(config):\n \"\"\"\n Read configuration options\n \"\"\"\n for node in config.children:\n key = node.key.lower()\n val = node.values[0]\n\n if key == 'interval':\n global INTERVAL\n INTERVAL = val\n else:\n collectd.info('Freeboxv5 plugin: Unknown config key \"%s\"' % key)\n\n\n\ndef read(data=None):\n \"\"\"\n Reads values from freebox and dispatches them to collectd\n \"\"\"\n\n global _fbx\n _fbx.update()\n\n CRC_up = _fbx.status['adsl']['CRC']['up']\n CRC_down = _fbx.status['adsl']['CRC']['down']\n FEC_up = _fbx.status['adsl']['FEC']['up']\n FEC_down = _fbx.status['adsl']['FEC']['down']\n HEC_up = _fbx.status['adsl']['HEC']['up']\n HEC_down = _fbx.status['adsl']['HEC']['down']\n attenuation_up = _fbx.status['adsl']['attenuation']['up']\n attenuation_down = _fbx.status['adsl']['attenuation']['down']\n sync_up = _fbx.status['adsl']['synchro_speed']['up']\n sync_down = _fbx.status['adsl']['synchro_speed']['down']\n uptime = _fbx.status['general']['uptime'].total_seconds()\n publicIP = int(netaddr.IPAddress(_fbx.status['network']['public_ip']))\n\n dispatch_value('uptime', 'uptime', (uptime,))\n dispatch_value('ATM_errors', 'CRC', (CRC_down, CRC_up))\n dispatch_value('ATM_errors', 'FEC', (FEC_down, FEC_up))\n dispatch_value('ATM_errors', 'HEC', (HEC_down, HEC_up))\n dispatch_value('attenuation', 'attenuation', (attenuation_down, attenuation_up))\n dispatch_value('sync', 'sync', (sync_down, sync_up))\n dispatch_value('IP', 'public_ip', (publicIP,))\n\n\ndef dispatch_value(val_type, type_instance, value, plugin_instance=''):\n \"\"\"\n Dispatch a value to collectd\n \"\"\"\n collectd.info('Dispatching: %s=%r' % (type_instance, value))\n val = collectd.Values()\n val.plugin = PLUGIN_NAME\n val.plugin_instance = plugin_instance\n val.type = val_type\n if 
len(type_instance):\n val.type_instance = type_instance\n val.values = value\n# val.interval = 10\n val.dispatch()\n\n#\n# Register our callbacks to collectd\n#\n\ncollectd.register_init(init)\ncollectd.register_config(configure)\ncollectd.register_read(read, INTERVAL)\n\n\n","repo_name":"qberdugo/collectd-freeboxv5","sub_path":"collectd-freeboxv5-plugin.py","file_name":"collectd-freeboxv5-plugin.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"40730529946","text":"#######\n# MI Flora reader\n#\n# Module to interact with MI Flora devices using basnigholt/miflora and bluepy libraries\n#######\n\n# miflora poller info at https://github.com/basnijholt/miflora\nimport logging\nfrom miflora.miflora_poller import MiFloraPoller, MI_CONDUCTIVITY, MI_MOISTURE, MI_LIGHT, MI_TEMPERATURE, MI_BATTERY\nfrom miflora import miflora_scanner\nfrom btlewrap.bluepy import BluepyBackend\n\nFIRMWARE = \"Firmware\"\nMOISTURE = \"Moisture\"\nTEMPERATURE = \"Temperature\"\nLIGHT = \"Light\"\nFERTILIZER = \"Fertilizer\"\nBATTERY = \"Battery\"\n\nDEMO_MESSAGE = \"DEMO mode is active\"\n\nlog = logging.getLogger(__name__)\nmode = ''\n\ndef connect(mac, run_mode):\n poller = None\n global mode \n mode = run_mode\n if mode != 'DEMO':\n poller = MiFloraPoller(mac, BluepyBackend)\n else: \n log.warning(DEMO_MESSAGE)\n\n return poller\n\ndef scan(run_mode):\n log.info('Looking for MiFlora devices...')\n if (run_mode != 'DEMO'):\n devices = miflora_scanner.scan(BluepyBackend, 10)\n log.info('Found {} devices:'.format(len(devices)))\n for device in devices:\n log.info(' {}'.format(device))\n else:\n log.warning(DEMO_MESSAGE)\n\ndef poll(poller):\n log.debug(\"Starting poll\")\n device_info = {}\n if (mode != 'DEMO'):\n device_info[FIRMWARE] = poller.firmware_version()\n device_info[TEMPERATURE] = poller.parameter_value(MI_TEMPERATURE)\n device_info[MOISTURE] = poller.parameter_value(MI_MOISTURE)\n device_info[LIGHT] = poller.parameter_value(MI_LIGHT)\n device_info[FERTILIZER] = poller.parameter_value(MI_CONDUCTIVITY)\n device_info[BATTERY] = poller.parameter_value(MI_BATTERY)\n else:\n log.warning(DEMO_MESSAGE)\n device_info[FIRMWARE] = 1\n device_info[TEMPERATURE] = 10\n device_info[MOISTURE] = 10\n device_info[LIGHT] = 1\n device_info[FERTILIZER] = 1\n device_info[BATTERY] = 1\n log.debug(\"Ending poll\" + str(device_info))\n return device_info\n\n","repo_name":"mfagundez/plant-manager","sub_path":"miflora_utils.py","file_name":"miflora_utils.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1990807083","text":"# 새로운 operator를 만들려고 할 때 baseOperator로부터 상속받아야 한다.\n# 모든 operator가 같은 minimum function, attributes을 공유하기 위해서 사용된다.\nfrom airflow.models import BaseOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom elasticsearch_plugin.hooks.elastic_hook import ElasticHook\n\nfrom contextlib import closing\nimport json\n\nclass PostgresToElasticOperator(BaseOperator):\n # BaseOperator에서는 *args, **kwargs 인자를 추가 해야 한다.\n def __init__(self, sql, index, \n postgres_conn_id = \"postgres_default\", \n elastic_conn_id = \"elasticsearch_default\", *args, **kwargs): \n super(PostgresToElasticOperator, self).__init__(*args, *kwargs)\n\n self.sql = sql\n self.index = index\n self.postgres_conn_id = postgres_conn_id\n self.elastic_conn_id = elastic_conn_id\n \n def execute(self, context):\n es = 
ElasticHook(conn_id = self.elastic_conn_id)\n pg = PostgresHook(postgres_conn_id = self.postgres_conn_id)\n with closing(pg.get_conn()) as conn:\n with closing(conn.cursor()) as cur:\n cur.itersize = 1000\n cur.execute(sql)\n for row in cur:\n doc = json.dumps(row, indent = 2)\n es.add_doc(index = self.index, doc_type = 'external', doc = doc)\n\n","repo_name":"micopes/airflow","sub_path":"plugins/elasticsearch_plugin/operators/postgres_to_elastic.py","file_name":"postgres_to_elastic.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"36259155455","text":"import irlc\nfrom irlc.ex09.rl_agent import TabularAgent, TabularQ\nfrom irlc.ex09.rl_agent import TabularAgent\nfrom qtrain import qtrain\nimport gym\nfrom irlc import main_plot\nimport matplotlib.pyplot as plt\nfrom irlc import savepdf\nfrom irlc.ex11.q_agent import QAgent, RAgent\nfrom ModSnake import *\n\n\n# Set total number of episodes\nn_episodes = 5000\n\nepsilon = 1 # Exploration rate\ndecay_epsilon = (True, 1.45, n_episodes // 100)\n\nalpha = 0.1 # Learning Rate\nbetas = [0.2]\ngammas = [0.9] # Discount Factor\n\n\nmax_runs = 10\nmax_steps = 10000000\n\ngrid_sizes = [[10,10],[15,15],[20,20]]\ndef qsnake(grid_size, gamma):\n q_exp = f\"experiments/grid{grid_size[0]}x{grid_size[0]}/q_gamma{gamma}\"\n # Make environment instance\n env = Snake_env(grid_size)\n agent = QAgent(env, gamma=gamma, epsilon=epsilon, alpha=alpha)\n\n stats, trajectories, agent = qtrain(env, agent, q_exp, num_episodes=n_episodes, max_runs=max_runs,\n return_agent=True, max_steps=max_steps, decay_epsilon=decay_epsilon)\n\n # print(stats)\n return env, agent\n\n\ndef rsnake(grid_size, beta):\n q_exp = f\"experiments/grid{grid_size[0]}x{grid_size[0]}/r_beta{beta}\"\n # Make environment instance\n env = Snake_env(grid_size)\n agent = RAgent(env, alpha=alpha, beta=beta, epsilon=epsilon)\n\n stats, trajectories, agent = qtrain(env, agent, q_exp, num_episodes=n_episodes, max_runs=max_runs,\n return_agent=True, max_steps=max_steps, decay_epsilon=decay_epsilon)\n\n return env, agent\n\n\nfor grid_size in grid_sizes:\n for _ in range(5):\n for gamma in gammas:\n qsnake(grid_size, gamma)\n for beta in betas:\n rsnake(grid_size, beta)\n\n\n# while True:\n# observation = env.reset() # Constructs an instance of the game\n# snakes_remaining = 1\n# while snakes_remaining != 0:\n# env.render()\n# action = agent.Q.get_optimal_action(observation)\n# observation, reward, done, info = env.step(action)\n# snakes_remaining = info['snakes_remaining']\n# # print('OBS: ' , observation)\n# print(observation)\n# # print('Reward: ' , reward)\n# # print('Done: ' , done)\n# # print('Info: ' , info)\n#\n# env.close()","repo_name":"natashanorsker/RL_snakes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38070892072","text":"import argparse\nimport json\nimport requests\nimport textwrap\nimport requestToken as token\n\n\n# if we're using this as a stand-alone script, run the following\nif __name__ == \"__main__\":\n # first set up the command line arguments and parse them\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"username\", type=str, help =\"API username\")\n parser.add_argument(\"password\", type=str, help=\"password of API user\")\n parser.add_argument(\"ip_address\", 
type=str, help=\"IP of FMC\")\n parser.description = textwrap.dedent('''\\\n... input file formatting – one name per line\n... --------------------------------\n... name,value,description,overridable,type\n ''')\n parser.add_argument(\"csvInput\", type=str,\n help=\"provide the csv of network objects \\\n to add.\")\n args = parser.parse_args()\n\n # set needed variables to generate a token\n u = args.username\n p = args.password\n ip = args.ip_address\n path = \"/api/fmc_platform/v1/auth/generatetoken\"\n header = {} # don't need to instantiate this, but doing so for clarity\n payload = [] # don't need to instantiate this, but doing so for clarity\n\n # call the token generating function and populate our header\n header = token.get_token(ip, path, u, p)\n print(header)\n # we need to update our path to account for the domain UUID as follows\n path = f\"/api/fmc_config/v1/domain/{header['DOMAIN_UUID']}/object/networks?bulk=true\"\n\n # and process the file into the payload\n with open(args.csvInput) as file:\n for netObjs in file:\n netObj = netObjs.strip().split(',')\n try: # try block in case something unexpected occurs\n netObject = f'{{\"name\": \"{netObj[0]}\",\"value\": \"{netObj[1]}\",\"overridable\": {netObj[2]},\"description\": \"{netObj[3]}\",\"type\": \"{netObj[4]}\"}}'\n payload.append(json.loads(netObject))\n\n except Exception as err:\n raise SystemExit(err)\n \n header_f = {\"accept\": \"application/json\", \"Content-Type\": \"application/json\", \"X-auth-access-token\": header['X-auth-access-token']}\n\n print(json.dumps(payload, indent=4))\n # now to POST our list of network objects\n try:\n r = requests.post(f\"https://{ip}/{path}\", headers=header_f, data=json.dumps(payload), verify=False)\n \n\n print(r.request.body)\n print(\"Headers: \" + str(r.headers) + \"\\n\")\n print(\"Text: \" + str(r.text) + \"\\n\")\n print(\"Status Code: \" + str(r.status_code))\n\n except requests.exceptions.HTTPError as errh:\n raise SystemExit(errh)\n except requests.exceptions.RequestException as err:\n raise SystemExit(err)","repo_name":"SD123456789/FMC-RESTAPI-LABS","sub_path":"bulkPostNetObjs.py","file_name":"bulkPostNetObjs.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"31519227557","text":"# -*- coding:utf-8 -*-\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom flask import abort\r\nimport hashlib\r\nimport xmltodict\r\nimport time\r\n\r\ntoken = \"test123\"\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return \"Hello World!\"\r\n\r\n@app.route(\"/wechat\", methods=[\"GET\",\"POST\"])\r\ndef weixin():\r\n if request.method == \"GET\": # 判断请求方式是GET请求\r\n my_signature = request.args.get('signature') # 获取携带的signature参数\r\n my_timestamp = request.args.get('timestamp') # 获取携带的timestamp参数\r\n my_nonce = request.args.get('nonce') # 获取携带的nonce参数\r\n my_echostr = request.args.get('echostr') # 获取携带的echostr参数\r\n\r\n if not all([my_signature, my_timestamp, my_nonce, my_echostr]):\r\n \tabort(400)\r\n #token = 'test123' # 一定要跟刚刚填写的token一致\r\n\r\n # 进行字典排序\r\n data = [token,my_timestamp ,my_nonce ]\r\n data.sort()\r\n\r\n # 拼接成字符串\r\n temp = ''.join(data)\r\n\r\n # 进行sha1加密\r\n mysignature = hashlib.sha1()\r\n mysignature.update(temp.encode('utf-8'))\r\n res = mysignature.hexdigest()\r\n # 加密后的字符串可与signature对比,标识该请求来源于微信\r\n if my_signature == res:\r\n return my_echostr\r\n else:\r\n \tabort(403)\r\n elif request.method == \"POST\": # 
判断请求方式是POST请求\r\n \txml_str = request.data\r\n \tif not xml_str:\r\n \t\tabort(400)\r\n\r\n \t# 对xml字符串进行解析\r\n \txml_dict = xmltodict.parse(xml_str)\r\n \txml_dict = xml_dict.get(\"xml\")\r\n\r\n \t# 提取消息类型\r\n \tmsg_type = xml_dict.get(\"MsgType\")\r\n\r\n \tif msg_type == \"text\":\r\n \t\t# 表示发送的是文本消息\r\n \t\t# 构造返回值,经由微信服务器恢复给用户的消息内容\r\n \t\tresp_dict = {\r\n \t\t\t\"xml\":{\r\n \t\t\t\t\"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \t\t\t\t\"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \t\t\t\t\"CreateTime\": int(time.time()),\r\n \t\t\t\t\"MsgType\": \"text\",\r\n \t\t\t\t\"Content\": xml_dict.get(\"Content\")\r\n \t\t\t}\r\n \t\t}\r\n \telif msg_type == \"image\":\r\n resp_dict = {\r\n \"xml\":{\r\n \"ToUserName\": xml_dict.get(\"FromUserName\"),\r\n \"FromUserName\": xml_dict.get(\"ToUserName\"),\r\n \"CreateTime\": int(time.time()),\r\n \"MsgType\": \"text\",\r\n \"Content\": \"test pic\"\r\n }\r\n }\r\n\r\n \t# 将字典转换成为xml字符串\r\n \tresp_xml_str = xmltodict.unparse(resp_dict)\r\n \t# 返回消息数据给微信服务器\r\n \treturn resp_xml_str\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0', port=80, debug=True)\r\n","repo_name":"0xFlag/wechatpy","sub_path":"2.复读测试.py","file_name":"2.复读测试.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"18836278412","text":"import numpy as np\nimport networkx as nx\n\nG = [[0, 1, 0, 1], \n [0, 0, 0, 1], \n [0, 1, 0, 1], \n [0, 1, 0, 0]]\n\ndef showG(G, directed=True):\n g = nx.DiGraph() if directed else nx.Graph()\n n = len(G)\n for u in range(n):\n g.add_node(u)\n for v in range(n):\n if G[u][v] != 0:\n g.add_edge(u,v)\n nx.draw(g, with_labels = True)\n\nshowG(G)","repo_name":"TheRoro/Algorithms","sub_path":"Graphs/adjacency_matrix.py","file_name":"adjacency_matrix.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"71178603196","text":"import math\nfrom collections import defaultdict, namedtuple\nfrom copy import deepcopy\nfrom typing import Optional\n\nfrom . 
import yaku\nfrom .tile import *\n\n\ndef calc_fu(gtac: TilesAndCond) -> int:\n if yaku.qi_dui_zi(gtac) > 0:\n return 25\n if yaku.ping_he(gtac) > 0:\n if gtac.is_tsumo:\n return 20\n else:\n return 30\n\n fu = 0\n\n for g, fl in gtac.group_tiles:\n f = 0\n if yaku.group_type(g) == 1:\n f = 2\n elif yaku.group_type(g) == 2:\n f = 8\n if not fl:\n f *= 2\n if yaku.is_yao_jiu_pai(g[0]):\n f *= 2\n fu += f\n\n if yaku.is_san_yuan_pai(gtac.eye[0]):\n fu += 2\n if yaku.tile_num(gtac.eye[0]) == gtac.zi_fong:\n fu += 2\n if yaku.tile_num(gtac.eye[0]) == gtac.chang_fong:\n fu += 2\n\n if gtac.ting_cnt <= 1:\n fu += 2\n\n if gtac.is_tsumo:\n fu += 2\n\n fu = max(fu, 1)\n if gtac.is_tsumo or not gtac.is_men_qing:\n return ceil_to(20+fu, 10)\n else:\n return ceil_to(30+fu, 10)\n\n\ndef calc_fan(gtac: TilesAndCond) -> tuple[int, list[tuple[int, str]]]:\n \"\"\"\n Return (total_fan, [(fan_1,yaku_name_1), (fan_2,yaku_name_2), ...]\n\n Return (0,[]) if no yaku\n \"\"\"\n yakumans: list[tuple[int, str]] = []\n normals: list[tuple[int, str]] = []\n dora = 0\n\n for i, y in enumerate(yaku.YAKU_CHECKERS):\n f = y(gtac)\n if f <= 0:\n continue\n if yaku.YAKU_NAMES[i] == 'dora':\n dora += f\n elif yaku.YAKU_NAMES[i] in yaku.YAKUMANS:\n yakumans.append((f, yaku.YAKU_NAMES[i]))\n else:\n normals.append((f, yaku.YAKU_NAMES[i]))\n\n if len(yakumans) > 0:\n return (sum([f for f, y in yakumans]), yakumans)\n if len(normals) > 0:\n if dora > 0:\n normals.append((dora, 'dora'))\n return (sum([f for f, y in normals]), normals)\n return (0, [])\n\n\ndef calc_basic(fan: int, fu: int) -> int:\n \"\"\"fan must be > 0\"\"\"\n assert(fan > 0)\n fan_level = [5, 7, 10, 12]\n fan2points = [min(fu*(2**(fan+2)), 2000), 3000, 4000, 6000]\n\n for i, f in enumerate(fan_level):\n if fan <= f:\n return fan2points[i]\n return fan//13*8000\n\n\ndef ceil_to(x, y) -> int:\n return (((x-1)//y)+1)*y\n\n\nRonResult = namedtuple('RonResult', 'basic ron tsumo fan fu yakus')\n\n\ndef grouped_tile_points(gtac: TilesAndCond) -> RonResult:\n fan, yakus = calc_fan(gtac)\n fu = calc_fu(gtac)\n b = calc_basic(fan, fu)\n if fan > 0:\n if gtac.is_qin:\n return RonResult(\n b,\n ceil_to(b*6, 100),\n ceil_to(b*2, 100),\n fan,\n fu,\n yakus\n )\n else:\n return RonResult(\n b,\n ceil_to(b*4, 100),\n (ceil_to(b*2, 100), ceil_to(b*1, 100)),\n fan,\n fu,\n yakus\n )\n return RonResult(0, 0, 0, 0, 0, [])\n\n\ndef get_shun_from_last(ugtac: TilesAndCond) -> Optional[TilesAndCond]:\n \"\"\"\n assert ugtac.free_tiles is sorted\n \"\"\"\n ft = ugtac.free_tiles\n\n if yaku.tile_suit(ft[-1]) == yaku.HONORS:\n return None\n\n shun_pos = []\n i = 0\n for j in range(len(ft)-1, -1, -1):\n if yaku.tile_suit(ft[j]) != yaku.tile_suit(ft[-1]) or i > 2:\n break\n if yaku.tile_num(ft[j]) == int(yaku.tile_num(ft[-1]))+i:\n shun_pos.append(j)\n i += 1\n\n if len(shun_pos) == 3:\n newtac = deepcopy(ugtac)\n newtac.group_tiles.append(TileGroup([], 0))\n for i in shun_pos:\n newtac.group_tiles[-1].tiles.append(newtac.free_tiles.pop(i))\n return newtac # it should not be gc\n else:\n return None\n\n\ndef get_ke_from_last(ugtac: TilesAndCond) -> Optional[TilesAndCond]:\n ft = ugtac.free_tiles\n if (yaku.tile_suit(ft[-1]) != yaku.tile_suit(ft[-2])\n or yaku.tile_suit(ft[-2]) != yaku.tile_suit(ft[-3])):\n return None\n if (ft[-1] == ft[-2] and ft[-2] == ft[-3]):\n newtac = deepcopy(ugtac)\n newtac.group_tiles.append(TileGroup([\n newtac.free_tiles.pop(),\n newtac.free_tiles.pop(),\n newtac.free_tiles.pop()\n ], 0))\n return newtac\n return None\n\n\ndef calc_grouped_ting_cnt(ugtac: 
TilesAndCond) -> set[int]:\n # 對於一組分組方法,遍歷每個沒有副漏(除了暗槓)&&含有最後一張牌的組,檢查組內牌型,聽牌數取小的\n res: set[int] = set()\n for g, fl in ugtac.group_tiles:\n\n if fl or yaku.group_type(g) == 2 or ugtac.last_tile not in g:\n continue\n if yaku.group_type(g) == 0:\n # 1,2,* or *,8,9\n # x,*,x+2\n # x,x+1,*\n nums = [int(yaku.tile_num(t)) for t in g if t != ugtac.last_tile]\n if nums == [1, 2] or nums == [8, 9]:\n res.add(1)\n elif nums[0]+1 == nums[1]:\n res.add(2)\n else:\n res.add(1)\n elif yaku.group_type(g) == 1:\n res.add(2)\n elif yaku.group_type(g) == 3:\n res.add(1)\n return res\n\n\ndef group_normally(ugtac: TilesAndCond, res: list[TilesAndCond]):\n ft = ugtac.free_tiles\n ft.sort(reverse=True)\n\n def dfs(ugtac: TilesAndCond, res: list[TilesAndCond]):\n \"\"\"\n ugtac:\n\n free tiles: sort by suits, sort by number, reverse\n \"\"\"\n if len(ugtac.free_tiles) < 3:\n res.append(ugtac)\n return\n\n newtac = get_ke_from_last(ugtac)\n if newtac:\n dfs(newtac, res)\n\n newtac = get_shun_from_last(ugtac)\n if newtac:\n dfs(newtac, res)\n\n ress = set()\n eye = None\n for i in range(len(ft)-1, -1, -1):\n if (eye is None or eye.tiles[0] != ft[i]) and ft[i] == ft[i-1]:\n newtac = deepcopy(ugtac)\n eye = TileGroup([newtac.free_tiles.pop(\n i), newtac.free_tiles.pop(i-1)], 0)\n # newtac.print_content()\n ways: list[TilesAndCond] = []\n dfs(newtac, ways)\n for w in ways:\n w.group_tiles.append(eye)\n for tcnt in calc_grouped_ting_cnt(w):\n way_with_different_tcnt = deepcopy(w)\n way_with_different_tcnt.ting_cnt = tcnt\n ress.add(way_with_different_tcnt)\n res.extend(list(ress))\n\n\ndef group_qi_dui_zi(ugtac: TilesAndCond, res: list[TilesAndCond]):\n ugtac.free_tiles.sort(reverse=True)\n if len(ugtac.free_tiles) != 14:\n return\n newtac = deepcopy(ugtac)\n while len(newtac.free_tiles) > 0:\n eye = [newtac.free_tiles.pop(), newtac.free_tiles.pop()]\n if eye[0] != eye[1]:\n return\n newtac.group_tiles.append(TileGroup(eye, 0))\n res.append(newtac)\n\n\ndef group_guo_shi_wu_shuang(ugtac: TilesAndCond, res: list[TilesAndCond]):\n ugtac.free_tiles.sort(reverse=True)\n if len(ugtac.free_tiles) != 14:\n return\n # 出現 13 張牌 + 一張重複\n # 13 group\n # 是否是國士\n # 是國士:iterator sorted free tiles, put into newtac grouped\n GUO_SHI = {\n f'{DOTS}-1', f'{DOTS}-9', f'{BAMBOO}-1', f'{BAMBOO}-9', f'{CHARACTERS}-1', f'{CHARACTERS}-9',\n f'{HONORS}-{EAST}', f'{HONORS}-{SOUTH}', f'{HONORS}-{WEST}', f'{HONORS}-{NORTH}',\n f'{HONORS}-{RED}', f'{HONORS}-{GREEN}', f'{HONORS}-{WHITE}',\n }\n if set(ugtac.all_tiles) != GUO_SHI:\n return\n newtac = deepcopy(ugtac)\n while len(newtac.free_tiles) > 0:\n newtac.group_tiles.append(TileGroup([], 0))\n newtac.group_tiles[-1].tiles.append(newtac.free_tiles.pop())\n if len(newtac.free_tiles) and newtac.group_tiles[-1].tiles[0] == newtac.free_tiles[-1]:\n newtac.group_tiles[-1].tiles.append(newtac.free_tiles.pop())\n res.append(newtac)\n\n\ndef highest_point(ungrouped_14_tac: TilesAndCond) -> RonResult:\n \"\"\"\n may not in win pattern, but it's ready to win\n \"\"\"\n ways: list[TilesAndCond] = list()\n group_normally(ungrouped_14_tac, ways)\n group_qi_dui_zi(ungrouped_14_tac, ways)\n group_guo_shi_wu_shuang(ungrouped_14_tac, ways)\n\n res = RonResult(0, 0, 0, 0, 0, [])\n for gtac in ways:\n ron = grouped_tile_points(gtac)\n if ron.basic > res.basic:\n res = ron\n return res\n\n\ndef find_last_tile(ungroup_13_tac: TilesAndCond) -> tuple[RonResult, str]:\n res = RonResult(0, 0, 0, 0, 0, [])\n last_tile = ''\n for t in TILES:\n tac = deepcopy(ungroup_13_tac)\n tac.free_tiles.append(t)\n tac.last_tile = 
t\n ron = highest_point(tac)\n if ron.basic > res.basic:\n res = ron\n last_tile = t\n return (res, last_tile)\n\n\n# if __name__ == '__main__': # testing ouob\n# gtac = TilesAndCond()\n# gtac.free_tiles = [\n# f'{CHARACTERS}-9', f'{CHARACTERS}-9',\n# '-white', 'honors-white', 'honors-white',\n# 'honors-red', 'honors-red', 'honors-red',\n# ]\n# gtac.group_tiles = [\n# TileGroup(['b-3', 'b-4', 'b-5'], 1),\n# TileGroup(['honors-green', 'honors-green', 'honors-green'], 1)\n# ]\n# gtac.last_tile = 'honors-red'\n# gtac.is_tsumo = False\n# gtac.is_ippatsu = False\n# gtac.is_riichi = False\n# #gtac.doras['honors-north'] = 1\n# #gtac.aka_doras = []\n# gtac.chang_fong = 'east'\n# gtac.zi_fong = 'east'\n\n# gtac.free_tiles.sort(reverse=True)\n# # res = get_shun_from_last(gtac)\n# # if res:\n# # res.print_content()\n# # else:\n# # eprint('none')\n\n# res: list[TilesAndCond] = []\n# normal_grouped_way(gtac, res)\n# group_guo_shi_wu_shuang(gtac, res)\n# group_qi_dui_zi(gtac, res)\n# for r in res:\n# r._print_content()\n# print(grouped_tile_points(r))\n","repo_name":"ArutoriaWhite/mahjong_helper","sub_path":"mahjong_helper/points.py","file_name":"points.py","file_ext":"py","file_size_in_byte":9877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"4921553968","text":"import problems.abstract\nfrom copy import deepcopy\nclass NimProblem(problems.abstract.TreeProblem):\n def __init__(self, startvalue = 7, maxStarts=True):\n start = startvalue if isinstance(startvalue, list) else [startvalue]\n self.rootnode = NimNode(start, self, 1, maxStarts)\n self.relations = []\n\n\n def renderNode(self, graph, node):\n if node.result == 0:\n if node.endState():\n graph.node(str(node),str(node.sticks),fillcolor=\"red\", style=\"filled\")\n else:\n graph.node(str(node),str(node.sticks),fillcolor=\"darkorange\", style=\"filled\")\n elif node.result == 1:\n if node.endState():\n graph.node(str(node),str(node.sticks),fillcolor=\"green\", style=\"filled\")\n else:\n graph.node(str(node),str(node.sticks),fillcolor=\"lawngreen\", style=\"filled\")\n else:\n graph.node(str(node),str(node.sticks))\n\nclass NimNode(problems.abstract.AdversarialTreeNode):\n def __init__(self, sticks, base, depth, maximising=True):\n self.sticks = sticks\n self.base = base\n self.depth = depth\n self.result = None\n self.maximising = maximising\n \n def __str__(self):\n strout = str(self.depth) + \"d\"\n for i in self.sticks:\n strout += \"{}s\".format(i)\n return strout[:-1] + \"u\" + (\"\" if self.result == None else str(self.result))\n \n def __repr__(self):\n return self.__str__()\n \n def __eq__(self, other):\n return self.sticks == other.sticks and self.depth == other.depth and self.base is other.base and self.result == other.result\n \n def endState(self):\n for stick in self.sticks:\n if stick not in [1,2]:\n return False\n return True\n \n def children(self):\n childs = []\n\n for i in range(len(self.sticks)):\n for j in range(1,int((self.sticks[i]-1)/2)+1):\n sticks = deepcopy(self.sticks)\n changed = sticks.pop(i)\n sticks.append(j)\n sticks.append(changed-j)\n sticks.sort(reverse=True) # Comment out to speed up, makes easier to read\n childs.append(NimNode(sticks, self.base, self.depth, not self.maximising))\n self.base.addNode(self, childs[-1])\n return childs\n \n def setUtility(self, value):\n self.result = value\n\n def maximisingPlayer(self):\n return self.maximising\n \n def endUtility(self):\n return 0 if self.maximisingPlayer() else 
1","repo_name":"mattysmith22/G51FAI","sub_path":"problems/nim.py","file_name":"nim.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42243932115","text":"#!/usr/bin/env python\r\n\"\"\"\r\nThis simple example is used for the line-by-line tutorial\r\nthat comes with pygame. It is based on a 'popular' web banner.\r\nNote there are comments here, but for the full explanation,\r\nfollow along in the tutorial.\r\n\"\"\"\r\n\r\n\r\n#Import Modules\r\nimport os, pygame\r\nfrom pygame.locals import *\r\nfrom pygame.compat import geterror\r\nimport math\r\nimport logging\r\nimport random\r\n\r\nif not pygame.font: print ('Warning, fonts disabled')\r\nif not pygame.mixer: print ('Warning, sound disabled')\r\n\r\nmain_dir = os.path.split(os.path.abspath(__file__))[0]\r\ndata_dir = os.path.join(main_dir, 'snake_res')\r\n\r\nsprite_size = 24\r\n#functions to create our resources\r\ndef load_image(name, colorkey=None, w=0, h=0):\r\n fullname = os.path.join(data_dir, name)\r\n try:\r\n image = pygame.image.load(fullname)\r\n if w!= 0 and h != 0:\r\n image = pygame.transform.scale(image, (w, h))\r\n except pygame.error:\r\n print ('Cannot load image:', fullname)\r\n raise SystemExit(str(geterror()))\r\n image = image.convert()\r\n if colorkey is not None:\r\n if colorkey is -1:\r\n colorkey = image.get_at((0,0))\r\n image.set_colorkey(colorkey, RLEACCEL)\r\n return image, image.get_rect()\r\n\r\ndef load_sound(name):\r\n class NoneSound:\r\n def play(self): pass\r\n if not pygame.mixer or not pygame.mixer.get_init():\r\n return NoneSound()\r\n fullname = os.path.join(data_dir, name)\r\n try:\r\n sound = pygame.mixer.Sound(fullname)\r\n except pygame.error:\r\n print ('Cannot load sound: %s' % fullname)\r\n raise SystemExit(str(geterror()))\r\n return sound\r\n\r\nclass Snake:\r\n def __init__(self, name, n=10, angle = 90):\r\n \"\"\"create snake at the center of the window\r\n the move the body to correct postion\"\"\"\r\n self.bodys = []\r\n self.name = name\r\n self.angle = angle\r\n self.move = False #control when the snake start move\r\n self.speed = 2\r\n\r\n screen = pygame.display.get_surface()\r\n area = screen.get_rect()\r\n x = area.x + area.w / 2 - sprite_size / 2 - (n - 1) * sprite_size * math.cos(self.angle * math.pi / 180)\r\n y = area.y + area.h / 2 - sprite_size / 2 + (n - 1) * sprite_size * math.sin(self.angle * math.pi / 180)\r\n i = 0\r\n for i in range(n, 0, -1):\r\n x2 = area.x + area.w / 2 - sprite_size / 2 - (i - 1) * sprite_size * math.cos(self.angle * math.pi / 180)\r\n y2 = area.y + area.h / 2 - sprite_size / 2 + (i - 1) * sprite_size * math.sin(self.angle * math.pi / 180)\r\n distance = math.sqrt((x - x2) * (x - x2) + (y - y2) * (y - y2))\r\n steps = int(distance / self.speed)\r\n self.make_body(i - 1, self.angle, x, y, steps, self)\r\n\r\n x2 = area.x + area.w / 2 - sprite_size / 2\r\n y2 = area.y + area.h / 2 - sprite_size / 2\r\n distance = math.sqrt((x-x2)*(x-x2)+(y-y2)*(y-y2))\r\n steps = int(distance/self.speed)\r\n for j in range(0, steps):\r\n for i in range(0, n):\r\n if j < self.bodys[i].initial_steps:\r\n if i == 0:\r\n self.bodys[i].walk()\r\n else:\r\n self.bodys[i].follow()\r\n self.move = True\r\n\r\n def make_body(self, index, angle, x, y, steps, snake):\r\n snake = SnakeSprite(x, y, angle, index, 'body1.bmp', steps, self)\r\n self.bodys.insert(0, snake)\r\n\r\n def set_direction(self, angle):\r\n \"\"\"set current head direction\"\"\"\r\n if 
len(self.bodys):\r\n s = self.bodys[0]\r\n s.set_direction(angle)\r\n\r\n def get_bodys(self):\r\n \"\"\"mget_bodys return spirtes object ot RenderUpdates\"\"\"\r\n return self.bodys\r\n\r\nclass SnakeSprite(pygame.sprite.Sprite):\r\n \"\"\"moves a monkey critter across the screen. it can spin the\r\n monkey when it is punched.\"\"\"\r\n def __init__(self, x, y, angle, n, img, steps, snake):\r\n pygame.sprite.Sprite.__init__(self) #call Sprite intializer\r\n self.index = n\r\n self.image, self.rect = load_image(img, -1, sprite_size, sprite_size)\r\n self.rect.left = x\r\n self.rect.top = y\r\n self.angle = angle\r\n self.speed = 2\r\n screen = pygame.display.get_surface()\r\n self.area = screen.get_rect()\r\n self.snake = snake\r\n self.turns = [] #save the x, y for next body follow\r\n self.initial_steps = steps #for move the body's correct position\r\n\r\n def update(self):\r\n \"\"\"\"walk inside the window sometimes change the angle\"\"\"\r\n if self.index == 0 and len(self.snake.bodys) > 0:\r\n if self.rect.left < self.area.left or \\\r\n self.rect.right > self.area.right or \\\r\n self.rect.top < self.area.top or \\\r\n self.rect.top + self.rect.height > self.area.top + self.area.height:\r\n angle = (self.angle + 180)%360\r\n self.snake.set_direction(angle)\r\n else:\r\n if random.randint(0, 99) < 2:\r\n angle = random.randrange(0,30, 5)\r\n self.snake.set_direction(angle + self.angle)\r\n\r\n if self.snake.move:\r\n if self.index == 0:\r\n self.walk()\r\n else:\r\n self.follow()\r\n\r\n def walk(self, save = False):\r\n x = self.rect.x + round(self.speed * math.cos(self.angle * math.pi / 180))\r\n y = self.rect.y - round(self.speed * math.sin(self.angle * math.pi / 180))\r\n self.rect.x = x\r\n self.rect.y = y\r\n\r\n if save:\r\n self.set_direction(self.angle, True)\r\n else:\r\n if self.index != len(self.snake.bodys) - 1 and len(self.snake.bodys) > 0:\r\n self.set_direction(self.angle, True)\r\n\r\n def follow(self):\r\n \"\"\"move the body, just follow the previous piece.\"\"\"\r\n if self.index < 0:\r\n return\r\n\r\n pre_sprite = self.snake.bodys[self.index - 1]\r\n direction = pre_sprite.remove_direction()\r\n\r\n if direction != None:\r\n angle = direction[0]\r\n self.angle = angle\r\n x = direction[1]\r\n y = direction[2]\r\n self.rect.x = x\r\n self.rect.y = y\r\n\r\n if self.index != len(self.snake.bodys) - 1 and len(self.snake.bodys) > 0:\r\n self.set_direction(self.angle, True)\r\n\r\n def set_direction(self, angle, save = False):\r\n \"\"\"set angle and position for next piece\"\"\"\r\n self.angle = angle\r\n if save == True:\r\n self.turns.append((angle, self.rect.x, self.rect.y))\r\n\r\n def remove_direction(self):\r\n \"\"\"remove the angle and piece when it is used\"\"\"\r\n if len(self.turns) > 0:\r\n return self.turns.pop(0)\r\n return None\r\n\r\ndef main():\r\n \"\"\"this function is called when the program starts.\r\n it initializes everything it needs, then runs in\r\n a loop until the function returns.\"\"\"\r\n logging.basicConfig(filename='snake.log',\r\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(module)s: %(message)s\",\r\n datefmt=\"%Y-%m-%d %H:%M:%S\",\r\n filemode=\"a\", level=logging.DEBUG)\r\n#Initialize Everything\r\n pygame.init()\r\n screen = pygame.display.set_mode((800, 600))\r\n pygame.display.set_caption('Snake')\r\n pygame.mouse.set_visible(0)\r\n\r\n#Create The Backgound\r\n background = pygame.Surface(screen.get_size())\r\n background = background.convert()\r\n background.fill((250, 250, 250))\r\n\r\n#Put Text On The Background, 
Centered\r\n if pygame.font:\r\n #font = pygame.font.Font(None, 36)\r\n #text = font.render(\"Pummel The Chimp, And Win $$$\", 1, (10, 10, 10))\r\n #textpos = text.get_rect(centerx=background.get_width()/2)\r\n #background.blit(text, textpos)\r\n pass\r\n\r\n#Display The Background\r\n screen.blit(background, (0, 0))\r\n pygame.display.flip()\r\n\r\n#Prepare Game Objects\r\n clock = pygame.time.Clock()\r\n allsprites = pygame.sprite.RenderUpdates()\r\n screen = pygame.display.get_surface()\r\n area = screen.get_rect()\r\n\r\n snake = Snake('Kuaikuai')\r\n snake_bodys = snake.get_bodys()\r\n for s in snake_bodys:\r\n allsprites.add(s)\r\n\r\n#Main Loop\r\n going = True\r\n while going:\r\n clock.tick(60)\r\n #Handle Input Events\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n going = False\r\n\r\n allsprites.update()\r\n #Draw Everything\r\n allsprites.clear(screen, background)\r\n rcs = allsprites.draw(screen)\r\n pygame.display.update(rcs)\r\n pygame.quit()\r\n#Game Over\r\n\r\n#this calls the 'main' function when this script is executed\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"bluecrest8/pysnake","sub_path":"snake5.py","file_name":"snake5.py","file_ext":"py","file_size_in_byte":8800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36748528381","text":"import re\n\nnames = {}\n\nf = open(\"names\",\"r\")\nfor x in f:\n\tg = re.match(r'^(.+)@(.+)\\.(.+)',x)\n\tif g:\n\t\tnames[g.group(2)] = names.setdefault (g.group(2),[]) + [g.group(1)]\n\nprint (names)","repo_name":"DangerousVegetable/PROGRAMMING","sub_path":"9th/PYHTHON/20.05.2020 HW/pr2.py","file_name":"pr2.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"74428864314","text":"from typing import Any, Dict, List\nfrom bfo_crawler.functions import *\nfrom bfo_crawler.excel_parser import FinStatements\nfrom bfo_crawler.downloader import download_excel_from_nalog\nfrom bfo_crawler.models.finance_model import FinanceResultsModel\n\nYEARS: List[int] = [2021, 2020]\n\n\ndef get_org_financial_details(org_info: Dict[str, Any]) -> Dict[str, Any]:\n try:\n fin = FinStatements(download_excel_from_nalog(org_info[\"bfo_id\"]))\n except Exception as e:\n raise Exception(f\"Unable to download or read xlsx file: {e}\")\n\n results = []\n\n for year in YEARS:\n try:\n results.append(\n {\n year: FinanceResultsModel(\n revenue=calculate_revenue(fin, year),\n income=calculate_income(fin, year),\n revenue_growth_yoy=calculate_revenue_growth_yoy(fin, year)\n if year is not YEARS[-1]\n else None,\n profit_margin=calculate_profit_margin(fin, year),\n ebit_margin=calculate_ebit_margin(fin, year),\n sales_margin=calculate_sales_margin(fin, year),\n gross_margin=calculate_gross_margin(fin, year),\n roe=calculate_roe(fin, year) if year is not YEARS[-1] else None,\n ).dict()\n }\n )\n except Exception as e:\n print(f\"{org_info['bfo_id']}: {e}\")\n\n org_info.update({\"results\": results})\n\n return org_info\n","repo_name":"like-a-freedom/bfo_crawler","sub_path":"process_engine.py","file_name":"process_engine.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"12359108115","text":"import tensorflow as tf\n\n# Create a variable tensor (mutable)\nx = tf.Variable([[1, 2, 3], [4, 5, 6]])\n\n# Create a new tensor with the desired values\nnew_values = 
tf.constant([[7, 8, 9], [10, 11, 12], [10, 11, 32]])\n\n# Use the assign method to change the value of x\nx.assign(new_values)\n\n# Print the modified tensor\nprint(x.numpy())\n","repo_name":"gunjlkiran/tenser_flow","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"72520226237","text":"import boto3\n\ndynamodb = boto3.resource('dynamodb')\n\ntable = dynamodb.Table(\"Subscription\")\n\nwith table.batch_writer() as batch:\n batch.put_item(\n Item={\n \"id\": \"1\",\n \"subscription_name\": \"Basic\",\n \"cost\": 0,\n \"storage_limit\": 1,\n }\n )\n batch.put_item(\n Item={\n \"id\": \"2\",\n \"subscription_name\": \"Pro\",\n \"cost\": 5,\n \"storage_limit\": 50,\n }\n )\n batch.put_item(\n Item={\n \"id\": \"3\",\n \"subscription_name\": \"Business\",\n \"cost\": 10,\n \"storage_limit\": None,\n \"additional_info\": \"cost per user, unlimited storage\",\n }\n )\n batch.put_item(\n Item={\n \"id\": \"4\",\n \"subscription_name\": \"Enterprise\",\n \"cost\": 0,\n \"storage_limit\": None,\n \"additional_info\": \"special conditions based on the contract\",\n }\n )\n","repo_name":"lekarus/screenshot_api","sub_path":"subscription_fixture.py","file_name":"subscription_fixture.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"11573958983","text":"from tensorflow.keras import losses\nfrom tensorflow.keras.layers import Input, GRU, Dense, Concatenate, TimeDistributed, LSTM, Add\nfrom tensorflow.keras.models import Model\nfrom attention_tanh import AttentionLayerTanh\nfrom attention_base import AttentionLayerBase\nfrom tensorflow.keras import optimizers\nfrom tensorflow.python.keras import backend as K\n# from tensorflow.contrib.keras.python.keras import backend as K\nfrom tensorflow.keras import optimizers\n\ndef custom_loss(y_true, y_pred):\n return(K.categorical_crossentropy(y_true, y_pred, from_logits=True))\n\ndef define_attn_model(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize, attn_layer_type=0):\n \"\"\" Defining a NMT model \"\"\"\n\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = Input(shape=(fr_timesteps, fr_vsize), name='decoder_inputs')\n\n # Encoder LSTM\n encoder_lstm = LSTM(hidden_size, return_sequences=True, return_state=True, name='encoder_lstm')\n encoder_out, enc_state_h, enc_state_c = encoder_lstm(encoder_inputs)\n\n encoder_state = [enc_state_h, enc_state_c]\n\n #print ('K.shape', K.shape(enc_state_h), K.shape(enc_state_c))\n # Set up the decoder LSTM, using `encoder_states` as initial state.\n decoder_lstm = LSTM(hidden_size, return_sequences=True, return_state=True, name='decoder_lstm')\n decoder_out, dec_state_h, dec_state_c = decoder_lstm(decoder_inputs, initial_state=encoder_state)\n\n # Attention layer\n if attn_layer_type == 0:\n attn_layer = AttentionLayerBase(name='attention_layer')\n elif attn_layer_type == 1:\n attn_layer = AttentionLayerTanh(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # print(decoder_out.shape)\n # print(attn_out.shape)\n\n 
# Concat attention input and decoder LSTM output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # tile attention input and decoder outputs\n # attn_out_tiled = K.tile(attn_out, attn_states.shape[1])\n #decoder_concat_input = Add(name='addition_layer')([decoder_out, attn_out])\n # decoder_concat_input = decoder_out + attn_out\n\n # Dense layer\n dense = Dense(fr_vsize, activation='linear', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n optimizer = optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, amsgrad=False)\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer= optimizer , loss=custom_loss, metrics=['accuracy'])\n\n full_model.summary(line_length=200)\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, enc_inf_state_h, enc_inf_state_c = encoder_lstm(encoder_inf_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, enc_inf_state_h, enc_inf_state_c])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state_h = Input(batch_shape=(batch_size, hidden_size), name='decoder_init_h')\n decoder_init_state_c = Input(batch_shape=(batch_size, hidden_size), name='decoder_init_c')\n\n decoder_inf_out, decoder_inf_state_h, decoder_inf_state_c = decoder_lstm(decoder_inf_inputs, initial_state=[decoder_init_state_h, decoder_init_state_c])\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n\n #decoder_inf_concat = Add(name='addition_layer')([decoder_inf_out, attn_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n\n\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state_h, decoder_init_state_c, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state_h, decoder_inf_state_c])\n\n return full_model, encoder_model, decoder_model, attn_layer, attn_states\n\n\nif __name__ == '__main__':\n\n \"\"\" Checking nmt model for toy examples \"\"\"\n define_nmt(64, None, 20, 30, 20, 20)\n\n","repo_name":"isukrit/models_genesis_brain","sub_path":"junk.py","file_name":"junk.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"1123758119","text":"# Questao 4\r\n# A seqüência de Fibonacci é a seguinte: 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ... Sua regra de formação é simples: \r\n# os dois primeiros elementos são 1; a partir de então, cada elemento é a soma dos dois anteriores. \r\n# Faça um algoritmo que leia um número inteiro calcule o seu número de Fibonacci. 
F1 = 1, F2 = 1, F3 = 2, etc.\r\n\r\nfib = [1,1]\r\ni = 0\r\nnum = int(input(\"Digite um numero: \"))\r\n\r\nwhile num > len(fib):\r\n\tfib.append(fib[i] + fib[i+1])\r\n\ti+=1\r\n\r\nprint ('Fibonacci(%d): %d' %(num,fib[num-1]))\r\n","repo_name":"thiribeiro/Python-para-Zumbis","sub_path":"Lista3_questao04.py","file_name":"Lista3_questao04.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40108453996","text":"from stellar_sdk import *\nfrom tools import *\n\nserver, base_fee = get_server()\nquest_kp = Keypair.from_secret('SC6DZ2NMWS4AW74VCHV425SDLSDSAGC3DKBPQ6RJA3GVXKEXGBZB3QPB')\n\n\nfund(quest_kp)\n\nquest_ac = server.load_account(quest_kp)\nprint(f'Current sequence number {quest_ac.sequence}')\n\ntxb = (get_txb(quest_ac)\n .append_bump_sequence_op(bump_to=quest_ac.sequence + 100) \n) \n \ntx = txb.build()\ntx.sign(quest_kp)\ndisplay_tx_results(server.submit_transaction(tx))\nprint(f'Current sequence number {quest_ac.sequence}')\n\n\nbumped_ac = Account(quest_kp.public_key,quest_ac.sequence+99)\ntxb = (get_txb(bumped_ac)\n .append_manage_data_op(\n data_name='sequence',\n data_value='bumped'\n )\n \n)\ntx = txb.build()\ntx.sign(quest_kp)\ndisplay_tx_results(server.submit_transaction(tx))\nprint(f'Current sequence number {quest_ac.sequence}')\nquest_ac = server.load_account(quest_kp)\nprint(f'Current sequence number {quest_ac.sequence}')","repo_name":"jamiels/stellar-quest","sub_path":"15-quest-3-1-bump-sequence.py","file_name":"15-quest-3-1-bump-sequence.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7037513344","text":"import numpy as np\n\nfrom lab2.task import do_embedding, get_rho_for_image\nfrom lab2.utils.in_out import read_image, write_image\nfrom utils.distortion import cut, scale, smooth, jpeg, cut_bulk, scale_bulk, smooth_bulk, jpeg_bulk, cut_and_jpeg_bulk\nfrom matplotlib import pyplot as plt\n\n\nif __name__ == '__main__':\n\n container = read_image('resources/barb.tif')\n H_zone, watermark, image = do_embedding(container)#read_image('resources/black.png')#\n\n rho = get_rho_for_image(H_zone, watermark, image)\n print(f'Original rho: {rho}')\n\n cut_images = cut_bulk(image, container, 0.2, 0.9, 0.1)\n scale_images = scale_bulk(image, 0.55, 1.45, 0.15)\n smooth_images = smooth_bulk(image, 3, 15, 2)\n jpeg_images = jpeg_bulk(image, 30, 90, 10)\n\n cut_rhos = []\n for i in range(0, cut_images.shape[0]):\n cut_rhos.append(get_rho_for_image(H_zone, watermark, cut_images[i]))\n\n plt.title('Rhos (cut)')\n x = np.arange(0.2, 1.0, 0.1)\n plt.plot(x, cut_rhos)\n plt.show()\n\n scale_rhos = []\n for i in range(0, scale_images.shape[0]):\n scale_rhos.append(get_rho_for_image(H_zone, watermark, scale_images[i]))\n\n plt.title('Rhos (scale)')\n x = np.arange(0.55, 1.5, 0.15)\n plt.plot(x, scale_rhos)\n plt.show()\n\n smooth_rhos = []\n for i in range(0, smooth_images.shape[0]):\n smooth_rhos.append(get_rho_for_image(H_zone, watermark, smooth_images[i]))\n\n plt.title('Rhos (smooth)')\n x = np.arange(3, 17, 2)\n plt.plot(x, smooth_rhos)\n plt.show()\n\n jpeg_rhos = []\n for i in range(0, jpeg_images.shape[0]):\n jpeg_rhos.append(get_rho_for_image(H_zone, watermark, jpeg_images[i]))\n\n plt.title('Rhos (jpeg)')\n x = np.arange(30, 91, 10)\n plt.plot(x, jpeg_rhos)\n plt.show()\n\n cut_and_jpeg = cut_and_jpeg_bulk(image, container, 0.2, 0.9, 0.1, 30, 90, 10, H_zone, watermark)\n a = 
3\n\n\n\n\n","repo_name":"SobolevD/stegan3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6472747792","text":"import os\nfrom skimage import io, transform, img_as_ubyte\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport imageio\nfrom slider_plot import slider_plot\nfrom scipy.special import erfc\nfrom skimage import color\n\n\ndef conc_func(X, a, b, c, d):\n x, t = X\n if d < 0:\n return np.inf\n return a + (0.5 * c * (1 - erfc((x - b) / (2 * np.sqrt(d * t)))))\n\n\ndef conc_func_b(X, a, b_1, b_0, c, d):\n x, t = X\n b = b_func(t, b_1, b_0)\n if d < 0:\n return np.inf\n return a + (0.5 * c * (1 - erfc((x - b) / (2 * np.sqrt(d * t)))))\n\n\ndef conc_func_surf(X, a, b_1, b_2, c, d_1a, d_1b, d_1c, d_2a, d_4):\n x, t = X\n b = b_1 * t + b_2\n d = broadening_func(t, d_1a, d_1b, d_1c, d_2a, d_4)\n return a + (0.5 * c * (1 - erfc((x - b) / (2 * np.sqrt(d * t)))))\n\n\ndef b_func(t, b_1, b_0):\n return b_1*t + b_0\n\n\ndef broadening_func(t, d_1a, d_1b, d_1c, d_2a, d_4):\n d = d_1a * np.exp(-d_1c(t-d_1b))\n d += d_2a/t\n d += d_4\n return d\n\n\ndef broadening_func_guess(t, y):\n d_4 = np.average(y[-10:])\n d_1a = max(y) - d_4\n d_1b = list(y).index(max(y))\n d_1c = max(t)/4\n d_2a = max(y)\n d_2b = d_1b\n d_2c = max(t)/4\n d_3b = -d_2b\n d_3a = d_3b*min(y)\n return [d_1a, d_1b, d_1c, d_2a, d_2b, d_2c, d_3a, d_3b, d_4]\n\n\ndef surf_plot(data, path):\n vals = eval(open(path + \"/data.txt\", \"r\").read())\n x, y = np.mgrid[0:data.shape[0], 0:data.shape[1]]\n y = y * (.09 / (vals[\"height\"][1] - vals[\"height\"][0]))\n ax = plt.axes(projection=\"3d\")\n ax.plot_surface(x, y, data, cmap=plt.cm.viridis)\n plt.xlabel(\"Time (arb. units)\", fontsize=17)\n plt.ylabel(\"Height (m)\", fontsize=17)\n plt.tight_layout()\n plt.savefig(path + \"\\\\plots\\\\surf_plot.pdf\")\n plt.show()\n\n\ndef heatmap_plot(data, color_data, path, time):\n vals = eval(open(path + \"/data.txt\", \"r\").read())\n fig, axs = plt.subplots(2, figsize=(18, 10.125))\n x = max(time) - min(time)\n y = np.shape(data)[1] * (.09 / (vals[\"height\"][1] - vals[\"height\"][0]))\n aspect = x / (y * 3)\n extent = [min(time), max(time), 0, y]\n axs[0].imshow(np.rot90(data), aspect=aspect, extent=extent)\n axs[0].set_title(\"Gray Scale\", fontsize=21)\n axs[0].set_xlabel(\"Time (seconds)\", fontsize=17)\n axs[0].set_ylabel(\"Height (m)\", fontsize=17)\n axs[1].imshow(np.rot90(color_data), aspect=aspect, extent=extent)\n axs[1].set_title(\"Full Color\", fontsize=21)\n axs[1].set_xlabel(\"Time (seconds)\", fontsize=17)\n axs[1].set_ylabel(\"Height (m)\", fontsize=17)\n plt.tight_layout()\n plt.savefig(path + \"\\\\plots\\\\heatmap_plot.pdf\")\n plt.show()\n\n\ndef plot_coeffs(coeffs, time, path):\n fig, axes = plt.subplots(2, 2, figsize=(18, 10.125))\n\n axes[0, 0].scatter(time, [n[0] for n in coeffs], label=path)\n axes[0, 1].scatter(time, [n[1] for n in coeffs], label=path)\n axes[1, 0].scatter(time, [n[2] for n in coeffs], label=path)\n axes[1, 1].scatter(time, [n[3] for n in coeffs], label=path)\n\n axes[0, 0].set_title(\"a\", fontsize=19)\n axes[0, 0].set_xlabel(\"t (seconds)\")\n axes[0, 0].set_ylabel(\"Intensity (arb. 
units)\")\n\n axes[0, 1].set_title(\"b\", fontsize=19)\n axes[0, 1].set_xlabel(\"t (seconds)\")\n axes[0, 1].set_ylabel(\"Height (m)\")\n\n axes[1, 0].set_title(\"c\", fontsize=19)\n axes[1, 0].set_xlabel(\"t (seconds)\")\n axes[1, 0].set_ylabel(\"Intensity (arb. units)\")\n\n axes[1, 1].set_title(\"d\", fontsize=19)\n axes[1, 1].set_xlabel(\"t (seconds)\")\n axes[1, 1].set_ylabel(\"Arb. units\")\n plt.suptitle(\n r\"Time dependence of fitting parameters y=$a+0.5\\cdot c\\cdot$erfc$\\left(\\frac{x+b}{2\\sqrt{d\\cdot t}}\\right)$\",\n fontsize=21)\n plt.savefig(path + \"\\\\plots\\\\plot.pdf\")\n plt.show()\n\n\ndef plot_coeffs_comparison(coeffs, data, color_data, time, path):\n vals = eval(open(path + \"/data.txt\", \"r\").read())\n x = max(time) - min(time)\n y = np.shape(data)[1] * (.09 / (vals[\"height\"][1] - vals[\"height\"][0]))\n aspect = x / (y * 4)\n extent = [min(time), max(time), 0, y]\n\n fig, axes = plt.subplots(2, figsize=(18, 10.125), sharex=\"col\")\n axes[0].imshow(np.rot90(color_data), aspect=aspect, extent=extent)\n axes[0].set_title(\"Full Color\", fontsize=19)\n axes[0].set_ylabel(\"Height (m)\", fontsize=17)\n axes[1].scatter(time, [n[0] for n in coeffs])\n axes[1].set_title(\"a\", fontsize=19)\n axes[1].set_xlabel(\"t (seconds)\", fontsize=17)\n axes[1].set_ylabel(\"Intensity (arb. units)\", fontsize=17)\n plt.savefig(path + \"\\\\plots\\\\a_plot.pdf\")\n plt.close(fig)\n\n fig, axes = plt.subplots(2, figsize=(18, 10.125), sharex=\"col\")\n axes[0].imshow(np.rot90(color_data), aspect=aspect, extent=extent)\n axes[0].set_title(\"Full Color\", fontsize=19)\n axes[0].set_ylabel(\"Height (m)\", fontsize=17)\n axes[1].scatter(time, [n[1] for n in coeffs])\n axes[1].set_title(\"b\", fontsize=19)\n axes[1].set_xlabel(\"t (seconds)\", fontsize=17)\n axes[1].set_ylabel(\"Height (m)\", fontsize=17)\n plt.savefig(path + \"\\\\plots\\\\b_plot.pdf\")\n plt.close(fig)\n\n fig, axes = plt.subplots(2, figsize=(18, 10.125), sharex=\"col\")\n axes[0].imshow(np.rot90(color_data), aspect=aspect, extent=extent)\n axes[0].set_title(\"Full Color\", fontsize=19)\n axes[0].set_ylabel(\"Height (m)\", fontsize=17)\n axes[1].scatter(time, [n[2] for n in coeffs])\n axes[1].set_title(\"c\", fontsize=19)\n axes[1].set_xlabel(\"t (seconds)\", fontsize=17)\n axes[1].set_ylabel(\"Intensity (arb. units)\", fontsize=17)\n plt.savefig(path + \"\\\\plots\\\\c_plot.pdf\")\n plt.close(fig)\n\n fig, axes = plt.subplots(2, figsize=(18, 10.125), sharex=\"col\")\n axes[0].imshow(np.rot90(color_data)[:, :], aspect=aspect, extent=extent)\n axes[0].set_title(\"Full Color\", fontsize=19)\n axes[0].set_ylabel(\"Height (m)\", fontsize=17)\n axes[1].scatter(time, [n[3] for n in coeffs])\n axes[1].set_title(\"d\", fontsize=19)\n axes[1].set_xlabel(\"t (seconds)\", fontsize=17)\n axes[1].set_ylabel(\"Arb. 
units\", fontsize=17)\n plt.savefig(path + \"\\\\plots\\\\d_plot.pdf\")\n plt.close(fig)\n\n\ndef generate_gif(path):\n images = []\n cropped_images = []\n crop, start_time, filetype, angle, height = eval(open(path + \"/data.txt\", \"r\").read()).values()\n n = 0\n n_frames = 60\n files = os.listdir(path)\n if n_frames > len(files):\n n_frames = len([n for n in files if n.endswith(filetype)])\n for file in tqdm(sorted(files)):\n n += 1\n if file.endswith(filetype) and n % int(len(files) / n_frames) == 0:\n im = io.imread(path + \"\\\\\" + file, )\n im = transform.rotate(im, float(angle))\n im = im[crop[0]:crop[1], crop[2]:crop[3]]\n cropped_images.append(img_as_ubyte(im))\n images.append(imageio.imread(path + \"\\\\\" + file))\n imageio.mimsave(path + \"\\\\plots\\\\images_gif.gif\", images)\n imageio.mimsave(path + \"\\\\plots\\\\cropped_images_gif.gif\", cropped_images)\n\n\ndef plot_fits(data, coeffs, time, path):\n data_vals = eval(open(path + \"/data.txt\", \"r\").read())\n\n def plot_fit(plt, vals):\n n = int(vals[0])\n y = data[n]\n x = np.asarray(range(len(y))) * (.09 / (data_vals[\"height\"][1] - data_vals[\"height\"][0]))\n smooth_x = np.linspace(min(x), max(x), len(x) * 10)\n plt.plot(smooth_x, conc_func((smooth_x, np.asarray([time[n]] * len(smooth_x))), *coeffs[n]), color=\"red\",\n label=\"Fit\")\n plt.scatter(x, y, label=\"Data\")\n # plt.title(str(round(time[n], 1))+\" Second(s)\")\n plt.title(str(coeffs[n]))\n\n ranges = [{\"max\": len(time) - 1, \"min\": 0, \"step\": 1}]\n slider_plot(plot_fit, ranges)\n\n\ndef latex_plot(coeffs, time, path):\n fig, axes = plt.subplots(2, 2, figsize=(8, 4.5))\n axes[0, 0].scatter(time, [n[0] for n in coeffs])\n axes[0, 0].set_title(\"a\", fontsize=19)\n axes[0, 1].scatter(time, [n[1] for n in coeffs])\n axes[0, 1].set_title(\"b\", fontsize=19)\n axes[1, 0].scatter(time, [n[2] for n in coeffs])\n axes[1, 0].set_title(\"c\", fontsize=19)\n axes[1, 1].scatter(time, [n[3] for n in coeffs])\n axes[1, 1].set_title(\"d\", fontsize=19)\n plt.tight_layout()\n plt.savefig(path + \"\\\\plots\\\\latex_plot.pdf\")\n plt.show()\n\n\ndef combined_plot(data_sets):\n fig, axes = plt.subplots(2, 2, figsize=(18, 10.125))\n\n for data_set in data_sets:\n coeffs, time, path = data_set\n a = np.asarray([n[0] for n in coeffs])\n a = a - np.average(a[:10])\n b = np.asarray([n[1] for n in coeffs])\n b = b - np.average(b[:10])\n c = np.asarray([n[2] for n in coeffs])\n c = c - np.average(c[:10])\n d = np.asarray([n[3] for n in coeffs])\n label = path.split(\"\\\\\")[-1]\n axes[0, 0].scatter(time, a, label=label, s=1)\n axes[0, 1].scatter(time, b, label=label, s=1)\n axes[1, 0].scatter(time, c, label=label, s=1)\n axes[1, 1].scatter(time[30:], d[30:], label=label, s=1)\n\n axes[0, 0].set_title(\"a\", fontsize=19)\n axes[0, 0].set_xlabel(\"t (seconds)\")\n axes[0, 0].set_ylabel(\"Intensity (arb. units)\")\n\n axes[0, 1].set_title(\"b\", fontsize=19)\n axes[0, 1].set_xlabel(\"t (seconds)\")\n axes[0, 1].set_ylabel(\"Height (m)\")\n\n axes[1, 0].set_title(\"c\", fontsize=19)\n axes[1, 0].set_xlabel(\"t (seconds)\")\n axes[1, 0].set_ylabel(\"Intensity (arb. 
units)\")\n\n axes[1, 1].set_title(\"d\", fontsize=19)\n axes[1, 1].set_xlabel(\"t (seconds)\")\n axes[1, 1].set_ylabel(r\"$\\frac{m^2}{s^2}$\")\n plt.legend()\n plt.show()\n","repo_name":"JohannesByle/phys_343","sub_path":"plot_methods.py","file_name":"plot_methods.py","file_ext":"py","file_size_in_byte":9417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33443634770","text":"#!/usr/bin/python3.7\nfrom lib import timer\n\n\n\n\n\nfib = [2,8]\n\n@timer\ndef main(l):\n\twhile fib[-1] < l:\n\t\tfib.append(4*fib[-1]+fib[-2]) # aparently i noticed the pattern long ago ...\n\tprint(sum(fib[:-1]))\n\treturn sum(fib[:-1])\n# ~35µ\n\n\nmain(4_000_000)\n\n\n\n","repo_name":"kyoobey/projecteuler_solutions","sub_path":"2_even_fibonacci_numbers.py","file_name":"2_even_fibonacci_numbers.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16368910834","text":"#!/usr/bin/env python \n#-*- coding:utf-8 -*- \n\"\"\"\nThis module provides the transformation from adc to energy.\n\"\"\"\n__author__ = \"I-Huan CHIU\"\n__email__ = \"ichiu@rirc.osaka-u.ac.jp\"\n__created__ = \"2021-11-02\"\n__copyright__ = \"Copyright 2021 I-Huan CHIU\"\n__license__ = \"GPL http://www.gnu.org/licenses/gpl.html\"\n\nimport sys,os,random,math,time,ROOT\nfrom ROOT import TFile, TTree, TCut\nfrom ROOT import gROOT, AddressOf, gPad, gDirectory\nROOT.gROOT.SetBatch(1)\nimport argparse\nimport math\nsys.path.append('/Users/chiu.i-huan/Desktop/new_scientific/imageAna/macro/')\nsys.path.append('/Users/chiu.i-huan/Desktop/new_scientific/imageAna/macro/utils/')\nfrom helpers import ProgressBar\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\nROOT.gROOT.LoadMacro( __location__+'/AtlasStyle/AtlasStyle.C')\n#ROOT.SetAtlasStyle()\nnum_Det=6\n\ndef createRatioCanvas(Name = \"cs\", w = 1000, h = 800):\n cRatioCanvas = ROOT.TCanvas(Name,\"\",0,0,int(w),int(h))\n cRatioCanvas.GetFrame().SetBorderMode(0)\n cRatioCanvas.GetFrame().SetBorderSize(0)\n cRatioCanvas.SetBorderMode(0)\n cRatioCanvas.SetBorderSize(0)\n cRatioCanvas.SetFillStyle(0)\n cRatioCanvas.SetFillColor(0)\n cRatioCanvas.SetRightMargin(0.15)\n cRatioCanvas.SetWindowSize( int(w + (w-cRatioCanvas.GetWw())), int(h + (h-cRatioCanvas.GetWh())) )\n return cRatioCanvas\n\ndef getfunc(name, _down, _up):\n return ROOT.TF1(name,\"expo\",_down,_up) \n\ndef fit(args):\n fout = ROOT.TFile(args.output,\"recreate\")\n cv0 = createRatioCanvas(\"cv0\", 1200, 800)\n\n f_ba=ROOT.TFile(\"./edb_data/MUSE203305_01_001_000_ene.root\",\"read\")\n f_co=ROOT.TFile(\"./edb_data/MUSE203302_01_001_000_ene.root\",\"read\") \n f_eu=ROOT.TFile(\"./edb_data/MUSE203303_01_001_000_ene.root\",\"read\")\n t_ba=f_ba.Get(\"edbtree\")\n t_co=f_co.Get(\"edbtree\")\n t_eu=f_eu.Get(\"edbtree\")\n fout.cd()\n\n prog = ProgressBar(ntotal=num_Det*3,text=\"Fitting...\",init_t=time.time())\n nevproc=0\n if prog: prog.update(nevproc)\n p0_ba_list, p1_ba_list, time_ba_list, fuc_ba_list = [],[],[],[]\n p0_co_list, p1_co_list, time_co_list, fuc_co_list = [],[],[],[]\n p0_eu_list, p1_eu_list, time_eu_list, fuc_eu_list = [],[],[],[]\n for ich in range(num_Det):\n cv0.cd()\n t_ba.Draw(\"delta_t_det >> htemp_ba(50000,-1,1)\",\"ch == {}\".format(ich+1), \"\");nevproc+=1\n t_co.Draw(\"delta_t_det >> htemp_co(50000,-1,1)\",\"ch == {}\".format(ich+1), \"\");nevproc+=1\n t_eu.Draw(\"delta_t_det >> htemp_eu(50000,-1,1)\",\"ch == 
{}\".format(ich+1), \"\");nevproc+=1\n htemp_ba, htemp_co, htemp_eu = gDirectory.Get(\"htemp_ba\"), gDirectory.Get(\"htemp_co\"), gDirectory.Get(\"htemp_eu\")\n htemp_ba.SetLineColor(1)\n htemp_co.SetLineColor(1)\n htemp_eu.SetLineColor(1)\n htemp_ba.SetTitle(\"Ba Ch{0};#Delta t [s]; Counts / 40#mus\".format(ich+1))\n htemp_co.SetTitle(\"Co Ch{0};#Delta t [s]; Counts/40#mu s\".format(ich+1))\n htemp_eu.SetTitle(\"Eu Ch{0};#Delta t [s]; Counts/40#mu s\".format(ich+1))\n htemp_ba.SetName(\"htemp_ba_CH{}\".format(ich+1))\n htemp_co.SetName(\"htemp_co_CH{}\".format(ich+1))\n htemp_eu.SetName(\"htemp_eu_CH{}\".format(ich+1))\n\n gb_ba=getfunc(\"gb_ba_\"+str(ich),0,0.4)\n gb_co=getfunc(\"gb_co_\"+str(ich),0,0.02)\n gb_eu=getfunc(\"gb_eu_\"+str(ich),0,0.02)\n htemp_ba.Fit(\"gb_ba_\"+str(ich),\"QR\")\n htemp_co.Fit(\"gb_co_\"+str(ich),\"QR\")\n htemp_eu.Fit(\"gb_eu_\"+str(ich),\"QR\")\n p0_ba_list.append(gb_ba.GetParameter(0))\n p1_ba_list.append(gb_ba.GetParameter(1))\n p0_co_list.append(gb_co.GetParameter(0))\n p1_co_list.append(gb_co.GetParameter(1))\n p0_eu_list.append(gb_eu.GetParameter(0))\n p1_eu_list.append(gb_eu.GetParameter(1))\n time_ba_list.append(htemp_ba.GetEntries()/(gb_ba.GetParameter(1)*(-1)))\n time_co_list.append(htemp_co.GetEntries()/(gb_co.GetParameter(1)*(-1)))\n time_eu_list.append(htemp_eu.GetEntries()/(gb_eu.GetParameter(1)*(-1)))\n fuc_ba_list.append(gb_ba)\n fuc_co_list.append(gb_co)\n fuc_eu_list.append(gb_eu)\n\n htemp_ba.Write()\n htemp_co.Write()\n htemp_eu.Write()\n gb_ba.Write()\n gb_co.Write()\n gb_eu.Write()\n if prog: prog.update(nevproc)\n\n # === make comparison ===\n ROOT.SetAtlasStyle()\n leg = ROOT.TLegend(.65,.7,.8,.88);\n leg.SetFillColor(0);\n leg.SetLineColor(0);\n leg.SetBorderSize(0);\n cv1 = createRatioCanvas(\"cv1\", 1200, 800)\n cv2 = createRatioCanvas(\"cv2\", 1200, 800)\n cv3 = createRatioCanvas(\"cv3\", 1200, 800)\n for i in range(num_Det):\n cv1.cd()\n gPad.SetLogy(1)\n if i == 0:\n fuc_ba_list[i].SetTitle(\"Ba Ch{0};#Delta t [s]; Counts\".format(i+1))\n fuc_ba_list[i].SetLineColor(i+1);fuc_ba_list[i].Draw()\n else:\n fuc_ba_list[i].SetLineColor(i+1);fuc_ba_list[i].Draw(\"same\")\n leg.AddEntry(fuc_ba_list[i], \"CH{}\".format(i+1) , \"l\");\n if i == num_Det-1: leg.Draw(\"same\");\n for i in range(num_Det):\n cv2.cd()\n gPad.SetLogy(1)\n if i == 0:\n fuc_co_list[i].SetTitle(\"Co Ch{0};#Delta t [s]; Counts\".format(i+1))\n fuc_co_list[i].GetXaxis().SetNdivisions(5, 3, 0, False)\n fuc_co_list[i].SetLineColor(i+1);fuc_co_list[i].Draw()\n else:\n fuc_co_list[i].SetLineColor(i+1);fuc_co_list[i].Draw(\"same\")\n if i == num_Det-1: leg.Draw(\"same\");\n for i in range(num_Det):\n cv3.cd()\n gPad.SetLogy(1)\n if i == 0:\n fuc_eu_list[i].SetTitle(\"Eu Ch{0};#Delta t [s]; Counts\".format(i+1))\n fuc_eu_list[i].GetXaxis().SetNdivisions(5, 3, 0, False)\n fuc_eu_list[i].SetLineColor(i+1);fuc_eu_list[i].Draw()\n else:\n fuc_eu_list[i].SetLineColor(i+1);fuc_eu_list[i].Draw(\"same\")\n if i == num_Det-1: leg.Draw(\"same\");\n\n cv1.Write()\n cv2.Write()\n cv3.Write()\n fout.Write()\n if prog: prog.finalize()\n cv1.SaveAs(\"./outfigs/fit_ba.pdf\")\n cv2.SaveAs(\"./outfigs/fit_co.pdf\")\n cv3.SaveAs(\"./outfigs/fit_eu.pdf\")\n print(\"Ba CH1-6 => \", \" p0 : \", p0_ba_list, \" p1 : \", p1_ba_list, \" livetime : \", time_ba_list)\n print(\"Co CH1-6 => \", \" p0 : \", p0_co_list, \" p1 : \", p1_co_list, \" livetime : \", time_co_list)\n print(\"Eu CH1-6 => \", \" p0 : \", p0_eu_list, \" p1 : \", p1_eu_list, \" livetime : \", time_eu_list)\n\nif __name__ == 
'__main__' :\n parser = argparse.ArgumentParser(description='Process some integers.') \n parser.add_argument(\"--output\", type=str, default=\"/Users/chiu.i-huan/Desktop/geant4WS/geant4-xrayimage/macro/Ryugu_sim/outfigs/fit_output.root\", help=\"Input File Name\")\n args = parser.parse_args()\n\n fit( args )\n\n","repo_name":"IHuanChiu/geant4-xrayimage","sub_path":"macro/Ryugu_sim/find_deadtime.py","file_name":"find_deadtime.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"13019922651","text":"import fractions\n\nn = int(input(\"Ingrese el número de parejas ordenadas: \" )) - 1 # n intervals\nb = float(input(\"Último límite de integración: \" )) # Input the last limit of integration (x-axis normally)\na = float(input(\"Primer límite de integración: \" )) # Input the first limit of integration (x-axis nomalli)\ndelta_x = (b-a)/n # Delta x\ndiccionario = {} # In this dictionary would be all the couples\nResultado = 0 # Future variable\n\nfor i in range(n+1): # get all the couples\n x = input(\"Valor x de la pareja ordenada: \" ) # x axis\n try: # See if x axis is a fraction\n if not x.isnumeric():\n x = fractions.Fraction(x)\n except ValueError:\n (x + \"NO ES UN NÚMERO\")\n doubleofx = float(x) \n y = input(\"Valor y de la pareja ordenada: \" ) # y axis\n try: # see if y is a fraction\n if not y.isnumeric():\n y = fractions.Fraction(y)\n except ValueError:\n (y + \"NO ES UN NÚMERO\")\n doubleofy = float(y) \n diccionario.update({doubleofx:doubleofy}) # Dictionary get updated by x and y\n\n# sum the first and last value of the dictionary, then quit that values and the values rest multiply by 2 and sum to the final result\nResultado += diccionario.get(a)\ndiccionario.pop(a)\nResultado += diccionario.get(b)\ndiccionario.pop(b)\nfor i in diccionario.values():\n Resultado += i*2\nResultado *= delta_x/2\nprint(Resultado)\n\n\"\"\"\n⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣶⣄⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⠀⠀⠀⠀⠀⠀⢀⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣦⣄⣀⡀⣠⣾⡇⠀⠀⠀⠀\n⠀⠀⠀⠀⠀⠀⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀\n⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠿⢿⣿⣿⡇⠀⠀⠀⠀\n⠀⣶⣿⣦⣜⣿⣿⣿⡟⠻⣿⣿⣿⣿⣿⣿⣿⡿⢿⡏⣴⣺⣦⣙⣿⣷⣄⠀⠀⠀\n⠀⣯⡇⣻⣿⣿⣿⣿⣷⣾⣿⣬⣥⣭⣽⣿⣿⣧⣼⡇⣯⣇⣹⣿⣿⣿⣿⣧⠀⠀\n⠀⠹⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠸⣿⣿⣿⣿⣿⣿⣿⣷⠀\n\"\"\"\n","repo_name":"TheLudway/TRAPEZOIDAL-RULE","sub_path":"TRAPECIO.py","file_name":"TRAPECIO.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25291700062","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\n\nimport sys\nimport os\nfrom DKF import DKF\nfrom data import data_iter\nfrom config import config\nimport pdb\nimport csv \n\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\"save_path\", \"./\",\n \"base save path for the experiment\")\nflags.DEFINE_string(\"mode\",\"train\",\"mode in which model needs to run\")\n# flags.DEFINE_string(\"log_path\",None,\"Log Directory path\")\n\n\nFLAGS = flags.FLAGS\n\ndef main(_):\n model_config = config.config().dkf_model_config\n\n print(\"Configuration DKF:\")\n \n import pprint\n pprint.PrettyPrinter().pprint(model_config)\n\n dkf_config = {\n \"batch_size\" : model_config.batch_size,\n \"time_len\" : model_config.max_time_steps,\n\n \"x_size\" : model_config.output_size,\n \"u_size\" : model_config.input_size,\n \"z_size\" : model_config.latent_state_size,\n\n \"num_hidden_layers\" : model_config.num_hidden_layers,\n \"num_hidden_units\" : 
model_config.num_hidden_units,\n \"keep_prob\" : model_config.keep_prob,\n \n \"n_samples_term_1\" : model_config.nsamples_e1,\n \"n_samples_term_3\" : model_config.nsamples_e3,\n \"lsm_time\" : model_config.lsm_time,\n\n \"learning_rate\" : model_config.learning_rate,\n \"logfolder\": model_config.logfolder,\n \"max_grad_norm\": model_config.max_grad_norm,\n }\n\n save_path = os.path.join(FLAGS.save_path)\n # log_path = os.path.join(FLAGS.log_path)\n\n model = DKF(config=dkf_config, device=\"gpu\")\n best_saver = tf.train.Saver()\n\n with tf.Session() as session:\n\n session.run(tf.global_variables_initializer())\n\n if FLAGS.mode == \"test\":\n iterator_test = data_iter.SSIterator(model_config,mode = \"test\")\n best_saver.restore(\n sess=session,\n save_path=os.path.join(save_path, \"best_model_dkf.ckpt\"))\n final_outputs = model.test(session, reader = iterator_test)\n with open(\"output.csv\", \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(final_outputs)\n\n\n else:\n if model_config.load_mode == \"best\":\n best_saver.restore(\n sess=session,\n save_path=os.path.join(save_path, \"best_model_dkf.ckpt\"))\n\n i, patience = 0, 0\n best_valid_metric = 1e16\n\n \n while patience < model_config.patience:\n i += 1\n\n iterator_train = data_iter.SSIterator(model_config, mode = \"train\")\n iterator_valid = data_iter.SSIterator(model_config, mode=\"valid\")\n\n print(\"\\nEpoch: %d\" % (i))\n model.run_epoch(session, reader=iterator_train, verbose=True)\n\n print(\"Evaluating\")\n valid_rms = model.run_test(session, reader=iterator_valid)\n\n print(\"RMS: \", valid_rms)\n\n if valid_rms < best_valid_metric:\n best_valid_metric = valid_rms\n\n print(\"\\nsaving best model...\")\n best_saver.save(sess=session, save_path=os.path.join(save_path, \"best_model_dkf.ckpt\"))\n patience = 0\n else:\n patience += 1\n print(\"\\nLosing patience...\")\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"chitwansaharia/AMLProject","sub_path":"train_dkf.py","file_name":"train_dkf.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39302938924","text":"# Write a program that converts a decimal height in feet into feet and inches. 
For instance, an\n# input of 4.75 feet should become 4 feet, 9 inches.\n\nuser_input = input('provide a length in feet(must be a decimal): ')\ndot_index = user_input.index('.')\nfirst_integer = user_input[0:dot_index]\n# print(first_integer)\ndecimals = float(user_input) - int(first_integer)\n# print(decimals)\ninched = decimals * 12\nprint(f'{int(first_integer)} feet, {int(inched)} inches')\n\n# provide a length in feet(must be a decimal): 4.75\n# 4 feet, 9 inches\n# provide a length in feet(must be a decimal): 361.592\n# 361 feet, 7 inches\n# provide a length in feet(must be a decimal): 18.048\n# 18 feet, 0 inches","repo_name":"shinaeli/My-Python-Codes","sub_path":"A Practical Introduction To Python Programming/Exercise 10.9/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40446786052","text":"\"\"\"\nImagine a robot standing at postion(0,0)\nin a 2D grid, given a string consisting of its moves,\nfind the final location of the robot\n\nExample: UD\nCurrent Position: (0,0)\nU => (0, 0+1)\nD => (0, 1-1)\n\nU: up, Increase in y axis\nD: down, Decrease in y axis\nR: right, Increase in x axis\nL: Left, Decrease in x axis\n\"\"\"\n\nclass Solution:\n def judgeCircle(self, moves: str) -> bool:\n x = 0\n y = 0\n\n for move in moves:\n if move == \"U\":\n y += 1\n elif move == \"R\":\n x += 1\n elif move == \"D\":\n y -= 1\n elif move == \"L\":\n x -= 1\n return x == 0 and y == 0\n\ns= Solution()\nanswer = s.judgeCircle(\"URRDLL\")\n\n\n\n\n\n\n\n\n\n","repo_name":"shovarnu2022/DSA_Problem_solving","sub_path":"easy_probs/robot_return_origin.py","file_name":"robot_return_origin.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"367219874","text":"from texttable import Texttable\n\n\nclass ParseTreeNode:\n def __init__(self, symbol, children=None):\n self.symbol = symbol\n self.children = children if children is not None else []\n\n def add_child(self, node):\n self.children.append(node)\n\n\nclass Grammar:\n\n def __init__(self, nonterminals, terminals, productions, start) -> None:\n self._nonterminals = nonterminals\n self._terminals = terminals\n self._productions = productions\n self._start = start\n\n # This static functions takes in a file path and returns a Grammar object from the data in the file\n # -> it starts by reading the nonterminals, terminals, and the start object, because each of them \n # is situated on a single line\n # -> it proceeds by processing all of the remaining lines and paring them into productions\n\n @staticmethod\n def from_file(path):\n nonterminals = []\n terminals = []\n productions = {}\n start = None\n\n with open(path, 'r') as f:\n # Read the single-line elements\n nonterminals.extend(f.readline().strip().split(\" \"))\n terminals.extend(f.readline().strip().split(\" \"))\n start = f.readline().strip()\n\n # process all of the remaining lines\n raw_productions = [line.strip() for line in f.readlines()]\n for production in raw_productions:\n # spilt the current line in left hand and right hand sides\n lh, rh = production.split('->')\n\n lh = lh.strip()\n # split the right hand side in variations\n rh = [x.strip() for x in rh.strip().split('|')]\n\n # add each variation to the list of productions\n for alternative in rh:\n if lh in productions.keys():\n productions[lh].append(alternative)\n else:\n productions[lh] = [alternative]\n # 
Return the Grammar object\n return Grammar(nonterminals, terminals, productions, start)\n\n @property\n def nonterminals(self):\n return self._nonterminals\n\n @property\n def terminals(self):\n return self._terminals\n\n @property\n def start(self):\n return self._start\n\n @property\n def productions(self):\n return self._productions\n\n def get_nonterminal_productions(self, nonterminal):\n if nonterminal in self._productions:\n return self._productions[nonterminal]\n return []\n\n\n # With this method we check if a grammar is a context free grammar\n # -> First we check if the starting symbols is found within the non-terminals\n # -> Second we check if on the left hand side we have only one non-terminal (for each production)\n # -> Third we check if the productions of that left hand side non-terminal can be found within the non-terminals set or terminals set or is equal to the empty sequence\n \n def is_cfg(self):\n # Check if the start symbol is a non-terminal\n if self._start not in self._nonterminals:\n print(f\"Start symbol {self._start} is not a non-terminal.\")\n return False\n\n # Check each production\n for lh, rhs_list in self._productions.items():\n # Left-hand side should be a single non-terminal\n if lh not in self._nonterminals:\n print(f\"Left-hand side {lh} is not a non-terminal.\")\n return False\n\n # Check right-hand side of the productions\n for rhs in rhs_list:\n # Split the right-hand side into individual tokens\n rhs_symbols = rhs.split(\" \")\n\n # Check each symbol in the right-hand side\n for symbol in rhs_symbols:\n if symbol not in self._terminals and symbol not in self._nonterminals and symbol != 'ε':\n print(f\"Symbol {symbol} in production {lh} -> {rhs} is neither a terminal nor a non-terminal.\")\n return False\n\n return True\n\n def is_terminal(self, symbol):\n return symbol in self._terminals\n\n def is_nonterminal(self, symbol):\n return symbol in self._nonterminals\n\n\n def compute_first(self, symbol):\n first_set = set()\n\n if self.is_terminal(symbol):\n first_set.add(symbol)\n elif self.is_nonterminal(symbol):\n productions = self.get_nonterminal_productions(symbol)\n\n for production in productions:\n rhs_symbols = production.split(\" \")\n first_set.update(self.compute_first(rhs_symbols[0]))\n\n return first_set\n\n\n def compute_follow(self, nonterminal, computed=None):\n if computed is None:\n computed = set()\n\n follow_set = set()\n\n if nonterminal == self._start:\n follow_set.add('$') # $ - end of input\n\n computed.add(nonterminal)\n\n for lh, rhs_list in self._productions.items():\n for rhs in rhs_list:\n rhs_symbols = rhs.split(\" \")\n\n if nonterminal in rhs_symbols:\n index = rhs_symbols.index(nonterminal)\n\n if index < len(rhs_symbols) - 1:\n next_symbol = rhs_symbols[index + 1]\n\n if next_symbol in self._nonterminals:\n if next_symbol not in computed:\n follow_set.update(self.compute_follow(next_symbol, computed))\n\n # Include first of the symbol after nonterminal in Follow set\n follow_set.update(self.compute_first(next_symbol))\n\n # Include epsilon in Follow set if everything after A is epsilon\n if 'ε' in self.compute_first(next_symbol):\n follow_set.update(self.compute_follow(lh, computed))\n\n elif next_symbol in self._terminals:\n follow_set.add(next_symbol)\n\n elif lh != nonterminal:\n if lh not in computed:\n follow_set.update(self.compute_follow(lh, computed))\n\n elif 'ε' in self.compute_first(nonterminal):\n # Include epsilon in Follow set for the current nonterminal\n follow_set.update(self.compute_follow(lh, 
computed))\n\n return follow_set\n \n\n def generate_parsing_table(self):\n if not self.is_cfg():\n print(\"Cannot generate parsing table for non-context-free grammar.\")\n return None\n\n # Initialize the parsing table as a dictionary\n parsing_table = {}\n\n # Initialize table entries for each nonterminal and terminal to an empty string\n for nonterminal in self._nonterminals:\n for terminal in self._terminals + ['$']: # Include the end-of-input symbol\n parsing_table[(nonterminal, terminal)] = \"\" # Use empty string instead of None\n\n # Populate the parsing table\n for nonterminal in self._nonterminals:\n for production in self.get_nonterminal_productions(nonterminal):\n # Compute FIRST of the right-hand side of the production\n symbols = production.split()\n\n for symbol in symbols:\n first_set = self.compute_first(symbol) # Assuming compute_first works for both terminals and non-terminals\n\n for terminal in first_set:\n if terminal != 'ε': # If epsilon is not in FIRST, add the production\n parsing_table[(nonterminal, terminal)] = production\n\n # If ε is in FIRST or production is ε, add the production for all terminals in FOLLOW(nonterminal)\n if 'ε' in first_set or production == 'ε':\n follow_set = self.compute_follow(nonterminal)\n for follow_terminal in follow_set:\n # Add the production to the parsing table if the cell is empty or contains an epsilon production\n if not parsing_table[(nonterminal, follow_terminal)] or 'ε' in parsing_table[(nonterminal, follow_terminal)]:\n parsing_table[(nonterminal, follow_terminal)] = production\n\n # If the symbol is a non-terminal and leads to ε, continue to the next symbol\n if 'ε' in first_set and symbol != symbols[-1]:\n continue\n else:\n break # Break out of the loop if we've found a non-ε production or reached the end of the production\n\n # Return the parsing table\n return parsing_table\n \n\n def ll1_parser(self, input_string):\n input_string += '$'\n input_tokens = [c for c in input_string]\n\n root = ParseTreeNode(self._start) # Root of the parse tree\n stack = [('$', None), (self._start, root)] # Stack holds tuples of (symbol, tree_node)\n\n input_pointer = 0\n parsing_table = self.generate_parsing_table()\n\n while len(stack) > 1: # The stack will always contain the end symbol '$'\n stack_top, tree_node = stack[-1]\n current_input = input_tokens[input_pointer]\n\n if stack_top == current_input:\n if stack_top == '$':\n return True, root # Successful parsing and return root of the parse tree\n else:\n stack.pop()\n input_pointer += 1\n else:\n rule = parsing_table.get((stack_top, current_input))\n if rule:\n stack.pop() # Pop the nonterminal\n children = []\n\n if rule != 'ε':\n for symbol in reversed(rule.split()):\n child_node = ParseTreeNode(symbol)\n children.append(child_node)\n stack.append((symbol, child_node))\n\n tree_node.children = children # Attach children to the current node\n else:\n return False, None # Parsing error\n\n return True, root # Return the success status and the parse tree root\n\n\n\n def print_parsing_table(self):\n parsing_table = self.generate_parsing_table()\n if parsing_table is None:\n return\n\n table = Texttable()\n table.add_row([''] + self._terminals + ['$'])\n\n for nonterminal in self._nonterminals:\n row = [nonterminal]\n for terminal in self._terminals + ['$']:\n entry = parsing_table.get((nonterminal, terminal), '')\n row.append(entry)\n table.add_row(row)\n\n print(table.draw())\n\n\ndef print_tree(node, indent=\"\", last=True):\n prefix = \"└── \" if last else \"├── \"\n print(indent 
+ prefix + node.symbol)\n indent += \" \" if last else \"│ \"\n for i, child in enumerate(node.children):\n last_child = i == (len(node.children) - 1) # Check if it's the last child\n print_tree(child, indent, last_child)\n\nif __name__ == \"__main__\":\n gr = Grammar.from_file(\"./g1.txt\")\n print(\"Nonterminals: \", gr.nonterminals)\n # print()\n print(\"Terminals: \", gr.terminals)\n # print()\n print(\"Start: \", gr.start)\n print(\"Productions:\")\n for key in gr.productions:\n print(f\"{key} -> {gr.productions[key]}\")\n print()\n\n print(\"FIRST: \")\n for nt in gr.nonterminals:\n print(gr.compute_first(nt))\n print()\n print(\"FOLLOW: \")\n for nt in gr.nonterminals:\n print(gr.compute_follow(nt))\n\n print()\n # gr.print_parsing_table()\n gr.print_parsing_table()\n\n input_string = input(\"Input String: \")\n success, parse_tree_root = gr.ll1_parser(input_string)\n\n if success:\n print(\"Parsing successful! Here's the parse tree:\")\n print_tree(parse_tree_root)\n else:\n print(\"Parsing failed.\")\n","repo_name":"PotatoGolden76/nitwit-lang","sub_path":"Parser/Grammar.py","file_name":"Grammar.py","file_ext":"py","file_size_in_byte":12113,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"33970834324","text":"#!/usr/bin/python\n\n'''\nplot a 2D histogram using matplotlib\n'''\n\nimport argparse\n\nap = argparse.ArgumentParser(description=__doc__,formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nap.add_argument('--inp',required=True,type=str,help='input percent GC values, assumed to be in the range 0...100, and where < 0 means undefined')\nconf = ap.parse_args()\n\nimport sys\nfrom collections import defaultdict\n\ncts = defaultdict(defaultdict(int))\n\nfgc = open(conf.inpgc)\nfcov = open(conf.inpcov)\n\nwhile True:\n gc = fgc.readline()\n cov = fcov.readline()\n \n if gc == '' or cov == '': break #end of file\n \n tokgc = gc.strip().split()\n tokcov = cov.strip().split()\n\n assert tokgc[0] == tokcov[0]\n assert tokgc[1] == tokcov[1]\n\n if float(tokgc[2]) < 0.0: continue #skp where gc is undefined\n\n x = int(tokcov[2])\n y = int(float(tokgc[2])*10.0)\n","repo_name":"robertvi/rjvbio","sub_path":"utils/plot_cov_vs_gc.py","file_name":"plot_cov_vs_gc.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20395332475","text":"import os\nimport pandas as pd\n\n# Function to process and collect data from CSV files in a directory\ndef collect_data_from_directory(directory):\n # Get a list of CSV files in the directory\n csv_files = [file for file in os.listdir(directory) if file.endswith('.csv')]\n\n # Create a list to store data from all CSV files\n all_data = []\n\n # Read and store data from each CSV file\n for csv_file in csv_files:\n csv_path = os.path.join(directory, csv_file)\n data = pd.read_csv(csv_path, header=None, names=['x', 'timestamp', 'y'])\n all_data.append(data['y'])\n\n # Concatenate data from all CSV files and calculate mean and standard deviation\n concatenated_data = pd.concat(all_data, axis=1)\n max_values = concatenated_data.loc[47, :].mean() # Get max value for index 48\n std_values = concatenated_data.loc[47, :].std() # Get std value for index 48\n return max_values, std_values\n\n# Main function to traverse through directories, collect data, and create a table\ndef main(root_directory):\n max_values_dict = {}\n std_values_dict = {}\n\n for subdir in os.listdir(root_directory):\n subdir_path = 
os.path.join(root_directory, subdir)\n if os.path.isdir(subdir_path):\n max_value, std_value = collect_data_from_directory(subdir_path)\n max_values_dict[subdir] = max_value\n std_values_dict[subdir] = std_value\n\n # Create a DataFrame with the max hypervolume values and standard deviations\n df_max = pd.DataFrame.from_dict(max_values_dict, orient='index', columns=['Mean HV'])\n df_std = pd.DataFrame.from_dict(std_values_dict, orient='index', columns=['Std Deviation'])\n\n # Combine the DataFrames\n df_combined = pd.concat([df_max, df_std], axis=1)\n\n # Print the combined DataFrame\n print(df_combined)\n df_combined.to_csv('table/HV.csv')\n\nif __name__ == \"__main__\":\n root_dir = \"/Users/amineelblidi/Documents/Bachlor vorbereitung/code/TEST/data/Hypervolume\"\n main(root_dir)\n","repo_name":"amine0el/TEST","sub_path":"table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1753896095","text":"import psycopg2 as pg\nimport psycopg2.extras\n\npg_local = {\n 'host':\"localhost\",\n 'user':\"postgres\",\n 'dbname':\"postgres\",\n 'password':\"Ska25zns!\"\n}\n\n#postgres://dbuser:1234@postgres/dbapp\n#localhost == 127.0.0.1\n#postgres://postgress:빕번@127.0.0.1/postgres\n\ndb_connector = pg_local \n\nconnect_string = \"host={host} user={user} dbname={dbname} password={password}\".format(\n **db_connector)\n\ndef read_tables():\n tables =[]\n with pg.connect(connect_string) as conn:\n with conn.cursor() as cur:\n cur.execute(\"\"\"SELECT table_name FROM information_schema.tables\n WHERE table_schema = 'public'\"\"\")\n for table in cur.fetchall():\n tables.append(table)\n return tables\n\n# with pg.connect(connect_string) as conn:\n# with conn.cursor() as cur:\n# cur.execute(\n# \"CREATE TABLE guser ( id integer primary key, name varchar(20), email varchar(20) );\")\n\ndef read_dbs():\n sql = '''SELECT datname FROM pg_database;'''\n with pg.connect(connect_string) as conn:\n with conn.cursor() as cur:\n cur.execute(sql)\n for db in cur.fetchall():\n print(db)\n\ndef create_table(table_name):\n sql = f'''CREATE TABLE {table_name} (\n id integer primary key,\n name varchar(20),\n email varchar(20)\n );\n '''\n print(sql)\n try:\n conn = pg.connect(connect_string)#db 연결\n cur = conn.cursor()#작업할 지시자 정하기\n cur.execute(sql)#실행\n\n #db 저장하고 마무리\n conn.commit()#select 제외 commit 필요 db에 저장하는 거\n conn.close()\n #db 연결 해제\n except pg.OperationalError as e:\n print(e)\n\ndef insert(table_name,sid,name,email):\n sql = f'''INSERT INTO {table_name}\n VALUES({sid},\\'{name}\\', \\'{email}\\');\n '''\n print(sql)\n try:\n conn = pg.connect(connect_string)#db 연결\n cur = conn.cursor()#작업할 지시자 정하기\n cur.execute(sql)#실행\n\n #db 저장하고 마무리\n conn.commit()#select 제외 commit 필요 db에 저장하는 거\n conn.close()\n #db 연결 해제\n except pg.OperationalError as e:\n print(e)\n return -1\n return 0\n\ndef students_list():\n sql = f'''SELECT id, name, email FROM student\n '''\n print(\"lists\")\n try:\n conn = pg.connect(connect_string)#db 연결\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)#작업할 지시자 정하기\n cur.execute(sql)#실행\n\n result = cur.fetchall()\n print(result)\n conn.close()\n return result\n except Exception as e:\n print(e)\n return []\n\n\n\ndef main():\n print(\"pg!\")\n read_dbs()\n create_table(\"student\")\n read_tables()\n\n#if __name__ == (\"__main__\"):\n# 
main()\n","repo_name":"nesllewr/web","sub_path":"flask/pypg/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"69915914877","text":"import os\r\nimport csv\r\n\r\nfrom PIL import Image\r\n\r\noutput = 'decor_split'\r\n\r\nwith open('decor.csv') as csvfile:\r\n label_reader = csv.reader(csvfile, delimiter=',')\r\n next(label_reader) # skipping header\r\n for row in label_reader:\r\n if row[5] == 'product':\r\n path = os.path.join(output, row[3])\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n img = Image.open(row[6]).convert(\"RGB\")\r\n dst = os.path.join(output, row[3], row[6].replace('.png', '.jpg'))\r\n img.save(dst)\r\n","repo_name":"krelus01/TF_decor_tuto","sub_path":"split_images.py","file_name":"split_images.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15834257142","text":"from pathlib import Path\r\nimport configparser\r\nfrom abc import ABC, abstractmethod\r\nimport pdfplumber\r\nimport re\r\nfrom collections import OrderedDict\r\nimport sys\r\n\r\n#utility functions\r\n\r\ndef getfiles(directory, suffix='.*'):\r\n \"\"\"Returns list of Path objects contained within directory and its subdirectories\r\n\r\n Parameters:\r\n directory (str or Path OBJECT): Topmost directory to search\r\n suffix (str): filter by desired suffix (begin with '.'). If not specified, return all files.\r\n Returns:\r\n list of Path objects\r\n \"\"\"\r\n path=Path(directory)\r\n fileslist = list(path.rglob(f'*{suffix}'))\r\n return fileslist\r\n\r\ndef getPDFfiles(directory):\r\n return getfiles(directory, '.pdf')\r\n \r\ndef getinifiles(directory):\r\n return getfiles(directory, '.ini')\r\n\r\ndef str_to_class(str):\r\n return getattr(sys.modules[__name__], str)\r\n \r\n#ini functions and constants\r\n\r\nINIKEY = 'fields'\r\nEDITMARKER = '|e'\r\n#SELECTMARKER = '|s' -- not used since only two formfield types\r\nCONCAT = -2\r\n\r\nEDITTYPE = 'e'\r\nSELECTTYPE = 's'\r\n\r\nFORMSCATALOG = {}\r\n\r\n#iniparser factory\r\ndef iniparserfactory():\r\n iniparser = configparser.ConfigParser(allow_no_value=True)\r\n iniparser.optionxform = str #maintain key case, remove for lower case \r\n return iniparser \r\n\r\ndef readfieldsfromini(file):\r\n fields = {}\r\n fields['textfields'] = {}\r\n fields['formfields'] = {}\r\n #iniparser =iniparserfactory()\r\n iniparser = readinifile(file)\r\n for field in iniparser['fields']:\r\n value=iniparser['fields'][field]\r\n storefield(fields, field, value)\r\n return fields\r\n \r\ndef readinifile(file):\r\n iniparser = iniparserfactory()\r\n iniparser.read(file)\r\n return iniparser \r\n\r\ndef storefield(fields, field, value):\r\n if value is None:\r\n formfield = parseffield(field)\r\n if field.endswith(EDITMARKER):\r\n fields['formfields'][formfield] = EDITTYPE\r\n else:\r\n fields['formfields'][formfield] = SELECTTYPE\r\n else:\r\n fields['textfields'][field] = value\r\n return\r\n \r\ndef parseffield(field):\r\n return field[0:CONCAT]\r\n \r\n#PDF Forms Classes and Utilities\r\n\r\nclass PDFParser:\r\n parser = pdfplumber\r\n text = None\r\n formdata = None\r\n \r\n @classmethod\r\n def parse(cls, pdffile):\r\n with cls.parser.open(pdffile) as pdf:\r\n cls.text = pdf.pages[0].extract_text()\r\n cls.formdata = {}\r\n fields = pdf.doc.catalog[\"AcroForm\"].resolve()[\"Fields\"]\r\n for objref in fields:\r\n 
field=objref.resolve()\r\n field_name = field.get(\"T\").decode()\r\n field_value = field.get(\"V\")\r\n if field_value is not None:\r\n field_value = field_value.decode()\r\n cls.formdata[field_name] = field_value \r\n\r\nclass AbstractPDFForm(ABC):\r\n name = None\r\n textfields = None\r\n formfields = None\r\n \r\n @staticmethod\r\n @abstractmethod\r\n def isForm(parser):\r\n pass\r\n \r\n @classmethod \r\n def initialize(cls,inifieldscatalog):\r\n cls.textfields = inifieldscatalog['textfields']\r\n cls.formfields = inifieldscatalog['formfields']\r\n #cls.pdfparser = pdfparser\r\n \r\n @classmethod\r\n def extractDataFromParser(cls, parser):\r\n formdata = cls._readPDFFormFields(parser.formdata)\r\n textdata = cls._readPDFTextFields(parser.text)\r\n return {**textdata, **formdata}\r\n \r\n # @classmethod\r\n # def _parsePDF(cls, pdffile):\r\n # pdf= cls.pdfparser.open(pdffile)\r\n # return {pdf.formdata, pdf.textdata}\r\n \r\n @classmethod\r\n def _readPDFFormFields(cls, pdfformdata):\r\n formdata = {}\r\n for field in cls.formfields:\r\n formdata[field] = pdfformdata.get(field)\r\n return formdata\r\n \r\n @staticmethod \r\n @abstractmethod\r\n def _readPDFTextFields(pdftext):\r\n pass\r\n \r\n @classmethod\r\n def _generateDataDict(cls, pdfformdata, pdftext):\r\n formdata=cls._readPDFFormFields(pdfformdata)\r\n textdata=cls_readPDFTextFields(pdftext)\r\n return {**textdata, **formdata} #concatenates dictionaries\r\n \r\nclass PDFForm_211559_050(AbstractPDFForm):\r\n name = '211559-050'\r\n \r\n @staticmethod\r\n def isForm(parser):\r\n #text = pdf.pages[0].extract_text()\r\n return parser.text.find('TALLER') > -1\r\n \r\n @staticmethod \r\n def _readPDFTextFields(pdftext):\r\n return {}\r\n\r\n \r\nclass PDFForm_1900070(AbstractPDFForm):\r\n name='1900070'\r\n \r\n @staticmethod\r\n def isForm(parser):\r\n #text = pdf.pages[0].extract_text()\r\n return parser.text.find('TALLER') == -1\r\n \r\n @staticmethod\r\n def _readPDFTextFields(pdftext):\r\n textdata = {}\r\n textregex = r\"ALBARAN.*[\\r\\n]*([\\d]+-[\\d]+-[\\d]+) (\\w*)\" #([\\w]+)\"\r\n textcapturesmap = {'DeliveryNote' : 2, 'DeliveryDate' : 1}\r\n fielddata= re.search(textregex, pdftext)\r\n for field in textcapturesmap:\r\n textdata[field] = fielddata.group(textcapturesmap[field])\r\n return textdata\r\n \r\n#forms_catalog = {'211559-050' : PDFForm_211559_050, '1900070': PDFForm_1900070}\r\n\r\ndef identifyform(formscatalog, parser):\r\n\r\n for form in formscatalog.values():\r\n if form.isForm(parser):\r\n return form \r\n return None #raise exception here instead\r\n \r\ndef createscript(formsdata):\r\n for iter in formsdata:\r\n iterstr = str(iter)\r\n formtype = formsdata[iter][\"formtype\"]\r\n filename = formtype + \"_\" + iterstr + \".txt\"\r\n formdata = formsdata[iter]['data']\r\n pdfform = FORMSCATALOG[formtype]\r\n \r\n fieldobjects = genscriptdataobjects(pdfform, formdata) \r\n writescript(filename, fieldobjects)\r\n \r\ndef genscriptdataobjects(pdfform, formdata):\r\n\r\n rows = []\r\n rows.append('\\nlet formdata = [\\n')\r\n \r\n textfields = pdfform.textfields\r\n formfields = pdfform.formfields\r\n \r\n\r\n for textfield in textfields:\r\n row = '{field: \"' + textfield + '\", fieldtype: \"e\", value: \"' + formdata[textfield] + '\"},\\n'\r\n rows.append(row)\r\n \r\n for formfield in formfields:\r\n row = row = '{field: \"' + formfield + '\", fieldtype: \"' + formfields[formfield] + '\", value: \"' + formdata[formfield] + '\"},\\n'\r\n rows.append(row)\r\n rows.append(']\\n')\r\n return rows\r\n\r\n\r\ndef 
writescript(filename, fieldobjects):\r\n s1 ='''const S = 's'\r\n\r\nfunction processfield(data) {\r\n formfield = document.getElementById(data.field)\r\n if (data.field == S) {\r\n processselectfield(formfield, data.value)\r\n } else { //otherwise text data\r\n formfield.value = data.value\r\n }\r\n}\r\n\r\nfunction processselectfield(field, value) {\r\n for (var i = 0; i < field.options.length; i++) {\r\n if (field.options[i].text == value) {\r\n field.options[i].selected = true;\r\n return; \r\n }\r\n }\r\n}'''\r\n\r\n s3 = '''for (data of formdata) {\r\n processfield(data)\r\n}'''\r\n try:\r\n f = open(filename, \"w\")\r\n f.write(s1)\r\n f.writelines(fieldobjects)\r\n f.write(s3)\r\n f.close()\r\n except BaseException as msg:\r\n print('Write Error occurred: ' + str(msg))\r\n \r\ndef execute():\r\n try:\r\n\r\n appconfig=readinifile(\"app.ini\")\r\n ini_dir = appconfig['dirs']['inidir']\r\n data_dir = appconfig['dirs']['datadir']\r\n\r\n forms = appconfig['forms']\r\n \r\n #formscatalog = {}\r\n for formname in forms:\r\n classname = forms[formname]\r\n FORMSCATALOG[formname] = str_to_class(classname)\r\n\r\n parser = PDFParser\r\n\r\n #initialize form classes from ini file\r\n\r\n for formname in FORMSCATALOG:\r\n formini = ini_dir+formname+'.ini'\r\n formfieldscatalog = readfieldsfromini(formini)\r\n formclass = FORMSCATALOG[formname]\r\n formclass.initialize(formfieldscatalog)\r\n \r\n #retrieve PDFs\r\n\r\n iter = 1\r\n datadict = OrderedDict()\r\n\r\n pdffiles = getPDFfiles(data_dir)\r\n for pdffile in pdffiles:\r\n parser.parse(pdffile)\r\n form = identifyform(FORMSCATALOG, parser)\r\n formdata = form.extractDataFromParser(parser)\r\n datastore = {'file': pdffile.resolve(), 'formtype': form.name, 'data' : formdata}\r\n datadict[iter] = datastore\r\n iter = iter + 1\r\n \r\n return datadict\r\n \r\n except BaseException as msg:\r\n print('Execute Error occurred: ' + str(msg))\r\n\r\n \r\nif __name__ == '__main__':\r\n data = execute()\r\n createscript(data)\r\n \r\n \r\n","repo_name":"photane/liveproject-DataEntryAutomation","sub_path":"dn.py","file_name":"dn.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24430087585","text":"T = int(input())\n\ndef solve():\n N = int(input())\n S = input()\n one = S.count(\"1\")\n if one%2==1:\n print(-1)\n return\n else:\n cnt = 0\n if one==2:\n if S.count(\"11\") == 1:\n if N==2 or N==3:\n print(-1)\n return\n cnt+=2\n print((one//2)+cnt)\n return\n\nfor i in range(T):\n solve()","repo_name":"1022yuki/atcoder","sub_path":"ARC/ARC156/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"37316303933","text":"import streamlit as st\r\nimport pandas as pd\r\nimport requests\r\nimport sqlite3\r\nfrom datetime import datetime\r\nimport time\r\nfrom PIL import Image\r\nfrom utils import *\r\n\r\ndef main():\r\n\r\n #---------------\r\n if \"model_running\" not in st.session_state:\r\n st.session_state['model_running']=\"not_activated\"\r\n #---------------\r\n\r\n option_c = \"question_answering\"\r\n option_b = \"text_generator\"\r\n option_a = \"sentiment_analysis\"\r\n option_d = \"image_classifier\"\r\n\r\n st.sidebar.header(\"Analyzing app\")\r\n st.text(\"\")\r\n\r\n model_choice = st.sidebar.selectbox(\"Choose your model\",\r\n [option_a, option_b, option_c, option_d])\r\n\r\n if 
model_choice!=st.session_state['model_running']:\r\n start_model(model_choice)\r\n\r\n st.text(\"\")\r\n st.markdown(f\"### {model_choice.replace('_',' ').capitalize()}\")\r\n\r\n#---------------------------------------------\r\n if model_choice==\"question_answering\":\r\n form = st.form(key =\"my_form\")\r\n context_input = form.text_input(\"Write something here.\",\" \")\r\n question_input = form.text_input(\"Write a question.\",\" \")\r\n submit_button=form.form_submit_button(label =\"Press when done\")\r\n if submit_button:\r\n if len(context_input)>2 and len(question_input)>2:\r\n answer_output, score_output =question_answering(context_input, question_input)\r\n st.success(f\"Answer to your question is : {answer_output} with a surety of {score_output:.4f} \")\r\n st.text(\"\")\r\n sql_input_qa(context_input, question_input, answer_output, score_output)\r\n else:\r\n st.error(\"Both fields must have a value.\") \r\n st.text(\"\")\r\n if st.button('Press to show data'):\r\n sql_output_qa()\r\n\r\n#--------------------------------------------- \r\n elif model_choice ==\"text_generator\":\r\n\r\n st.write(\"This model is not developed in this project.\")\r\n#---------------------------------------------\r\n elif model_choice==\"sentiment_analysis\":\r\n text_input = st.text_input(\"Write a sentence.\",\"\")\r\n sentiment_output, score_output = sentiment_analysis(text_input)\r\n if text_input:\r\n if sentiment_output.capitalize() == \"Positive\":\r\n st.success(f\"Sentiment response: {sentiment_output.capitalize()} at a score rate of {score_output:.4f}\")\r\n elif sentiment_output.capitalize() == \"Negative\":\r\n st.error(f\"Sentiment response: {sentiment_output.capitalize()} at a score rate of {score_output:.4f}\")\r\n st.text(\"\")\r\n st.markdown('** Do you agree? 
Please respond below **')\r\n validation = st.selectbox(\"\",[\"Positive\",\"Negative\"])\r\n \r\n if st.button('Press to save data'):\r\n sentimental_analysis_sql_input(text_input, sentiment_output, score_output, validation.upper())\r\n st.text(\"\")\r\n st.text(\"\")\r\n if st.button('Press to show data'):\r\n sentimental_analysis_sql_output()\r\n pass\r\n\r\n#---------------------------------------------\r\n elif model_choice==\"image_classifier\":\r\n init_session_state_image_classifier() # initialize cat and hide_panel (session state variables)\r\n \r\n st.write(\"This model takes your input image and compare it to three labels and gives a score how much the model think your image looks like the label\")\r\n st.write(\"_You can use the default labels or choose three of your own in the meny._\")\r\n labels_changer()\r\n \r\n if st.session_state['hide_upload_panel']==0: # if somethin is wrong with categories -> hide panel\r\n file_upload = st.file_uploader(\"Upload a file\", type=[\"jpeg\",\"jpg\",\"png\"])\r\n \r\n if file_upload is not None: #and st.session_state['show_error']==1:\r\n img = Image.open(file_upload)\r\n files = {'file': file_upload.getvalue()} #the picture as binary\r\n x = 0\r\n try:\r\n result = image_classifier(files)\r\n st.image(img) #show the users picture\r\n x=1\r\n except ValueError:\r\n st.error(\"The model could not analyse your image please try with another one!\")\r\n x=0 \r\n if x == 1:\r\n img_name = file_upload \r\n for k, v in result.items():\r\n result[k] = float(v)\r\n for key,value in result.items():\r\n st.write(f\"Label: {key} = {value:.6f}\")\r\n if st.button('Press to save data'):\r\n sql_image_input(img_name,result)\r\n if st.button('Press to show data'):\r\n sql_img_output()\r\n elif x==0:\r\n pass\r\n else:\r\n pass\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n","repo_name":"wibstyle/Studiegrupp_3","sub_path":"main_streamlit.py","file_name":"main_streamlit.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15490526356","text":"import xarray as xr\nimport numpy as np\nimport pandas as pd\nfrom pqtool.utils import *\nfrom argparse import ArgumentParser\nimport yaml\nfrom munch import munchify\nfrom tasks.utils import getConfigurationByID\nfrom tasks.pq.class2_pq import Class2\nimport os\n\n# filtering outliers according to z-score\ndef reject_outliers(data, obs_type='argo',reference=None, sigma=5.):\n variables = ['temperature', 'salinity']\n if obs_type=='sst':\n variables.remove('salinity')\n for variable in variables:\n bias = data['model_%s' % variable].isel(model=0) - data[variable]\n stdev = float(bias.std())\n mean = float(np.nanmean(bias))\n\n total = int(data[variable].count())\n data[variable] = data[variable].where(np.abs(bias - mean) < sigma * stdev)\n accepted = int(data[variable].count())\n try:\n print(total, '->', accepted, 100. 
* accepted / total, (total - accepted), 'rejected')\n except:\n pass\n return data\n###\nparser = ArgumentParser(description='Process intermediate data')\nparser.add_argument('-c', '--config', default='../example/config.yaml', help='configuration file')\nparser.add_argument('-o', '--obs_type', default='argo')\nparser.add_argument('-m', '--model', default='fc0')\nparser.add_argument('-d', '--date', help='reference date')\nparser.add_argument('-vc', '--validation_class', required=True, help='Class_4 or Class_2')\n\nargs = parser.parse_args()\nconfig = args.config\nobs_type = args.obs_type\nmod_type = args.model\nref_date = args.date\n\nvc=args.validation_class\n\ncfg_base=getConfigurationByID('conf.yaml','base')\ncfg =getConfigurationByID('conf.yaml','Class_2' if vc=='cl2' else 'Class_4' )\n\noutpath=cfg_base.output\nin_file=os.path.join(outpath,ref_date,f\"{args.validation_class}_{obs_type}_{mod_type}_{ref_date}.nc\")\nprint(in_file)\nintermediate = xr.open_dataset(in_file)\n\nif obs_type == \"sla\":\n intermediate=intermediate.isel(obs=np.isfinite(intermediate.model_ssh.values[0]))\n mdt_file = cfg_base.mdt\n mdt = xr.open_dataset(mdt_file).get(['mdt', 'old_mdt', 'bathymetry'])\n del mdt.coords['lon']\n del mdt.coords['lat']\n\n coords = {coord: intermediate[coord] for coord in ['longitude', 'latitude']}\n\n mdt = mdt.interp(coords, method='nearest')\n intermediate = xr.merge([intermediate, mdt])\n intermediate = intermediate.where(intermediate['bathymetry'] > 1000.)\n intermediate['model_sla'] = intermediate['model_ssh'] - intermediate['mdt']\n # this apply changes only at the old version of BSFS.\n intermediate = intermediate.groupby('model').map(swap_mdt)\n\nif obs_type.split('_')[0]==\"moor\":\n Class2(intermediate,ref_date,cfg,cfg_base)\n\nelse:\n\n intermediate.coords['date'] = intermediate['time'].astype('datetime64[D]')\n try:\n intermediate = intermediate.dropna('obs')\n except:\n pass\n\n if obs_type == \"argo\":\n bins = np.array(cfg.variables.depths.value)\n print (intermediate)\n #add z-score filter\n intermediate = intermediate.groupby_bins('depth', bins=np.insert(bins,0,0) ).map(reject_outliers,obs_type='argo',sigma=3.)\n #\n result = intermediate.groupby_bins('depth', bins=np.insert(bins, 0, 0)).apply(mvr_metrics)\n print (result)\n #result = intermediate.groupby_bins('depth', bins=np.insert(bins,0,0) ).apply(mvr_metrics)\n #\n result=result.rename_dims({\"depth_bins\":\"depths\"})\n result = result.drop('depth_bins')\n elif obs_type == \"sla\":\n along_track = True\n\n intermediate = intermediate.groupby('model').map(unbias_along_track if along_track else unbias)\n result = intermediate.groupby('date').apply(mvr_metrics)\n\n elif obs_type == \"sst\":\n #add z-score filter\n intermediate = intermediate.groupby('date').map(reject_outliers,obs_type='sst',sigma=3.)\n #\n result = intermediate.groupby('date').apply(mvr_sst_metrics)\n else:\n exit(f'{obs_type} not implemented yet')\n try:\n result =result.rename({\"date\":\"time\"})\n except:\n pass\n\n comp = dict(_FillValue=None)\n encoding = {var: comp for var in result.data_vars}\n\n out_file=os.path.join(outpath,ref_date, f\"mvr_{args.validation_class}_{obs_type}_{mod_type}_{ref_date}.nc\")\n result.to_netcdf(out_file, encoding=encoding)\n\n","repo_name":"scausio/MVR_2022","sub_path":"tools/mvr_new/bin/process_intermediate.py","file_name":"process_intermediate.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} 
+{"seq_id":"3476561331","text":"# Write the benchmarking functions here.\n# See \"Writing benchmarks\" in the asv docs for more information.\n\nimport os\n\nimport asv.util\nimport timeit\n\ncwd = os.getcwdu()\ntestdir = os.path.sep.join([cwd, \"microtests\"])\n\n\nclass Testing(object):\n repeat = 100\n timeout = 600.0\n timer = timeit.default_timer\n\n def compile(self, name):\n \"\"\"\n Compile @name. If @name is a list, loop over it.\n \"\"\"\n os.chdir(testdir)\n if isinstance(name, list):\n for n in name:\n asv.util.check_call(\"shedskin %s\" % n, shell=True)\n asv.util.check_call(\"make\")\n else:\n asv.util.check_call(\"shedskin %s\" % name, shell=True)\n asv.util.check_call(\"make\")\n os.chdir(cwd)\n\n def run_test(self, test):\n binary = os.path.sep.join([testdir, test])\n cmd = \"%s > /dev/null\" % binary\n asv.util.check_call(cmd, shell=True)\n\n\nclass Startup(Testing):\n \"\"\"\n Benchmark all startup tests.\n \"\"\"\n def setup(self):\n self.compile(\"empty_startup.py\")\n\n def time_empty_startup(self):\n self.run_test(\"empty_startup\")\n\n\nclass Printing(Testing):\n \"\"\"\n Benchmark all printing tests.\n \"\"\"\n tests = [\"print_empty.py\",\n \"print_floats.py\",\n \"print_ints.py\",\n \"print_str.py\",\n ]\n\n def setup(self):\n self.compile(self.tests)\n\n def time_empty(self):\n self.run_test(\"print_empty\")\n\n def time_floats(self):\n self.run_test(\"print_ints\")\n\n def time_ints(self):\n self.run_test(\"print_ints\")\n\n def time_str(self):\n self.run_test(\"print_str\")\n\n\nclass Reference(Testing):\n \"\"\"\n Benchmark reference C/C++ tests.\n \"\"\"\n folder = \"reference\"\n\n def setup(self):\n os.chdir(os.path.sep.join([cwd, self.folder]))\n asv.util.check_call(\"gcc empty_startup.c -o empty_startup\", shell=True)\n os.chdir(cwd)\n\n # def time_empty_startup(self):\n # binary = os.path.sep.join([cwd, self.folder, \"empty_startup\"])\n # cmd = \"%s > /dev/null\" % binary\n # asv.util.check_call(cmd, shell=True)\n","repo_name":"shedskin/benchmarks","sub_path":"benchmarks/benchmarks.py","file_name":"benchmarks.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"20766947906","text":"import unittest\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom dictime import moment\n\n\nclass Tests(unittest.TestCase):\n def test_has_one_value(self):\n \"moments have only one value at any moment\"\n pass\n\n def test_future(self):\n \"moments can have multiple future values\"\n b = moment('a', future=dict(milliseconds=10))\n b.set('b', future=dict(milliseconds=20))\n self.assertRaises(ValueError, b.get)\n time.sleep(.01)\n self.assertEquals(b.get(), 'a')\n time.sleep(.01)\n self.assertEquals(b.get(), 'b')\n self.assertEquals(len(b), 1, \"'a' was not removed\")\n\n def tests_expire(self):\n \"moments can expire values\"\n b = moment()\n b.set('a', expires=datetime.now() + timedelta(milliseconds=10))\n self.assertEquals(b.get(), 'a')\n time.sleep(.01)\n self.assertRaises(ValueError, b.get)\n \n def tests_value(self):\n \"moments evicts present\"\n b = moment()\n b.set('a')\n b.set('b')\n self.assertEquals(b.get(), 'b')\n b.set('c')\n self.assertEquals(b.get(), 'c')\n self.assertEquals(len(b), 1)\n # replace w/ future\n b.set('d', future=datetime.now() + timedelta(milliseconds=10))\n self.assertEquals(b.get(), 'c')\n self.assertEquals(len(b), 2)\n time.sleep(.01)\n self.assertEquals(b.get(), 'd')\n\n def test_set(self):\n \"moments 
validate expires/future values\"\n self.assertRaises(ValueError, moment().set, 1, 'not a date')\n self.assertRaises(ValueError, moment().set, 1, future='not a date')\n self.assertRaises(AssertionError, moment().set, 1, expires=datetime.now() - timedelta(minutes=10))\n self.assertRaises(AssertionError, moment().set, 1, datetime.now() + timedelta(minutes=10), datetime.now() + timedelta(minutes=20))\n","repo_name":"movermeyer/dictime","sub_path":"tests/test_moments.py","file_name":"test_moments.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26574512598","text":"import base64\n\nfrom six.moves.urllib.parse import quote\n\nimport advertools as adv\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\nfrom dash_table import DataTable\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\nimport pandas as pd\nimport logging\n\nimg_base64 = base64.b64encode(open('./logo.png', 'rb').read()).decode('ascii')\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s==%(funcName)s==%(message)s')\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.COSMO])\n\nserver = app.server\n\napp.layout = html.Div([\n html.Br(),\n # dbc.Row([\n # dbc.Col([\n # html.A([\n # html.Img(src='data:image/png;base64,' + img_base64, width=200),\n # ], href='https://github.com/eliasdabbas/advertools'),\n # ], sm=12, lg=2, style={'text-align': 'center'}), html.Br(),\n # dbc.Col([\n # html.H1('Search Engine Marketing: Keyword Generation Tool',\n # style={'text-align': 'center'}),\n # ], sm=12, lg=9),\n # ], style={'margin-left': '5%'}),\n # html.Hr(),\n # html.Br(),\n dbc.Row([\n dbc.Col([\n dbc.Label('Edit campaign name:'),\n dbc.Input(id='campaign_name',\n value='SEM_Campaign'),\n html.Br(),\n dbc.Label('Select match type(s):'),\n dcc.Dropdown(id='match_types',\n multi=True,\n options=[{'label': match, 'value': match}\n for match in ['Exact', 'Phrase', 'Modified',\n 'Broad']],\n value=['Exact', 'Phrase']),\n html.Br(),\n dbc.Checklist(id='order_matters',\n values=['True'],\n options=[{'label': 'Order matters', 'value': 'True'}]),\n html.Br(),\n dbc.Row([\n dbc.Col([\n dbc.Label(' Products:'),\n dbc.Textarea(id='products_table', value='', rows=20,\n cols=10,\n placeholder='Products you sell, one per line\\n'\n 'Example:\\n\\nhonda\\ntoyota\\nbmw\\netc...')\n ]),\n dbc.Col([\n dbc.Label(' Words:'),\n dbc.Textarea(id='words_table', value='', rows=20,\n cols=10,\n placeholder='Words that signify purchase intent, '\n 'one per line\\n'\n 'Example:\\n\\nbuy\\nprice\\nshop\\netc...'),\n ]),\n ])\n ], sm=11, lg=3, style={'margin-left': '5%'}),\n dbc.Col(lg=1),\n dbc.Col([\n html.Br(),\n dbc.Button(id='submit', children='Generate Keywords',\n style={'display': 'none'}),\n html.Br(), html.Br(),\n dcc.Loading(\n DataTable(id='output_df',\n virtualization=True,\n fixed_rows={'headers': True},\n style_header={'background-color': '#A6A7A7'},\n style_cell={'font-family': 'Source Sans Pro'},\n columns=[{'name': col, 'id': col}\n for col in ['#', 'Campaign', 'Ad Group',\n 'Keyword', 'Criterion Type',\n 'Labels']]),\n ),\n html.Br(),\n html.B( html.A('Download Keywords',\n id='download_link',\n download=\"rawdata.csv\",\n href=\"\",\n target=\"_blank\",\n n_clicks=0),\n),\n html.Div(id='kw_df_summary'),\n html.Div([\n html.Br(), html.Br(),\n html.H3('About the tool:'),\n html.Content('In the \"Products\" 
column, simply enter the '\n 'products/services you are trying'\n ' to promote, one per line.\\nFor \"Words\", think '\n 'of verbs and words that indicate interest'\n 'if combined with your products. Then all '\n 'possible combinations will be genrated for you.'\n '\\nYou can also play around with the '\n 'other options.'),\n html.Br(), html.Br(),\n html.H3('Reference content:'), html.Br(),\n html.Content('Quick overview: '),\n html.A('Short presentation describing what generating '\n 'keywords means (compared to researching keywords)',\n href='https://www.slideshare.net/eliasdabbas/dont-research-keywords-generate-them'),\n html.Br(),\n html.Content('For more details on the logic behind generating '\n 'the keywords, please checkout the '),\n html.A('DataCamp tutorial on Search Engine Marketing.',\n href='http://bit.ly/datacamp_sem'),\n html.Br(),\n html.Content('DataCamp project: '),\n html.A('Practice generating keywords using Python and pandas',\n href='https://www.datacamp.com/projects/400'),\n html.Br(),\n html.Content('SEMrush tutorial: '),\n html.A('Setting up SEM accounts on a large scale.',\n href='https://www.semrush.com/blog/setting-up-search-engine-marketing-campaigns-on-large-scale/'),\n html.Br(),\n html.Content('Functionality based on the '),\n html.A('advertools', href='http://bit.ly/advertools'),\n html.Content(' package.')\n ] + [html.Br() for x in range(9)]),\n ], sm=11, lg=7),\n ]),\n html.Div(id='download')\n] + [html.Br() for i in range(3)], style={'background-color': '#eeeeee'})\n\n\n@app.callback(Output('kw_df_summary', 'children'),\n [Input('output_df', 'data')])\ndef display_kw_df_summary(kw_df_list):\n kw_df = pd.DataFrame(kw_df_list)\n return [html.H3('Summary:'),\n html.Content('Total keywords: ' + str(len(kw_df))), html.Br(),\n html.Content('Unique Keywords: ' + str(kw_df['Keyword'].nunique())),\n html.Br(),\n html.Content('Ad Groups: ' + str(kw_df['Ad Group'].nunique()))]\n\n\n@app.callback(Output('output_df', 'data'),\n [Input('submit', 'n_clicks')],\n [State('products_table', 'value'),\n State('words_table', 'value'),\n State('match_types', 'value'),\n State('campaign_name', 'value'),\n State('order_matters', 'values')])\ndef generate_kw_df(button, products, words, match_types, campaign_name,\n order_matters):\n if any([x is None for x in [button, products, words, match_types,\n campaign_name, order_matters]]):\n raise PreventUpdate\n if button and products and words and match_types and campaign_name:\n logging.info(msg=locals())\n\n if products and words:\n product_list = list({x.strip() for x in products.split('\\n') if x})\n if '' in product_list:\n product_list.remove('')\n word_list = list({x.strip() for x in words.split('\\n')})\n final_df = adv.kw_generate(product_list, word_list,\n match_types=match_types,\n order_matters=bool(order_matters),\n campaign_name=campaign_name)\n final_df['#'] = list(range(1, len(final_df) + 1))\n return final_df.to_dict('rows')\n\n@app.callback(Output('download_link', 'href'),\n [Input('output_df', 'data')])\ndef download_df(data_df):\n df = pd.DataFrame.from_dict(data_df, 'columns')\n df = df.drop('#', axis='columns')\n csv_string = df.to_csv(index=False, encoding='utf-8')\n csv_string = \"data:text/csv;charset=utf-8,\" + quote(csv_string)\n return csv_string\n\n\n@app.callback(Output('download', 'children'),\n [Input('download_link', 'n_clicks')])\ndef register_file_downloads(n_clicks):\n if n_clicks:\n logging.info(str(n_clicks) + '_file_download')\n\n\n@app.callback(Output('submit', 'style'),\n 
[Input('products_table', 'value'),\n Input('words_table', 'value'),\n Input('match_types', 'value'),\n Input('campaign_name', 'value')])\ndef show_submit_button(products, words, match_types, campaign_name):\n if products is None and words is None:\n raise PreventUpdate\n if products and words:\n return {'display': 'inline'}\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"eliasdabbas/advertools_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9048,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"13119778239","text":"class TreeNode(object):\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\ndef buildBinaryTree(nums, index):\n if index >= len(nums) or nums[index] is None:\n return None\n\n root = TreeNode(nums[index])\n root.left = buildBinaryTree(nums, 2 * index + 1) # Recursive call for left child\n root.right = buildBinaryTree(nums, 2 * index + 2) # Recursive call for right child\n return root\n\n\nclass Solution(object):\n def inorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if root is None:\n return []\n\n result = []\n result.extend(self.inorderTraversal(root.left))\n result.append(root.val)\n result.extend(self.inorderTraversal(root.right))\n return result\n\n\ndef main():\n # Test cases: root = [1,null,2,3], output = [1,3,2]\n # root = [], output = []\n # root = [1], output = [1]\n nums = [3,1,2]\n root = buildBinaryTree(nums, 0)\n\n solution = Solution()\n print(solution.inorderTraversal(root))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Kaaykun/Leetcode","sub_path":"Python/094_BinaryTreeTraversal.py","file_name":"094_BinaryTreeTraversal.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42751540494","text":"class Stock:\n def __getattribute__(self, item):\n print(\"You have approached to:\",item, \"object\")\n\ns = Stock()\n\n# myItem is delivered to magic method __getattribute__ \n# as an item parameter.\ns.myItem\ns.wow\ns.newlyCreatedAttr","repo_name":"asunlabs/mycodebox-monorepo","sub_path":"languages/python/basics/attr/attr.py","file_name":"attr.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16609652433","text":"# encoding: utf-8\n\"\"\"\n@author: wanglixiang\n@contact: lixiangwang9705@gmail.com\n\"\"\"\n\n## 3.\nclass ReorderLink:\n @staticmethod\n def find_mid_node(head):\n if head is None or head.next is None:\n return head\n fast = head\n slow = head\n prev = slow\n while fast is not None and fast.next is not None:\n fast = fast.next.next\n prev = slow\n slow = slow.next\n prev.next = None\n return slow \n\n @staticmethod\n def reverse(head): \n p = head\n if p is None or p.next is None:\n return p\n q = p\n p = p.next\n while p is not None:\n tmp = p.next\n p.next = q\n if q == head:\n q.next = None\n q = p\n p = tmp\n return q\n\n @staticmethod\n def reorder(head1, head2):\n cur1 = head1\n cur2 = head2\n tmp = None\n while cur1.next is not None:\n tmp = cur1.next\n cur1.next = cur2\n cur1 = tmp\n\n tmp = cur2.next\n cur2.next = cur1\n cur2 = tmp\n\n cur1.next = cur2 \n\n def __call__(self, link_head):\n before = link_head\n rest =self.find_mid_node(before)\n rest = self.reverse(rest) \n self.reorder(before, rest)\n return 
link_head\n \n \nclass Node: \n def __init__(self, data=None):\n self.data = data\n self.next = None\n\nclass LinkList: \n def __init__(self):\n self.head = None\n\n def append(self, x):\n if self.head is None:\n self.head = Node(x)\n return self\n p = self.head\n while p.next is not None:\n p = p.next\n p.next = Node(x)\n return self\n \nlink1 = LinkList()\nlink1.append(1).append(2).append(3).append(4).append(5).append(6)\n\nhead9 = link1.head\n\np = head9\nprint(\"排序前: \", end=\" \")\nwhile p is not None:\n print(p.data, end=\"\\t\")\n p = p.next\n\ninstance = ReorderLink() \nhead9 = instance(head9) \nprint()\n\np = head9\nprint(\"排序后: \", end=\" \")\nwhile p is not None:\n print(p.data, end=\"\\t\")\n p = p.next","repo_name":"lixiangwang/SEU---Data-structure-and-algorithm-design","sub_path":"线性结构/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"74187330555","text":"import os\nimport shutil\n\nget=os.getcwd()\nno_fold=10\ndest='/home/chinmay/7jan2022backup11/mytemp1/'\n\nfor i in range(1,no_fold+1):\n os.chdir(str(i))\n shutil.copy('dcsq_no_'+str(i),dest+str(i))\n shutil.copy('dr_no_'+str(i),dest+str(i))\n os.chdir(get)\n print(os.getcwd())\n","repo_name":"chinmaypradhan139/UcorrectedmodifiedIESH","sub_path":"exp_corrected_Uversion/extrafiles/suptrans.py","file_name":"suptrans.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34364811822","text":"from hudescriptor import huDescriptor\nfrom colordescriptor import ColorDescriptor\nimport glob\nimport cv2\nimport os\n\nhu = huDescriptor()\ncd = ColorDescriptor((8, 12, 3))\n\nhuOutput = open(\"conf/huFeatures.csv\", \"w\")\ncolorOutput = open(\"conf/colorFeatures.csv\", \"w\")\n\nfor imagePath in glob.glob(\"static/datasets/*\"):\n imageID = os.path.basename(imagePath)\n image = cv2.imread(imagePath)\n\n hufeatures = hu.huMoments(image)\n colorFeatures = cd.describe(image)\n\n hufeatures = [str(abs(float(f))) for f in hufeatures]\n colorFeatures = [str(float(cf)) for cf in colorFeatures]\n \n huOutput.write(\"%s,%s\\n\" % (imageID, \",\".join(hufeatures)))\n colorOutput.write(\"%s,%s\\n\" % (imageID, \",\".join(colorFeatures)))\n\nhuOutput.close()\ncolorOutput.close()\n\nprint(\"Configuracion realizada\")\n","repo_name":"fabirian/CBIR","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38831809640","text":"# -*- coding:utf-8 -*-\n\n# base\n# https://linux.thai.net/~thep/datrie/datrie.html\n# http://jorbe.sinaapp.com/2014/05/11/datrie/\n# http://www.hankcs.com/program/java/%E5%8F%8C%E6%95%B0%E7%BB%84trie%E6%A0%91doublearraytriejava%E5%AE%9E%E7%8E%B0.html\n# (komiya-atsushi/darts-java | 先建立Trie树,再构造DAT,为siblings先找到合适的空间)\n# https://blog.csdn.net/kissmile/article/details/47417277\n# http://nark.cc/p/?p=1480\n# https://github.com/midnight2104/midnight2104.github.io/blob/58b5664b3e16968dd24ac5b1b3f99dc21133b8c4/_posts/2018-8-8-%E5%8F%8C%E6%95%B0%E7%BB%84Trie%E6%A0%91(DoubleArrayTrie).md\n\n# 不需要构造真正的Trie树,直接用字符串,构造对应node,因为words是排过序的\n# todo : error info\n# todo : performance test\n# todo : resize\n# warning: code=0表示叶子节点可能会有隐患(正常词汇的情况下是ok的)\n# 修正: 由于想要回溯字符串的效果,叶子节点和base不能重合(这样叶子节点可以继续记录其他值比如频率),叶子节点code: 0->-1\n# 但是如此的话,叶子节点可能会与正常节点冲突? 
找begin的使用应该是考虑到的?\n# from __future__ import print_function\nclass DATrie(object):\n class Node(object):\n\n def __init__(self, code, depth, left, right):\n self.code = code\n self.depth = depth\n self.left = left\n self.right = right\n\n def __init__(self):\n self.MAX_SIZE = 2097152 # 65536 * 32\n self.base = [0] * self.MAX_SIZE\n self.check = [-1] * self.MAX_SIZE # -1 表示空\n self.used = [False] * self.MAX_SIZE\n self.nextCheckPos = 0 # 详细 见后面->当数组某段使用率达到某个值时记录下可用点,以便下次不再使用\n self.size = 0 # 记录总共用到的空间\n\n # 需要改变size的时候调用,这里只能用于build之前。cuz没有打算复制数据.\n def resize(self, size):\n self.MAX_SIZE = size\n self.base = [0] * self.MAX_SIZE\n self.check = [-1] * self.MAX_SIZE\n self.used = [False] * self.MAX_SIZE\n\n # 先决条件是self.words ordered 且没有重复\n # siblings至少会有一个\n def fetch(self, parent): ###获取parent的孩子,存放在siblings中,并记录下其左右截至\n depth = parent.depth\n\n siblings = [] # size == parent.right-parent.left\n i = parent.left\n while i < parent.right: # 遍历所有子节点,right-left+1个单词\n s = self.words[i][depth:] # 词的后半部分\n if s == '':\n siblings.append(\n self.Node(code=-1, depth=depth + 1, left=i, right=i + 1)) # 叶子节点\n else:\n c = ord(s[0]) # 字符串中每个汉字占用3个字符(code,实际也就当成符码),将每个字符转为数字 ,树实际是用这些数字构建的\n # print type(s[0]),c\n if siblings == [] or siblings[-1].code != c:\n siblings.append(\n self.Node(code=c, depth=depth + 1, left=i, right=i + 1)) # 新建节点\n else: # siblings[-1].code == c\n siblings[-1].right += 1 # 已经是排过序的可以直接计数+1\n i += 1\n # siblings\n return siblings\n\n # 在insert之前,认为可以先排序词汇,对base的分配检查应该是有利的\n # 先构建树,再构建DAT,再销毁树\n def build(self, words):\n words = sorted(list(set(words))) # 去重排序\n # for word in words:print word.decode('utf-8')\n self.words = words\n # todo: 销毁_root\n _root = self.Node(code=0, depth=0, left=0, right=len(self.words)) # 增加第一个节点\n self.base[0] = 1\n siblings = self.fetch(_root)\n # for ii in words: print ii.decode('utf-8')\n # print 'siblings len',len(siblings)\n # for i in siblings: print i.code\n self.insert(siblings, 0) # 插入根节点的第一层孩子\n # while False: # 利用队列来实现非递归构造\n # pass\n del self.words\n print(\"DATrie builded.\")\n\n def insert(self, siblings, parent_base_idx):\n \"\"\" parent_base_idx为父节点base index, siblings为其子节点们 \"\"\"\n # 暂时按komiya-atsushi/darts-java的方案\n # 总的来讲是从0开始分配beigin]\n # self.used[parent_base_idx] = True\n\n begin = 0\n pos = max(siblings[0].code + 1, self.nextCheckPos) - 1 # 从第一个孩子的字符码位置开始找,因为排过序,前面的都已经使用\n nonzero_num = 0 # 非零统计\n first = 0\n\n begin_ok_flag = False # 找合适的begin\n while not begin_ok_flag:\n pos += 1\n if pos >= self.MAX_SIZE:\n raise Exception(\"no room, may be resize it.\")\n if self.check[pos] != -1 or self.used[pos]: # check——check数组,used——占用标记,表明pos位置已经占用\n nonzero_num += 1 # 已被使用\n continue\n elif first == 0:\n self.nextCheckPos = pos # 第一个可以使用的位置,记录?仅执行一遍\n first = 1\n\n begin = pos - siblings[0].code # 第一个孩子节点对应的begin\n\n if begin + siblings[-1].code >= self.MAX_SIZE:\n raise Exception(\"no room, may be resize it.\")\n\n if self.used[begin]: # 该位置已经占用\n continue\n\n if len(siblings) == 1: # 只有一个节点\n begin_ok_flag = True\n break\n\n for sibling in siblings[1:]:\n if self.check[begin + sibling.code] == -1 and self.used[\n begin + sibling.code] is False: # 对于sibling,begin位置可用\n begin_ok_flag = True\n else:\n begin_ok_flag = False # 用一个不可用,则begin不可用\n break\n\n # 得到合适的begin\n\n # -- Simple heuristics --\n # if the percentage of non-empty contents in check between the\n # index 'next_check_pos' and 'check' is greater than some constant value\n # (e.g. 
0.9), new 'next_check_pos' index is written by 'check'.\n\n # 从位置 next_check_pos 开始到 pos 间,如果已占用的空间在95%以上,下次插入节点时,直接从 pos 位置处开始查找成功获得这一层节点的begin之后得到,影响下一次执行insert时的查找效率\n if (nonzero_num / (pos - self.nextCheckPos + 1)) >= 0.95:\n self.nextCheckPos = pos\n\n self.used[begin] = True\n\n # base[begin] 记录 parent chr -- 这样就可以从节点回溯得到字符串\n # 想要可以回溯的话,就不能在字符串末尾节点记录值了,或者给叶子节点找个0以外的值? 0->-1\n # self.base[begin] = parent_base_idx #【*】\n # print 'begin:',begin,self.base[begin]\n\n if self.size < begin + siblings[-1].code + 1:\n self.size = begin + siblings[-1].code + 1\n\n for sibling in siblings: # 更新所有子节点的check base[s]+c=t & check[t]=s\n self.check[begin + sibling.code] = begin\n\n for sibling in siblings: # 由于是递归的情况,需要先处理完check\n # darts-java 还考虑到叶子节点有值的情况,暂时不考虑(需要记录的话,记录在叶子节点上)\n if sibling.code == -1:\n self.base[begin + sibling.code] = -1 * sibling.left - 1\n else:\n new_sibings = self.fetch(sibling)\n h = self.insert(new_sibings, begin + sibling.code) # 插入孙子节点,begin + sibling.code为子节点的位置\n self.base[begin + sibling.code] = h # 更新base所有子节点位置的转移基数为[其孩子最合适的begin]\n\n return begin\n\n def search(self, word):\n \"\"\" 查找单词是否存在 \"\"\"\n p = 0 # root\n if word == '':\n return False\n for c in word:\n c = ord(c)\n next = abs(self.base[p]) + c\n # print(c, next, self.base[next], self.check[next])\n if next > self.MAX_SIZE: # 一定不存在\n return False\n # print(self.base[self.base[p]])\n if self.check[next] != abs(self.base[p]):\n return False\n p = next\n\n # print('*'*10+'\\n', 0, p, self.base[self.base[p]], self.check[self.base[p]])\n # 由于code=0,实际上是base[leaf_node->base+leaf_node.code],这个负的值本身没什么用\n # 修正:left code = -1\n if self.base[self.base[p] - 1] < 0 and self.base[p] == self.check[self.base[p] - 1]:\n # print p\n return True\n else: # 不是词尾\n return False\n\n def common_prefix_search(self, content):\n \"\"\" 公共前缀匹配 \"\"\"\n # 用了 darts-java 写法,再仔细看一下\n result = []\n b = self.base[0] # 从root开始\n p = 0\n n = 0\n tmp_str = \"\"\n for c in content:\n c = ord(c)\n p = b\n n = self.base[p - 1] # for iden leaf\n\n if b == self.check[p - 1] and n < 0:\n result.append(tmp_str)\n\n tmp_str += chr(c)\n # print(tmp_str )\n p = b + c # cur node\n\n if b == self.check[p]:\n b = self.base[p] # next base\n else: # no next node\n return result\n\n # 判断最后一个node\n p = b\n n = self.base[p - 1]\n\n if b == self.check[p - 1] and n < 0:\n result.append(tmp_str)\n\n return result\n\n def Find_Last_Base_index(self, word):\n b = self.base[0] # 从root开始\n p = 0\n # n = 0\n # print len(word)\n tmp_str = \"\"\n for c in word:\n c = ord(c)\n p = b\n p = b + c # cur node, p is new base position, b is the old\n\n if b == self.check[p]:\n tmp_str += chr(c)\n b = self.base[p] # next base\n else: # no next node\n return -1\n # print '====', p, self.base[p], tmp_str.decode('utf-8')\n return p\n\n def GetAllChildWord(self, index):\n result = []\n # result.append(\"\")\n # print self.base[self.base[index]-1],'++++'\n if self.base[self.base[index] - 1] <= 0 and self.base[index] == self.check[self.base[index] - 1]:\n result.append(\"\")\n # return result\n for i in range(0, 256):\n # print(chr(i))\n if self.check[self.base[index] + i] == self.base[index]:\n # print self.base[index],(chr(i)),i\n for s in self.GetAllChildWord(self.base[index] + i):\n # print s\n result.append(chr(i) + s)\n return result\n\n def FindAllWords(self, word):\n result = []\n last_index = self.Find_Last_Base_index(word)\n if last_index == -1:\n return result\n for end in self.GetAllChildWord(last_index):\n result.append(word + end)\n return result\n\n def 
get_string(self, chr_id):\n \"\"\" 从某个节点返回整个字符串, todo:改为私有 \"\"\"\n if self.check[chr_id] == -1:\n raise Exception(\"不存在该字符。\")\n child = chr_id\n s = []\n while 0 != child:\n base = self.check[child]\n print(base, child)\n label = chr(child - base)\n s.append(label)\n print(label)\n child = self.base[base]\n return \"\".join(s[::-1])\n\n def get_use_rate(self):\n \"\"\" 空间使用率 \"\"\"\n return self.size / self.MAX_SIZE\n\n\nif __name__ == '__main__':\n\n\n # for word in words:print [word] #一个汉字的占用3个字符,\n words = []\n for line in open('/data/ylx/ylx/data/entities.txt').readlines():\n # #print line.strip().decode('utf-8')\n words.append(line.strip())\n\n datrie = DATrie()\n datrie.build(words)\n # for line in open('1000.txt').readlines():\n # print(datrie.search(line.strip()),end=' ')\n # print('-'*10)\n # print(datrie.search(\"景华路\"))\n # print('-'*10)\n # print(datrie.search(\"景华路号\"))\n\n # print('-'*10)\n # for item in datrie.common_prefix_search(\"商业模式\"): print(item.decode('utf-8'))\n # for item in datrie.common_prefix_search(\"商业模式\"):print item.decode('utf-8')\n # print(datrie.common_prefix_search(\"一举成名天下知\"))\n # print(datrie.base[:1000])\n # print('-'*10)\n # print(datrie.get_string(21520))\n # index=datrie.Find_Last_Base_index(\"商业\")\n # print(index),'-=-=-='\n # print datrie.search(\"商业\"),datrie.search(\"商业\"),datrie.search(\"商业模式\")\n # print index, datrie.check[datrie.base[index]+230],datrie.base[index]\n for ii in datrie.FindAllWords('小红帽特工队的续作是?'):\n print (ii.decode('utf-8'))\n # print(datrie.Find_Last_Base_index(\"一举\")[2].decode('utf-8'))\n# print()","repo_name":"sssirus/QA-keras","sub_path":"ner/TrieTree.py","file_name":"TrieTree.py","file_ext":"py","file_size_in_byte":13213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"39561743774","text":"# Lúcio v2\n# Let's break it down\n# @author Hex#8998 and Logiwire#5452\n# @version 2.0.0\n\nimport discord, yaml, glob\nfrom os.path import dirname, basename, isfile, join\nimport partybot.manager\nimport partybot.queuer\nimport atexit\nimport os\n\nclass Lucio(discord.Client):\n po = None\n config = None\n manager = None\n channel = None\n now_playing = None\n active_voice_channel = None\n _queuer = None\n\n # load the bot\n def load(self):\n print(\"Launching Lúcio v2 - by @hex#8998 and @logiwire#5452\\n\")\n self.load_config()\n self.manager = partybot.manager.CommandRouter(self)\n\n # create required directories\n try:\n os.mkdir(\"cache\")\n except FileExistsError: # file already exists\n pass\n\n # load required variables\n self._queuer = partybot.queuer.Queuer(self)\n \n # method that gets executed when the bot is ready\n async def on_ready(self):\n for guild in self.guilds:\n print(\"Logged in as\",self.user,\"in\",guild.name)\n try: # let's disconnect from the voice channel. # IMPORTANT: ALLOWS PLAYING MUSIC AFTER REBOOTING THE BOT\n await (await discord.VoiceChannel.connect(guild.me.voice.channel)).disconnect()\n except Exception:\n pass\n #await self.get_channel(int(self.config[\"default-channel\"])).send(\"Howdy everyone! :wave: I'm online. 
Go ahead and summon me in your channel using **\" + self.config[\"prefix\"] + \"join**\")\n\n\n try:\n self.active_voice_channel = await discord.VoiceChannel.connect(self.get_channel(int(self.config[\"default-voice-channel\"])),timeout=1.0) # join user channel\n self.queuer.load_queue()\n await self.queuer.play_queue() # play music\n except AttributeError as e:\n if \"'NoneType' object has no attribute 'channel'\" in str(e):\n print(\"ERROR: Cannot join channel id\",self.config[\"default-voice-channel\"])\n return\n else:\n raise e\n\n # redirects to the message handler\n async def on_message(self, msg):\n if msg.content.startswith(self.config[\"prefix\"]):\n print(msg.author,\"said:\",msg.content)\n if msg.author == self.user:\n print(\"Bot said:\",msg.content)\n return\n await self.manager.handle(msg)\n\n # loads the configuration file\n def load_config(self):\n if self.config is None:\n self.file = open(\"config.yml\")\n self.config = yaml.load(self.file, Loader=yaml.FullLoader)\n\n # sets last video played\n def set_lvp(self, lvp):\n with open(\"lvp\", \"w+\") as file:\n file.write(str(lvp))\n\n def at_exit(self):\n self.now_playing.cleanup()\n\n @property\n def queuer(self):\n return self._queuer\n\n @property\n def lvp(self):\n with open(\"lvp\", \"r\") as file:\n return file.read()\n\n \n\nlucio = Lucio()\nlucio.load()\nlucio.run(lucio.config[\"token\"])","repo_name":"shamelin/partybot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"30318360494","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2022-02-03 23:24\n# @Author : wuyingwen\n# @Contact : wuyingwen66@163.com\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer, Dense\n\nclass Res_layer(Layer):\n\tdef __init__(self, hidden_unit, embed_layers_len):\n\t\tsuper(Res_layer, self).__init__()\n\t\tself.dense_layer = Dense(hidden_unit, activation='relu')\n\t\tself.output_layer = Dense(embed_layers_len, activation=None)\n\n\tdef call(self, inputs, **kwargs):\n\t\tx1 = self.dense_layer(inputs)\n\t\tx2 = self.output_layer(x1)\n\t\toutputs = tf.nn.relu(x2 + inputs)\n\t\treturn outputs\n\n\n","repo_name":"WUYvonne/model_zoo","sub_path":"DeepCrossing/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"29831548460","text":"import logging\nfrom typing import List\n\nfrom pedrec.models.experiments.experiment_description import ExperimentDescription\nfrom pedrec.training.experiments.experiment_train_helper import EpochValidationResults\nfrom pedrec.utils.string_helper import get_seconds_as_string\n\n\ndef results_to_csv(epoch_validation_results: List[EpochValidationResults]):\n header_fields = get_header_fields(epoch_validation_results)\n output = f\"{','.join(header_fields)}\\n\"\n for epoch_results in epoch_validation_results:\n output += ','.join(get_epoch_values(epoch_results)) + \"\\n\"\n return output\n\n\ndef results_to_md_table(epoch_validation_results: List[EpochValidationResults]):\n header_fields = get_header_fields(epoch_validation_results)\n output = f\"| {' | '.join(header_fields)} |\\n\"\n output += f\"|\"\n for field in header_fields:\n output += f\" {'-' * len(field)} |\"\n output += \"\\n\"\n for epoch_results in epoch_validation_results:\n values = get_epoch_values(epoch_results)\n output += f\"|\"\n for field, value 
in zip(header_fields, values):\n output += f\" {value.ljust(len(field))} |\"\n output += \"\\n\"\n return output\n\n\ndef log_epoch_validation_results(results: EpochValidationResults):\n csv = ','.join(get_epoch_values(results))\n logger = logging.getLogger(__name__)\n logger.info(csv)\n\n\ndef get_header_fields(epoch_validation_results: List[EpochValidationResults]) -> List[str]:\n fields = []\n fields.append(\"Epoch\")\n fields.append(\"Train Loss\")\n for val_set_name, result in epoch_validation_results[0].validation_results.items():\n if result.pose2d_pck is not None:\n fields.append(f\"{val_set_name} PCK@0.05\")\n fields.append(f\"{val_set_name} PCK@0.2\")\n if result.pose3d is not None:\n fields.append(f\"{val_set_name} MPJPE\")\n fields.append(f\"{val_set_name} MRCJP\")\n fields.append(f\"{val_set_name} MRCD\")\n if result.pose2d_conf is not None:\n fields.append(f\"{val_set_name} JointAcc\")\n if result.orientation is not None:\n fields.append(f\"{val_set_name} Body Sph.Dist.\")\n fields.append(f\"{val_set_name} Body.Phi.Ang.Dist.\")\n fields.append(f\"{val_set_name} Head Sph.Dist.\")\n fields.append(f\"{val_set_name} Head.Phi.Ang.Dist.\")\n if result.env_position is not None:\n fields.append(f\"{val_set_name} EnvPosDistMM.\")\n fields.append(f\"{val_set_name} Val Loss\")\n fields.append(f\"{val_set_name} Val Time\")\n fields.append(\"Train Time\")\n return fields\n\n\ndef get_epoch_values(epoch_results: EpochValidationResults) -> List[str]:\n values: List[str] = []\n values.append(f\"{epoch_results.epoch}\")\n values.append(f\"{epoch_results.train_loss:.5f}\")\n for val_set_name, result in epoch_results.validation_results.items():\n if result.pose2d_pck is not None:\n values.append(f\"{result.pose2d_pck.pck_05_mean:.2f}\")\n values.append(f\"{result.pose2d_pck.pck_2_mean:.2f}\")\n if result.pose3d is not None:\n values.append(f\"{result.pose3d.mpjpe_mean:.2f}\")\n values.append(f\"{result.pose3d.pct_correct_joint_position_mean:.2f}\")\n values.append(f\"{result.pose3d.pct_correct_depth_mean:.2f}\")\n if result.pose2d_conf is not None:\n values.append(f\"{result.pose2d_conf.conf_acc:.2f}\")\n if result.orientation is not None:\n values.append(f\"{result.orientation.body.spherical_distance:.2f}\")\n values.append(f\"{result.orientation.body.angle_error_phi:.2f}\")\n values.append(f\"{result.orientation.head.spherical_distance:.2f}\")\n values.append(f\"{result.orientation.head.angle_error_phi:.2f}\")\n if result.env_position is not None:\n values.append(f\"{result.env_position.distance_mm:.2f}\")\n values.append(f\"{result.loss:.4f}\")\n values.append(f\"{get_seconds_as_string(result.val_duration)}\")\n values.append(f\"{get_seconds_as_string(epoch_results.train_time)}\")\n return values\n\n\ndef get_experiment_protocol(experiment_description: ExperimentDescription):\n md = f\"# {experiment_description.net_name}\\n\" \\\n f\"## Trial Name\\n\" \\\n f\"**{experiment_description.experiment_name}**\\n\" \\\n f\"## Initialization\\n\" \\\n f\"| {'Network Part'.ljust(30)} | {'Initialization'.ljust(60)} |\\n\" \\\n f\"| {'-' * 30} | {'-' * 60} |\\n\"\n for network_part in experiment_description.net_layer_names:\n md += f\"| {network_part.ljust(30)} | {'TODO'.ljust(60)} |\\n\"\n\n md += f\"Notes. 
{experiment_description.initialization_notes}\\n\" \\\n f\"## Datasets\\n\" \\\n f\"### Training\\n\" \\\n f\"| {'Dataset'.ljust(25)} | Subsampling | Full set length | Used length |\\n\" \\\n f\"| {'-' * 25} | {'-' * 11} | {'-' * 15} | {'-' * 11} |\\n\"\n for dataset in experiment_description._train_sets:\n md += f\"| {dataset.name.ljust(25)} | {str(dataset.subsampling).ljust(11)} | {str(dataset.full_length).ljust(15)} | {str(dataset.used_length).ljust(11)} |\\n\"\n md += f\"### Validation\\n\" \\\n f\"| {'Dataset'.ljust(25)} | Subsampling | Full set length | Used length |\\n\" \\\n f\"| {'-' * 25} | {'-' * 11} | {'-' * 15} | {'-' * 11} |\\n\"\n for dataset in experiment_description._val_sets:\n md += f\"| {dataset.name.ljust(25)} | {str(dataset.subsampling).ljust(11)} | {str(dataset.full_length).ljust(15)} | {str(dataset.used_length).ljust(11)} |\\n\"\n md += f\"## Augmentation\\n\" \\\n f\"### Training Augmentations - COCO\\n\" \\\n f\"| {'Augmentation'.ljust(25)} | {'Value'.ljust(25)} |\\n\" \\\n f\"| {'-' * 25} | {'-' * 25} |\\n\" \\\n f\"| {'Scale'.ljust(25)} | {str(experiment_description.coco_train_dataset_cfg.scale_factor).ljust(25)} |\\n\" \\\n f\"| {'Flip'.ljust(25)} | {str(experiment_description.coco_train_dataset_cfg.flip).ljust(25)} |\\n\" \\\n f\"| {'Rotate'.ljust(25)} | {str(experiment_description.coco_train_dataset_cfg.rotation_factor).ljust(25)} |\\n\" \\\n f\"### Training Augmentations - SIM\\n\" \\\n f\"| {'Augmentation'.ljust(25)} | {'Value'.ljust(25)} |\\n\" \\\n f\"| {'-' * 25} | {'-' * 25} |\\n\" \\\n f\"| {'Scale'.ljust(25)} | {str(experiment_description.sim_train_dataset_cfg.scale_factor).ljust(25)} |\\n\" \\\n f\"| {'Flip'.ljust(25)} | {str(experiment_description.sim_train_dataset_cfg.flip).ljust(25)} |\\n\" \\\n f\"| {'Rotate'.ljust(25)} | {str(experiment_description.sim_train_dataset_cfg.rotation_factor).ljust(25)} |\\n\" \\\n f\"### Training Augmentations - H36M\\n\" \\\n f\"| {'Augmentation'.ljust(25)} | {'Value'.ljust(25)} |\\n\" \\\n f\"| {'-' * 25} | {'-' * 25} |\\n\" \\\n f\"| {'Scale'.ljust(25)} | {str(experiment_description.h36m_train_dataset_cfg.scale_factor).ljust(25)} |\\n\" \\\n f\"| {'Flip'.ljust(25)} | {str(experiment_description.h36m_train_dataset_cfg.flip).ljust(25)} |\\n\" \\\n f\"| {'Rotate'.ljust(25)} | {str(experiment_description.h36m_train_dataset_cfg.rotation_factor).ljust(25)} |\\n\"\n md += f\"## General data preparation\\n\" \\\n \"```python\\n\" \\\n \"trans = transforms.Compose([\\n\" \\\n \" transforms.ToTensor(),\\n\" \\\n \" transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\\n\" \\\n \"])\\n```\\n\" \\\n f\"suggested lr: {experiment_description.suggested_lr:.2e}\\n\"\n\n for round_num, round in enumerate(experiment_description.experiment_rounds):\n md += f\"## Round {round_num}\\n\" \\\n f\"- Epochs: **{round.num_epochs}**\\n\" \\\n f\"- LR Scheduler: **{round.scheduler.__class__.__name__}**\\n\" \\\n f\"### Optimizer\\n\" \\\n f\"| {'Property'.ljust(25)} | {'Value'.ljust(25)} |\\n\" \\\n f\"| {'-' * 25} | {'-' * 25} |\\n\" \\\n f\"| {'name'.ljust(25)} | {str(round.optimizer.__class__.__name__).ljust(25)} |\\n\"\n for prop, value in round.optimizer_parameters.items():\n md += f\"| {prop.ljust(25)} | {str(value).ljust(25)} |\\n\"\n md += f\"### LRs\\n\" \\\n f\"| {'Network Part'.ljust(25)} | {'LRs'.ljust(20)} | Frozen? 
|\\n\" \\\n f\"| {'-' * 25} | {'-' * 20} | {'-' * 7} |\\n\"\n for layer_idx, (layer, layer_name) in enumerate(zip(experiment_description.net_layers, experiment_description.net_layer_names)):\n lr1 = round.max_lrs[layer_idx * 2]\n lr2 = round.max_lrs[layer_idx * 2 + 1]\n md += f\"| {layer_name.ljust(25)} | {f'{lr1:.2e}, {lr2:.2e}'.ljust(20)} | {str(layer in round.frozen_layers).ljust(7)} |\\n\"\n md += f\"### Results\\n\" \\\n f\"{results_to_md_table(round.validation_results)}\"\n return md\n\n\ndef save_log(log: str, output_path: str):\n with open(output_path, \"w\") as f:\n f.write(log)","repo_name":"noboevbo/PedRec","sub_path":"pedrec/training/experiments/experiment_log_helper.py","file_name":"experiment_log_helper.py","file_ext":"py","file_size_in_byte":8816,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"96"} +{"seq_id":"21374715745","text":"import pyrebase\nimport socket\nfrom subprocess import PIPE, Popen\nimport time, sys\n\nHOST = ''\nPORT = 8134\nserverStatus = 0\n\nconfig = {\n\t\"apiKey\" : \"AIzaSyABjB5OCyfUh3YbhMkcKsYZmqWgJcyJybM\",\n\t\"authDomain\" : \"worapp-8bba7.firebaseapp.com\",\n\t\"databaseURL\" : \"https://worapp-8bba7.firebaseio.com\",\n\t\"storageBucket\" : \"worapp-8bba7.appspot.com\",\n\t\"messagingSenderId\" : \"786995450047\",\n\t\"serviceAccount\" : \"WORAPP.json\"\n}\n\nfirebase = pyrebase.initialize_app(config)\n\nauth = firebase.auth()\n\n#authenticate a user\nuser = auth.sign_in_with_email_and_password(\"pyserver@worapp-8bba7.iam.gserviceaccount.com\", \"d17aedfc4f85980f41ca34ab3c597b9e124e773c\")\n\ndb = firebase.database()\n\n# test = db.child(\"test\").get()\n# print(test)\n\ndef stream_handler(message):\n\tglobal p\n\tprint(message[\"event\"])\n\tprint(message[\"path\"])\n\tprint(message[\"data\"])\n\tif message[\"path\"] == b'/':\n\t\treturn;\n\tif serverStatus == 2:\n\t\tif message[\"data\"] == True:\n\t\t\tp.stdin.write(b'1\\n')\n\t\telse:\n\t\t\tp.stdin.write(b'0\\n');\n\t\tp.stdin.flush()\n\t\ttime.sleep(1);\n\t\tdata = p.stdout.readline()\n\t\tprint('Recieved: '+str(data));\n\nmy_stream = db.child(\"test\").stream(stream_handler)\n\np = Popen(['./server'], stdin=PIPE, stdout=PIPE)\n\ntime.sleep(2)\n\nout = p.stdout.readline()\nprint(out)\nif b'Error' in out:\n\tsys.exit()\n\nserverStatus = 1;\ntime.sleep(10);\n\nout = p.stdout.readline()\nprint(out)\nif b'Error' in out:\n\tsys.exit()\n\nserverStatus = 2;\ntime.sleep(1);\n\n# p.stdin.write(b'1\\n')\n# p.stdin.flush()\n# data = p.stdout.readline()\n# print(data)\n","repo_name":"WheelsOnRoll/PyreBase","sub_path":"pyserver.py","file_name":"pyserver.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7972724129","text":"# Measures Temperature, Humidity, Pressure\n# BME280 - Adafruit \n#Write the data to a file - a time column, temperature, humidity, pressure\n# look up Adafruit CircuitPython BME280 module\n# update code to use that module \n\nimport time\nimport board\nfrom adafruit_bme280 import basic as adafruit_bme280\ni2c = board.I2C() # uses board.SCL and board.SDA\nsensor= adafruit_bme280.Adafruit_BME280_I2C(i2c)\ntimes = []\ntemperatures = []\n\nstart_time = time.time()\nrun_time = 10\nstop_time = start_time + run_time\ncurrent_time = time.time()\nwhile current_time < stop_time:\n\tcurrent_time = time.time()\n\ttemp = 
sensor.temperature\n\tprint(temp)\n\ttimes.append(current_time)\n\ttemperature.append(temp)\n\ttime.sleep(1)\n\t\nprint(temperatures)\n","repo_name":"litsee/E11-Lab-Work","sub_path":"weather_daq.py","file_name":"weather_daq.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"13353453049","text":"from app import app\nfrom app.models.models import User,db\nfrom flask import Flask,request,render_template,redirect,url_for,flash,session\n\n\n\n@app.route(\"/user/\", methods=['GET', 'POST'])\ndef user(username):\n if not username:\n flash(\"Invalid username\", \"danger\")\n return redirect(url_for('home'))\n user=User.query.filter_by(username=username).first()\n if not user:\n flash(\"User not found\",'danger')\n return redirect(url_for('home'))\n if request.method == 'GET':\n return render_template(\"user.html\", user=user)","repo_name":"Morball/Marketplace","sub_path":"app/views/user/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14743935617","text":"from heapq import heappop, heappush\n\nfrom shapely.errors import TopologicalError\nfrom shapely.geometry import Point\n\n\nclass Cell:\n \"\"\"A `Cell`'s centroid property is a potential solution to finding the pole\n of inaccessibility for a given polygon. Rich comparison operators are used\n for sorting `Cell` objects in a priority queue based on the potential\n maximum distance of any theoretical point within a cell to a given\n polygon's exterior boundary.\n \"\"\"\n\n def __init__(self, x, y, h, polygon):\n self.x = x\n self.y = y\n self.h = h # half of cell size\n self.centroid = Point(x, y) # cell centroid, potential solution\n\n # distance from cell centroid to polygon exterior\n self.distance = self._dist(polygon)\n\n # max distance to polygon exterior within a cell\n self.max_distance = self.distance + h * 1.4142135623730951 # sqrt(2)\n\n # rich comparison operators for sorting in minimum priority queue\n def __lt__(self, other):\n return self.max_distance > other.max_distance\n\n def __le__(self, other):\n return self.max_distance >= other.max_distance\n\n def __eq__(self, other):\n return self.max_distance == other.max_distance\n\n def __ne__(self, other):\n return self.max_distance != other.max_distance\n\n def __gt__(self, other):\n return self.max_distance < other.max_distance\n\n def __ge__(self, other):\n return self.max_distance <= other.max_distance\n\n def _dist(self, polygon):\n \"\"\"Signed distance from Cell centroid to polygon outline. The returned\n value is negative if the point is outside of the polygon exterior\n boundary.\n \"\"\"\n inside = polygon.contains(self.centroid)\n distance = self.centroid.distance(polygon.exterior)\n for interior in polygon.interiors:\n distance = min(distance, self.centroid.distance(interior))\n if inside:\n return distance\n return -distance\n\n\ndef polylabel(polygon, tolerance=1.0):\n \"\"\"Finds pole of inaccessibility for a given polygon. Based on\n Vladimir Agafonkin's https://github.com/mapbox/polylabel\n\n Parameters\n ----------\n polygon : shapely.geometry.Polygon\n tolerance : int or float, optional\n `tolerance` represents the highest resolution in units of the\n input geometry that will be considered for a solution. 
(default\n value is 1.0).\n\n Returns\n -------\n shapely.geometry.Point\n A point representing the pole of inaccessibility for the given input\n polygon.\n\n Raises\n ------\n shapely.errors.TopologicalError\n If the input polygon is not a valid geometry.\n\n Example\n -------\n >>> from shapely import LineString\n >>> polygon = LineString([(0, 0), (50, 200), (100, 100), (20, 50),\n ... (-100, -20), (-150, -200)]).buffer(100)\n >>> polylabel(polygon, tolerance=10).wkt\n 'POINT (59.35615556364569 121.83919629746435)'\n \"\"\"\n if not polygon.is_valid:\n raise TopologicalError(\"Invalid polygon\")\n minx, miny, maxx, maxy = polygon.bounds\n width = maxx - minx\n height = maxy - miny\n cell_size = min(width, height)\n h = cell_size / 2.0\n cell_queue = []\n\n # First best cell approximation is one constructed from the centroid\n # of the polygon\n x, y = polygon.centroid.coords[0]\n best_cell = Cell(x, y, 0, polygon)\n\n # Special case for rectangular polygons avoiding floating point error\n bbox_cell = Cell(minx + width / 2.0, miny + height / 2, 0, polygon)\n if bbox_cell.distance > best_cell.distance:\n best_cell = bbox_cell\n\n # build a regular square grid covering the polygon\n x = minx\n while x < maxx:\n y = miny\n while y < maxy:\n heappush(cell_queue, Cell(x + h, y + h, h, polygon))\n y += cell_size\n x += cell_size\n\n # minimum priority queue\n while cell_queue:\n cell = heappop(cell_queue)\n\n # update the best cell if we find a better one\n if cell.distance > best_cell.distance:\n best_cell = cell\n\n # continue to the next iteration if we can't find a better solution\n # based on tolerance\n if cell.max_distance - best_cell.distance <= tolerance:\n continue\n\n # split the cell into quadrants\n h = cell.h / 2.0\n heappush(cell_queue, Cell(cell.x - h, cell.y - h, h, polygon))\n heappush(cell_queue, Cell(cell.x + h, cell.y - h, h, polygon))\n heappush(cell_queue, Cell(cell.x - h, cell.y + h, h, polygon))\n heappush(cell_queue, Cell(cell.x + h, cell.y + h, h, polygon))\n\n return best_cell.centroid\n","repo_name":"shapely/shapely","sub_path":"shapely/algorithms/polylabel.py","file_name":"polylabel.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","stars":3493,"dataset":"github-code","pt":"94"} +{"seq_id":"39444661427","text":"import pyodbc\nfrom indexing import index\nfrom collector import data_collector\n\nconnection = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=localhost\\SQLEXPRESS;Database=master;Trusted_Connection=yes;')\ncursor = connection.cursor()\npages = []\n \ndata = data_collector('04_2020.xlsx', (f'{i}' for i in range(1, 31)))\n#(f'{i}' for i in range(1, 31) make iteration number 1 more than page needed\ninternal_timer = 0\nitem_counter = 0\nfor group in data:\n item_counter += 1\n \n \n for i in range(0, len(group)):\n \n if i == 0:\n date = group[i]\n if i == 1:\n time = group[i]\n if i == 2:\n Tagindex = index(group[i])\n \n if i == 3:\n val = group[i]\n date_time = f'{date} {time}'\n print(f'{date_time}, {Tagindex}, {val} wrote')\n\n \n\n cursor.execute('''INSERT INTO TestDB.dbo.FloatTable (DateAndTime, TagIndex, val) VALUES (?, ?, ?)''', (date_time, Tagindex, val))\n \n internal_timer += 1 \n \n\n\n if internal_timer == 25:\n connection.commit()\n internal_timer = 0 \n\nconnection.commit() \n \n\n \nprint(item_counter,'items 
wrote')\n","repo_name":"rains-t/SQL-API","sub_path":"SQL_API.py","file_name":"SQL_API.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35848244538","text":"# mobile numbers\n# https://www.hackerrank.com/challenges/standardize-mobile-number-using-decorators/problem\n# Let's dive into decorators! You are given mobile numbers.\n# Sort them in ascending order then print them in the standard format shown below:\n# +91 xxxxx xxxxx\n# The given mobile numbers may have +91, 91 or 0 written before the actual digit number.\n# Alternatively, there may not be any prefix at all.\n# Input Format\n# The first line of input contains an integer, the number of mobile phone numbers.\n# lines follow each containing a mobile number.\n# Output Format\n# Print mobile numbers on separate lines in the required format.\n#\n# Sample Input\n# 3\n# 07895462130\n# 919875641230\n# 9195969878\n#\n# Sample Output\n# +91 78954 62130\n# +91 91959 69878\n# +91 98756 41230\n\n\ndef phones_fixer(func):\n def wrapper(nlist):\n result_list = []\n for numbr in nlist:\n result = list(numbr)\n if '+91' in numbr:\n if 10 < len(numbr) < 12:\n result.insert(3, ' ')\n result.insert(-5, ' ')\n else:\n return 'The number is not correct'\n elif len(numbr) == 11:\n result.insert(0, '+')\n result.insert(1, '9')\n result.insert(2, '1')\n result.insert(3, ' ')\n result.remove(result[4])\n result.insert(-5, ' ')\n elif len(numbr) == 12:\n result.insert(0, '+')\n result.insert(3, ' ')\n result.insert(-5, ' ')\n elif len(numbr) == 10:\n result.insert(0, '+')\n result.insert(1, '9')\n result.insert(2, '1')\n result.insert(3, ' ')\n result.insert(-5, ' ')\n else:\n return 'The number is not correct'\n result_list.append(''.join(result))\n return func(result_list)\n return wrapper\n\n\n@phones_fixer\ndef sort_numbers(numbers_list):\n return '\\n'.join(sorted(numbers_list))\n\n\ndef read_numbers():\n n = int(input('Количество номеров: '))\n numbers = []\n for i in range(n):\n number = input('Введите номер: ')\n numbers.append(number)\n return numbers\n\n\nif __name__ == '__main__':\n numbers = read_numbers()\n print(sort_numbers(numbers))\n","repo_name":"Vladyslav92/Python_HW","sub_path":"lesson_8/1_task.py","file_name":"1_task.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27666402355","text":"from tools.seq import *\nimport glob\nimport argparse\n\n# Convert .py to .json file for faster loading\ndef tojson(fname):\n cc = {'title' :'TITLE', \n 'descset' :None, \n 'instrset':None, \n 'crc' :None}\n seq = 'from tools.seq import *\\n'\n seq += open(fname).read()\n exec(compile(seq, fname, 'exec'), {}, cc)\n encoding = [len(cc['instrset'])]\n for instr in cc['instrset']:\n encoding = encoding + instr.encoding()\n\n config = {'title' :cc['title'],\n 'descset' :cc['descset'],\n 'encoding':encoding}\n ofile = fname.replace('.py','.json')\n open(ofile,mode='w').write(json.dumps(config))\n\ndef main():\n parser = argparse.ArgumentParser(description='convert .py to .json')\n parser.add_argument(\"-d\", \"--dir\" , required=True , help=\"directory\")\n args = parser.parse_args()\n\n for p in glob.iglob(args.dir+'/*.py'):\n print('Converting {}'.format(p))\n tojson(p)\n\nif __name__=='__main__':\n 
main()\n","repo_name":"slaclab/lcls2-timing-patterns","sub_path":"tools/tojson.py","file_name":"tojson.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27669450043","text":"import csv\nimport io\nimport os\nimport re\nimport subprocess\nimport torch\nimport requests\nimport pandas as pd\nfrom openxai import dgp_synthetic\nfrom errno import EEXIST\nfrom typing import Any, List\nimport torch.utils.data as data\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom urllib.request import urlopen, urlretrieve\n# from xai_benchmark.dataset.Synthetic import dgp_synthetic\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\n\ndef download_file(url, filename):\n # Download the file from the URL\n subprocess.call([\"wget\", \"-O\", filename, url])\n\n with open(filename, \"r\") as f:\n data = f.read()\n\n # Detect the file format\n if '\\t' in data: # if the file is tab delimited\n # Convert the file to CSV format\n data = io.StringIO(data)\n reader = csv.reader(data, delimiter='\\t')\n output = io.StringIO()\n writer = csv.writer(output)\n for row in reader:\n writer.writerow(row)\n data = output.getvalue()\n\n # Save the file to disk\n with open(filename, 'w', newline='') as f:\n f.write(data)\n\n\nclass TabularDataLoader(data.Dataset):\n def __init__(self, path, filename, label, download=False, scale='minmax', gauss_params=None, file_url=None):\n \n \"\"\"\n Load training dataset\n :param path: string with path to training set\n :param label: string, column name for label\n :param scale: string; 'minmax', 'standard', or 'none'\n :param dict: standard params of gaussian dgp\n :return: tensor with training data\n \"\"\"\n\n self.path = path\n\n # Load Synthetic dataset\n if 'Synthetic' in self.path:\n \n '''\n if download:\n url = 'https://raw.githubusercontent.com/chirag126/data/main/'\n self.mkdir_p(path)\n file_download = url + 'dgp_synthetic.py'\n # import ipdb; ipdb.set_trace()\n urlretrieve(file_download, path + 'dgp_synthetic.py')\n\n if not os.path.isdir(path + 'dgp_synthetic.py'):\n raise RuntimeError(\"Dataset not found. 
You can use download=True to download it\") \n\n from openxai import dgp_synthetic\n \n '''\n\n if gauss_params is None:\n gauss_params = {\n 'n_samples': 2500,\n 'dim': 20,\n 'n_clusters': 10,\n 'distance_to_center': 5,\n 'test_size': 0.25,\n 'upper_weight': 1,\n 'lower_weight': -1,\n 'seed': 564,\n 'sigma': None,\n 'sparsity': 0.25\n }\n \n data_dict, data_dict_train, data_dict_test = dgp_synthetic.generate_gaussians(gauss_params['n_samples'],\n gauss_params['dim'],\n gauss_params['n_clusters'],\n gauss_params['distance_to_center'],\n gauss_params['test_size'],\n gauss_params['upper_weight'],\n gauss_params['lower_weight'],\n gauss_params['seed'],\n gauss_params['sigma'],\n gauss_params['sparsity']).dgp_vars()\n \n self.ground_truth_dict = data_dict\n self.target = label\n \n if 'train' in filename:\n data_dict = data_dict_train\n elif 'test' in filename:\n data_dict = data_dict_test\n else:\n raise NotImplementedError('The current version of DataLoader class only provides training and testing splits')\n \n self.dataset = pd.DataFrame(data_dict['data'])\n data_y = pd.DataFrame(data_dict['target'])\n \n names = []\n for i in range(gauss_params['dim']):\n name = 'x' + str(i)\n names.append(name)\n \n self.dataset.columns = names\n self.dataset['y'] = data_y\n \n # add additional Gaussian related aspects\n self.probs = data_dict['probs']\n self.masks = data_dict['masks']\n self.weights = data_dict['weights']\n self.masked_weights = data_dict['masked_weights']\n self.cluster_idx = data_dict['cluster_idx']\n \n else:\n if download:\n self.mkdir_p(path)\n if file_url is None:\n url = 'https://raw.githubusercontent.com/chirag126/data/main/'\n file_download = url + filename\n urlretrieve(file_download, path + filename)\n else:\n download_file(file_url, path + filename)\n\n if not os.path.isfile(path + filename):\n raise RuntimeError(\"Dataset not found. You can use download=True to download it\")\n\n self.dataset = pd.read_csv(path + filename)\n self.target = label\n\n # Save target and predictors\n self.X = self.dataset.drop(self.target, axis=1)\n \n # Save feature names\n self.feature_names = self.X.columns.to_list()\n self.target_name = label\n\n # Transform data\n if scale == 'minmax':\n self.scaler = MinMaxScaler()\n elif scale == 'standard':\n self.scaler = StandardScaler()\n elif scale == 'none':\n self.scaler = None\n else:\n raise NotImplementedError('The current version of DataLoader class only provides the following transformations: {minmax, standard, none}')\n \n if self.scaler is not None:\n self.scaler.fit_transform(self.X)\n self.data = self.scaler.transform(self.X)\n else:\n self.data = self.X.values\n self.targets = self.dataset[self.target]\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n\n # select correct row with idx\n if isinstance(idx, torch.Tensor):\n idx = idx.tolist()\n \n if 'Synthetic' in self.path:\n return (self.data[idx], self.targets.values[idx], self.weights[idx], self.masks[idx],\n self.masked_weights[idx], self.probs[idx], self.cluster_idx[idx])\n else:\n return (self.data[idx], self.targets.values[idx])\n\n def get_number_of_features(self):\n return self.data.shape[1]\n \n def get_number_of_instances(self):\n return self.data.shape[0]\n\n def mkdir_p(self, mypath):\n \"\"\"Creates a directory. 
equivalent to using mkdir -p on the command line\"\"\"\n try:\n os.makedirs(mypath)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and os.path.isdir(mypath):\n pass\n else:\n raise \n\n\ndef return_loaders(data_name, download=False, batch_size=32, transform=None, scaler='minmax', gauss_params=None):\n \n # Create a dictionary with all available dataset names\n dict = {\n 'adult': ('Adult', transform, 'income'),\n 'compas': ('COMPAS', transform, 'risk'),\n 'german': ('German_Credit_Data', transform, 'credit-risk'),\n 'heloc': ('Heloc', transform, 'RiskPerformance'),\n 'credit': ('Credit', transform, 'SeriousDlqin2yrs'),\n 'synthetic': ('Synthetic', transform, 'y'),\n 'rcdv': ('rcdv1980', transform, 'recid'),\n 'lending-club': ('lending-club', transform, 'loan_repaid'),\n 'student': ('student', transform, 'decision'),\n }\n\n urls = {\n 'rcdv-train': 'https://dataverse.harvard.edu/api/access/datafile/7093737',\n 'rcdv-test': 'https://dataverse.harvard.edu/api/access/datafile/7093739',\n 'lending-club-train': 'https://dataverse.harvard.edu/api/access/datafile/6767839',\n 'lending-club-test': 'https://dataverse.harvard.edu/api/access/datafile/6767838',\n 'student-train': 'https://dataverse.harvard.edu/api/access/datafile/7093733',\n 'student-test': 'https://dataverse.harvard.edu/api/access/datafile/7093734',\n }\n \n if dict[data_name][0] == 'synthetic':\n prefix = './data/' + dict[data_name][0] + '/'\n file_train = 'train'\n file_test = 'test'\n else:\n prefix = './data/' + dict[data_name][0] + '/'\n file_train = data_name + '-train.csv'\n file_test = data_name + '-test.csv'\n\n dataset_train = TabularDataLoader(path=prefix, filename=file_train,\n label=dict[data_name][2], scale=scaler,\n gauss_params=gauss_params, download=download,\n file_url=urls.get(file_train[:-4], None))\n\n dataset_test = TabularDataLoader(path=prefix, filename=file_test,\n label=dict[data_name][2], scale=scaler,\n gauss_params=gauss_params, download=download,\n file_url=urls.get(file_test[:-4], None))\n\n trainloader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)\n testloader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)\n \n return trainloader, testloader\n","repo_name":"AI4LIFE-GROUP/OpenXAI","sub_path":"openxai/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":9753,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"94"} +{"seq_id":"15276525976","text":"from colorama import Fore\nfrom pathlib import Path\nimport os\nfrom .cmd import Command\n\n\nclass Git(Command):\n def config(self, name, email=None):\n \"\"\"\n Sets up global config for user and email. 
If no email is\n given, then it defaults to {name}@users.noreply.github.com\n\n git config --global user.name {name}\n git config --global user.email {email}\n \"\"\"\n if email is None:\n email = name + \"@users.noreply.github.com\"\n out = self.run(f\"git config --global user.name {name}\")\n out = self.run(f\"git config --global user.email {email}\")\n\n def pull(self):\n out = self.run(\"git pull\")\n return out\n\n # def outdated(self):\n # out = self.run(\"\")\n\n\ndef pull(folder=None):\n # given a folder, it will change into each subfolder\n # and do a \"git pull\"\n if folder is None:\n folder = \".\"\n\n cmd = Git()\n dirname = Path(folder).expanduser().absolute()\n for d in [f for f in dirname.iterdir() if f.is_dir()]:\n os.chdir(str(d))\n git_repo = Path(\".git\").exists()\n if git_repo:\n print(f\"{Fore.CYAN}=======================\")\n print(f\"{d.stem}\")\n out = cmd.run(\"git config --get remote.origin.url\")\n out = out.replace(\"\\n\",\"\").split(\":\")[1]\n print(f\"{out}\")\n print(f\"======================={Fore.RESET}\")\n\n out = cmd.pull()\n # out = out.replace(\"\\n\", \"\\n \")\n if out.find(\"Already up to date.\") > -1:\n print(f\"{Fore.GREEN}{out}{Fore.RESET}\")\n else:\n print(f\"{out}\")\n\n out = cmd.run(\"git status\")\n if out.find(\"Untracked\") > -1:\n print(f\"{Fore.YELLOW}{out}{Fore.RESET}\")\n os.chdir(\"..\")\n\n# pull(\"~/github\")\n\ndef config():\n pass\n\ndef update():\n pass","repo_name":"walchko/project-phoenix","sub_path":"project_phoenix/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1208121927","text":"from sys import stdin,stdout\ninput = lambda : stdin.readline().rstrip()\nprint =lambda x : stdout.write(str(x)+\"\\n\")\n\nfor _ in range(int(input())):\n n, q = map(int, input().split())\n a = [int(i) for i in input().split()]\n d1 = [0 for _ in range(n+1)]\n d2 = [0 for _ in range(n+1)]\n d1[0] = float(\"-inf\")\n d2[0] = 0\n for i in range(n):\n d1[i+1] = max(d1[i], d2[i]+a[i])\n d2[i+1] = max(d2[i], d1[i] - a[i])\n\n print(max(d1[-1], d2[-1]))","repo_name":"ironnicko/competitive","sub_path":"PokemonArmy[Easy Version].py","file_name":"PokemonArmy[Easy Version].py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30471776461","text":"class Solution:\n def equalSubstring(self, s: str, t: str, maxCost: int) -> int:\n cost = []\n flag = 0\n for i in range(len(s)):\n tmp = abs(ord(s[i]) - ord(t[i]))\n cost.append(abs(ord(s[i]) - ord(t[i])))\n if tmp <= maxCost:\n flag = 1\n if flag == 0:\n return 0\n if len(s) == 1:\n return 1\n left = 0\n right = 0\n cur_cost = cost[0]\n while True:\n right += 1\n cur_cost += cost[right]\n if cur_cost > maxCost:\n cur_cost -= cost[left]\n left += 1\n if right == len(s) - 1:\n break\n return right - left + 1","repo_name":"Evan-Zhangyf/LeetCode-Record","sub_path":"题目记录/1208. 尽可能使字符串相等.py","file_name":"1208. 
尽可能使字符串相等.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70842670389","text":"#!/usr/bin/env python3\n\n\"\"\" Perform Insitu visualization of important metrics\n\nThis script defines functions to create Pandas dataframes out\nof current optimization (or parameter variation) state.\n\nThe current principle is to chug all info through CSV files.\nSeperate visualization toolkits (rg. Dash) can pick up these files.\n\nIO operations are not intensive, this should be enough\n\"\"\"\n\nimport hydra, logging\nimport subprocess as sb\nfrom omegaconf import DictConfig, OmegaConf\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\n\nfrom dash import Dash, html, dcc, Input, Output\nimport dash_bootstrap_components as dbc\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\n\nfrom core import process_input_command\nfrom ax.service.scheduler import Scheduler\nfrom ax.storage.json_store.save import save_experiment\n\nlog = logging.getLogger(__name__)\n\napp = Dash(__name__, external_stylesheets=[dbc.themes.MATERIA])\n\ndef data_from_experiment(scheduler: Scheduler):\n # Trial Parameters with corresponding objective values\n cfg = scheduler.experiment.runner.cfg\n params_df = pd.DataFrame()\n exp_df = scheduler.experiment.fetch_data().df\n if \"trial_index\" in exp_df.columns:\n exp_df = exp_df.set_index([\"trial_index\", \"metric_name\"]).unstack(level=1)[\"mean\"]\n trials = scheduler.experiment.get_trials_by_indices(range(exp_df.shape[0]))\n for tr in trials:\n params_df = pd.concat([params_df,\n pd.DataFrame({\n **tr.arm.parameters,\n **tr._properties,\n \"GenerationModel\": scheduler.generation_strategy.model._model_key},\n index=[tr.index])])\n df = pd.merge(exp_df, params_df, left_index=True, right_index=True)\n df.index.name=\"trial_index\"\n df.to_csv(f\"{cfg.problem.name}_report.csv\")\n save_experiment(scheduler.experiment, f\"{cfg.problem.name}_experiment.json\")\n\n@hydra.main(version_base=None, config_path=\".\", config_name=\"config.yaml\")\ndef dash_main(cfg : DictConfig):\n app.title = cfg.problem.name\n @app.callback(Output('live-update-graph', 'figure'),\n Input('interval-component', 'n_intervals'))\n def update_graph(fig):\n data = pd.DataFrame()\n try:\n data = pd.read_csv(f\"{cfg.problem.name}_report.csv\")\n except:\n log.warn(\"Could not visualize current state\")\n return fig\n nrows = len(cfg.problem.objectives.keys())\n fig = make_subplots(rows=nrows, cols=1)\n i=1\n for key, _ in cfg.problem.objectives.items():\n print(np.abs(stats.zscore(data[key])))\n df = data[(np.abs(stats.zscore(data[key])) < 1)]\n ifig = px.scatter(df, x=df.index, y=key, hover_name=key, hover_data=df.columns)\n fig.add_trace(\n ifig['data'][0],\n row=i, col=1\n )\n fig['layout']['xaxis{}'.format(i)]['title']='trial_index'\n fig['layout']['yaxis{}'.format(i)]['title']=key\n i += 1\n return fig\n\n @app.callback(Output('images', 'children'),\n Input('interval-component', 'n_intervals'))\n def update_images(children):\n data = pd.DataFrame()\n try:\n data = pd.read_csv(f\"{cfg.problem.name}_report.csv\")\n except:\n log.warn(\"Could not visualize current state\")\n return []\n df = data.tail(cfg.visualize.n_figures)\n figure_uris = []\n for _, row in df.iterrows():\n case = OmegaConf.create({\"name\": cfg.meta.clone_destination+row[\"casename\"]})\n image_uri = sb.check_output(list(process_input_command(cfg.visualize.figure_generator,\n case)), cwd=case.name, 
stderr=sb.PIPE)\n figure_uris.append({ **row.to_dict(),\n \"image\": image_uri.decode(\"utf-8\").strip(' ').replace('\\\"', '').replace('\\\\n', '')})\n return [\n html.Div(style={'width': f'{100/cfg.visualize.n_figures}%', 'float': 'left'},\n children=[\n html.Img(src=uri[\"image\"], width='100%', style={'margin':'10px'}),\n html.Div(children=[\n html.P(children=elm)\n for elm in OmegaConf.to_yaml(OmegaConf.create(uri)).splitlines()\n ])\n ])\n for uri in figure_uris ]\n\n updates = dcc.Interval(\n id='interval-component',\n interval=float(cfg.visualize.update_interval)*1000, # in milliseconds\n n_intervals=0,\n )\n\n app.layout = html.Div(children=[\n updates,\n html.H1(children=f'Optimization for {cfg.problem.name}',\n style={'text-align':'center', 'padding':'20px'}),\n html.H2(children=f'Optimization Metrics',\n style={'padding':'5px'}),\n dcc.Graph(id='live-update-graph'),\n html.H2(children=f'Insight into latest trials',\n style={'padding':'5px'}),\n html.Div(id='images', style={'padding':'10px'}),\n html.H2(children=f'Your configuration',\n style={'padding':'5px'}),\n html.Div(children=[\n html.Code(children=OmegaConf.to_yaml(cfg), style={'white-space': 'pre-wrap'})\n ],\n style = {'padding': '20px', 'margin': '10px'}\n )\n ])\n app.run_server(debug=False, port=int(cfg.visualize.port), host=cfg.visualize.host)\n\nif __name__ == '__main__':\n dash_main()\n","repo_name":"OFDataCommittee/OFMLHackathon","sub_path":"2023-07/bayesian-optimization/foamBO/foamDash.py","file_name":"foamDash.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"94"} +{"seq_id":"32183899504","text":"from __future__ import division\nimport sklearn.metrics\nfrom functions.save_plot import *\nfrom functions.read_data import *\nimport numpy as np\nimport scipy\nimport pylab as pl\nfrom numpy import fft\n\n\n#setup für die files\nfile = \"/home/michael/Desktop/Versuch 004/full.csv\"\n#file = \"C:\\\\Users\\\\s1656255\\\\Desktop\\\\test.csv\"\nhead_names = [\"Zeit\", \"Abstand\", \"Spannung\", \"Zaehler\", \"Abstand.2\", \"Spannung.2\", \"Zaehler.2\", \"Abstand.3\",\n \"Spannung.3\", \"Zaehler.3\", \"Abstand.4\", \"Spannung.4\", \"Zaehler.4\",\"Abstand.5\", \"Spannung.5\", \"Zaehler.5\",\n \"Abstand.6\", \"Spannung.6\", \"Zaehler.6\", \"Strom I\", \"Temperatur\"]\n#zählen der anzahl der zeilen\nwith open(file) as f:\n row_count = sum(1 for line in f)\n\n#setup für die anzahl der koeffizienten, sowie die testdaten\nnrows = 400000\nskiprows = 2000000-nrows\nn_predict = 50000\n#df = read_data_full(column=[\"Abstand\"], nrows = nrows, skiprows=skiprows)\n#df = abs(df-4)\n\n\n\n#berechnung der fourierkoeffizienten\ndef fourierExtrapolation(x, n_predict):\n n = x.size\n # anzahl der fourierkoeffizienten\n n_harm = 3000000\n t = np.arange(0, n)\n # trenddetektion\n p = np.polyfit(t, x, 1)\n # trendentfernung\n x_notrend = x - p[0] * t\n # trendentfernung in x domäne\n x_freqdom = fft.fft(x_notrend)\n #frequenzen berechnen\n f = fft.fftfreq(n)\n indexes = range(n)\n # sort indexes by frequency, lower -> higher\n indexes = list(range(n))\n\n t = np.arange(0, n + n_predict)\n restored_sig = np.zeros(t.size)\n for i in indexes[:1 + n_harm * 2]:\n #berechnung der amplitude\n ampli = np.absolute(x_freqdom[i]) / n\n #berechnung der phase\n phase = np.angle(x_freqdom[i])\n restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)\n return restored_sig + p[0] * t\n\n#plot range aufsetzen\nstart_plot = nrows - 10000\nend_plot = nrows + 
10000\nslice = 50\n#val_data = read_data_full_4(column=[\"Abstand\"], skiprows = nrows, nrows = n_predict)\n#val_data = abs(val_data-4)\ndef main():\n vals = [\"Abstand\", \"Abstand.2\", \"Abstand.3\", \"Abstand.4\", \"Abstand.5\", \"Abstand.6\"]\n vals = [\"Abstand.6\"]\n for val in vals:\n #daten laden\n df = read_data_full_4(column=[val], nrows=nrows, skiprows=skiprows)\n df = abs(df - 4)\n #validierungsdaten laden\n val_data = read_data_full_4(column=[val], skiprows=nrows, nrows=n_predict)\n val_data = abs(val_data - 4)\n x = df[val]\n x = np.array(x)\n #fourierreihe anwenden\n extrapolation = fourierExtrapolation(x, n_predict)\n #nicht mehr ganz sicher was das macht, vermutlich den plot auf ein fenster anpassen\n\n #pl.plot(np.arange(0, extrapolation.size)[nrows::slice], extrapolation[nrows::slice], 'r', label='extrapolation')\n #pl.plot(np.arange(0, x.size)[start_plot::slice], x[start_plot::slice], 'b', label='x', linewidth=3)\n #pl.plot(np.arange(nrows, nrows+n_predict)[start_plot::slice], val_data[start_plot::slice], color=\"green\", alpha = 0.7)\n #pl.legend()\n #pl.show()\n #save_plot(\"Prediction_Fourier_series\")\n print(val)\n\n #fehlerabweichung ausgeben\n print(sklearn.metrics.mean_squared_error(val_data, extrapolation[nrows:]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mischva11/python_abschlussarbeit","sub_path":"spektral/fourierreihe.py","file_name":"fourierreihe.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17954851376","text":"# General imports \nimport numpy as np \nimport sys \nimport time \nimport os\nimport re\nimport math\nimport gc\nimport pickle\n\n# local imports\nfrom emotionDatasetAnalyzer import emotionSensorReader\n\n# Machine learning libs \nimport torchtext as TT\nimport torch as T\nfrom sklearn import svm\nfrom sklearn import linear_model\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error,explained_variance_score\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\nfrom punctuator import Punctuator\nimport nltk\nfrom nltk import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n\ndevice = T.device(\"cuda:0\" if T.cuda.is_available() else \"cpu\")\n\nclass TC():\n def __init__(self, args):\n self.emo_sensor_reader = emotionSensorReader()\n # self.trainSet, self.testSet = self.emo_sensor_reader.loadSplittedDataset()\n \n self.name_embedding = \"bert\" # to be selected from args\n self.device = T.device(\"cuda:0\" if T.cuda.is_available() else \"cpu\")\n self.embedding = None\n self.tokenizer = None\n self.scaler_emb = None\n \n self.scaler_tc = None\n self.model = None\n self.C = 1\n self.eps = 0.1 # eps-tube size for no penalty (squared l2 penalty)\n self.gamma = 'scale' #1e-8 #'scale' # \"auto\" or \"scale\"(not used for linear kernel)\n self.degree = 3 # just for polynomial kernel\n self.kernel_type = 'poly' # ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’\n \n # load embedding\n self._loadEmbedding()\n \n self.usePunctuator = True # from args\n self.useStemming = True\n \n \n if self.usePunctuator:\n self.punctuator = Punctuator('./models/punctuator/Demo-Europarl-EN.pcl')\n \n if self.useStemming:\n self.stemmer = PorterStemmer()\n try: \n self.stop_words = 
set(stopwords.words('english'))\n except:\n nltk.download('stopwords')\n self.stop_words = set(stopwords.words('english'))\n \n # this operation increase accuracy but make computation slower\n def _separeteSentences(self, text):\n text = self.punctuator.punctuate(text)\n sents_list = sent_tokenize(text)\n return sents_list\n \n # check whether there's a word that has been wrongly recognized (is out of the context)\n \n def _filterRecognizedText(self,text, analyze_word):\n if not(self.name_embedding == \"bert\"): return \n \n\n text = re.sub(r'[^\\w\\s]','',text) \n \n words_list = word_tokenize(text)\n \n words_list = [self.stemmer.stem(word) for word in words_list]\n \n \n x_emb = self._wordToEmbedding(words_list, from_speech= True)\n\n analyze_word = 6\n \n x_avg = np.delete(x_emb, analyze_word, axis = 0)\n # print(x_avg.shape)\n average_vec = np.mean(x_avg, axis = 0)\n average_vec = average_vec.reshape(1,-1)\n # print(x_emb.shape)\n # print(average_vec.shape)\n \n similarities = []\n for i,emb_word in enumerate(x_emb):\n similarity = cosine_similarity(emb_word.reshape(1,-1), average_vec)\n\n similarities.append(similarity[0][0])\n \n print(similarities)\n \n\n \n # abs_max = np.max(np.abs(x_emb))\n \n # print(abs_max)\n \n # x_emb /= abs_max\n \n \n return x_emb\n \n \n \n def _preProcessSentence(self, sentence):\n sentence = sentence.lower()\n\n #remove punctuations and other non alphabetic characters\n sentence = re.sub(r'[^\\w\\s]','',sentence) \n\n words_list = word_tokenize(sentence)\n\n # remove english stopwords\n words_list = [word for word in words_list if not word in self.stop_words]\n\n if self.useStemming:\n words_list = [self.stemmer.stem(word) for word in words_list]\n \n # todo: eliminate words not related to the sentences, i.e. error in the speech recognition phase\n \n return words_list\n \n \n def _wordToEmbedding(self, x, is_training = False, is_testing = False, from_speech = False):\n \n if self.name_embedding == \"glove\":\n if is_training or is_testing:\n \n # remove spaces from input\n x = [x_i.strip() for x_i in x] \n \n # get embedding\n x_emb = [np.array(self.embedding[x_i])for x_i in x]\n \n \n # to numpy array embeddings\n x_emb = np.array(x_emb)\n \n \n if is_training:\n # define scaler and scale input \n scaler = preprocessing.StandardScaler().fit(x_emb)\n self.scaler_emb = scaler\n \n x_emb_scaled = self.scaler_emb.transform(x_emb)\n \n print(x_emb_scaled.shape) \n \n else: # simple forward\n x = x.strip()\n \n x_emb = np.array(self.embedding[x])\n \n x_emb = np.expand_dims(x_emb,0)\n x_emb_scaled = self.scaler_emb.transform(x_emb)\n \n elif self.name_embedding == \"bert\":\n \n if is_training or is_testing:\n \n # remove spaces from input\n x = [x_i.strip() for x_i in x] \n \n # since here we work with words no sentence or sentences we can omit start & end tag: [CLS] [SEP]\n \n # tokenize word\n x_token = [self.tokenizer.tokenize(x_i) for x_i in x]\n \n \n # padding for different lenghts\n max_length_token = max([len(x_i) for x_i in x_token])\n for x_i in x_token:\n while(len(x_i) < max_length_token):\n x_i.append('[PAD]')\n \n # token to id\n x_token_idx = [self.tokenizer.convert_tokens_to_ids(x_i) for x_i in x_token]\n \n # token ids to Tensor\n x_token_idx = T.tensor(x_token_idx).to(self.device)\n \n with T.no_grad():\n embedding_layers, _ = self.embedding(x_token_idx)\n \n # take embedding from the final layer (|h|=12)\n x_emb = embedding_layers[11].to('cpu').numpy()\n \n # take first element of the embedding\n x_emb = x_emb[:,0,:]\n\n if 
is_training:\n # define scaler and scale input \n scaler = preprocessing.StandardScaler().fit(x_emb)\n self.scaler_emb = scaler\n \n x_emb_scaled = self.scaler_emb.transform(x_emb)\n \n elif from_speech:\n #check dimension(multi/single phrase case)\n # if(type(x[0]) == str): #single sentence\n \n # strip no more needed after the pre-processing\n x_token = [self.tokenizer.tokenize(x_i) for x_i in x]\n \n max_length_token = max([len(x_i) for x_i in x_token])\n for x_i in x_token:\n while(len(x_i) < max_length_token):\n x_i.append('[PAD]')\n \n print(x_token)\n \n x_token_idx = [self.tokenizer.convert_tokens_to_ids(x_i) for x_i in x_token]\n \n x_token_idx = T.tensor(x_token_idx).to(self.device)\n \n with T.no_grad():\n embedding_layers, _ = self.embedding(x_token_idx)\n \n x_emb = embedding_layers[11].to('cpu').numpy()\n \n x_emb = x_emb[:,0,:]\n \n if self.scaler_emb == None:\n self.loadScalerEmb()\n \n x_emb_scaled = self.scaler_emb.transform(x_emb)\n \n # else: #multi sentences \n # print(\"multi sentences case\")\n # print(x)\n \n # x_token = [[self.tokenizer.tokenize(x_i) for x_i in x_word] for x_word in x]\n # print(x_token)\n \n # max_length_token = max([max([len(x_i) for x_i in x_word]) for x_word in x_token])\n # for x_word in x_token:\n # for x_i in x_word:\n # while(len(x_i) < max_length_token):\n # x_i.append('[PAD]')\n \n # print(x_token)\n \n # x_token_idx = [[self.tokenizer.convert_tokens_to_ids(x_i) for x_i in x_word] for x_word in x_token]\n # print(x_token_idx)\n # x_token_idx = T.tensor(x_token_idx).to(self.device)\n \n \n else: \n # simple forward on word\n x = x.strip()\n \n x_token = self.tokenizer.tokenize(x)\n x_token_idx = self.tokenizer.convert_tokens_to_ids(x_token)\n x_token_idx = T.tensor([x_token_idx]).to(self.device)\n \n \n with T.no_grad():\n embedding_layers, _ = self.embedding(x_token_idx)\n \n x_emb = embedding_layers[11].to('cpu').numpy()\n x_emb = x_emb[:,0,:]\n if self.scaler_emb is None:\n self.loadScalerEmb()\n x_emb_scaled = self.scaler_emb.transform(x_emb)\n \n return x_emb_scaled\n \n \n \n def _loadEmbedding(self):\n # glove = TT.vocab.GloVe(name=\"6B\", dim=100)\n \n if self.name_embedding == \"glove\":\n self.embedding = TT.vocab.GloVe(name=\"840B\", dim=300)\n \n elif self.name_embedding == \"bert\":\n self.embedding = BertModel.from_pretrained('bert-base-uncased')\n self.embedding.eval()\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n self.embedding.to(self.device)\n \n \n def _getTestSet(self):\n _ , testSet = self.emo_sensor_reader.loadSplittedDataset()\n return testSet\n \n def _getTrainSet(self):\n trainSet, _ = self.emo_sensor_reader.loadSplittedDataset()\n return trainSet\n \n # ---------------------------- [load and save] ----------------------------\n \n def _saveModel(self, folder=\"models/TC_SVM/\"):\n if not os.path.exists(folder):\n os.makedirs(folder)\n \n if self.kernel_type == None:\n path_save = os.path.join(folder,\"msvr.sav\")\n else:\n path_save = os.path.join(folder,\"msvr_\"+ str(self.name_embedding)+ \".sav\")\n\n pickle.dump(self.model, open(path_save, 'wb'))\n \n def loadModel(self, folder=\"models/TC_SVM/\"):\n \n if self.kernel_type == None:\n path_load = os.path.join(folder,\"msvr.sav\")\n else:\n path_load = os.path.join(folder,\"msvr_\"+ str(self.name_embedding)+ \".sav\")\n \n self.model = pickle.load(open(path_load,'rb'))\n self.loadScalerEmb()\n self.loadScalerTC()\n \n def _saveScalerEmb(self, folder=\"models/TC_SVM/scaler\"):\n if not os.path.exists(folder):\n os.makedirs(folder)\n \n 
path_save = os.path.join(folder,\"scaler_xTrain_\"+str(self.name_embedding) +\".sav\")\n pickle.dump(self.scaler_emb, open(path_save, 'wb'))\n \n def loadScalerEmb(self, folder=\"models/TC_SVM/scaler\"):\n path_save = os.path.join(folder,\"scaler_xTrain_\"+str(self.name_embedding) +\".sav\")\n self.scaler_emb = pickle.load(open(path_save,'rb'))\n \n def _saveScalerTC(self, folder=\"models/TC_SVM/scaler\"):\n if not os.path.exists(folder):\n os.makedirs(folder)\n \n path_save = os.path.join(folder,\"scaler_yTrain.sav\")\n pickle.dump(self.scaler_emb, open(path_save, 'wb'))\n \n def loadScalerTC(self, folder=\"models/TC_SVM/scaler\"):\n path_save = os.path.join(folder,\"scaler_yTrain.sav\")\n self.scaler_emb = pickle.load(open(path_save,'rb'))\n \n # ---------------------------- [load and save] ----------------------------\n\n def _computeMetrics(self,y_pred, y_target):\n \n # estimate the errors\n \n evaluations = {}\n \n mses = []\n maes = []\n rmses = []\n \n print(\"- Computing evaluation metrics for the text classifier:\")\n \n mse_disgust = mean_squared_error(y_target[:,0], y_pred[:,0])\n mae_disgust = mean_absolute_error(y_target[:,0], y_pred[:,0])\n rmse_disgust = math.sqrt(mse_disgust)\n \n mses.append(mse_disgust);maes.append(mae_disgust),rmses.append(rmse_disgust)\n print(\"-- Disgust: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_disgust, rmse_disgust, mae_disgust))\n \n mse_surprise = mean_squared_error(y_target[:,1], y_pred[:,1])\n mae_surprise= mean_absolute_error(y_target[:,1], y_pred[:,1])\n rmse_surprise = math.sqrt(mse_surprise)\n mses.append(mse_surprise);maes.append(mae_surprise);rmses.append(rmse_surprise)\n print(\"-- Surprise: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_surprise, rmse_surprise, mae_surprise))\n \n mse_neutral = mean_squared_error(y_target[:,2], y_pred[:,2])\n mae_neutral= mean_absolute_error(y_target[:,2], y_pred[:,2])\n rmse_neutral = math.sqrt(mse_neutral)\n mses.append(mse_neutral);maes.append(mae_neutral);rmses.append(rmse_neutral)\n print(\"-- Neutral: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_neutral, rmse_neutral, mae_neutral))\n \n mse_anger = mean_squared_error(y_target[:,3], y_pred[:,3])\n mae_anger = mean_absolute_error(y_target[:,3], y_pred[:,3])\n rmse_anger = math.sqrt(mse_anger)\n mses.append(mse_anger);maes.append(mae_anger);rmses.append(rmse_anger)\n print(\"-- Anger: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_anger, rmse_anger, mae_anger))\n \n mse_sad = mean_squared_error(y_target[:,4], y_pred[:,4])\n mae_sad = mean_absolute_error(y_target[:,4], y_pred[:,4])\n rmse_sad = math.sqrt(mse_sad)\n mses.append(mse_sad);maes.append(mae_sad);rmses.append(rmse_sad)\n print(\"-- Sad: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_sad, rmse_sad, mae_sad))\n \n mse_happy = mean_squared_error(y_target[:,5], y_pred[:,5])\n mae_happy = mean_absolute_error(y_target[:,5], y_pred[:,5])\n rmse_happy = math.sqrt(mse_happy)\n mses.append(mse_happy);maes.append(mae_happy);rmses.append(rmse_happy)\n print(\"-- Happy: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_happy,rmse_happy, mae_happy))\n \n mse_fear = mean_squared_error(y_target[:,6], y_pred[:,6])\n mae_fear = mean_absolute_error(y_target[:,6], y_pred[:,6])\n rmse_fear = math.sqrt(mse_fear)\n mses.append(mse_fear);maes.append(mae_fear);rmses.append(rmse_fear)\n print(\"-- Fear: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_fear, rmse_fear, mae_fear))\n \n mse_global = mean_squared_error(y_target, y_pred)\n mae_global = 
mean_absolute_error(y_target, y_pred)\n rmse_global= math.sqrt(mse_global)\n mses.append(mse_global);maes.append(mae_global);rmses.append(rmse_global)\n print(\"-- Global error: MSE -> {:.8f} RMSE -> {:.8f} MAE -> {:.8f}\".format(mse_global, rmse_global, mae_global) )\n \n \n evaluations['mse'] = mses\n evaluations['mae'] = maes\n evaluations['rmse'] = rmses\n \n \n variance_score = explained_variance_score(y_target, y_pred, multioutput=\"variance_weighted\")\n print(\"-- Explained variance score -> {:.8f}\".format(variance_score))\n \n r2 = r2_score(y_target, y_pred, multioutput=\"variance_weighted\")\n print(\"-- R2 score-> {:.8f}\".format(r2))\n \n return evaluations\n \n \n \n def train_TC(self, save_model = True, use_full = False):\n print(\"- Training the text classifier model...\")\n \n self.model = MultiOutputRegressor(svm.SVR(kernel= \"rbf\",\\\n gamma = \"scale\", C = 5, epsilon= 1e-4, cache_size= 2000, max_iter= -1, tol = 1e-5))\n # - Training the text classifier model...\n # - Using the full dataset for training...\n # - Testing the text classifier model...\n # - Computing evaluation metrics for the text classifier:\n # -- Disgust: MSE -> 0.10972640 RMSE -> 0.33124975 MAE -> 0.04245848\n # -- Surprise: MSE -> 0.04602675 RMSE -> 0.21453846 MAE -> 0.03793087\n # -- Neutral: MSE -> 0.03004032 RMSE -> 0.17332145 MAE -> 0.03118618\n # -- Anger: MSE -> 0.04954111 RMSE -> 0.22257831 MAE -> 0.03512609\n # -- Sad: MSE -> 0.05785598 RMSE -> 0.24053270 MAE -> 0.04432290\n # -- Happy: MSE -> 0.04829081 RMSE -> 0.21975169 MAE -> 0.03961154\n # -- Fear: MSE -> 0.06316022 RMSE -> 0.25131698 MAE -> 0.04356229\n # -- Global error: MSE -> 0.05780594 RMSE -> 0.24042866 MAE -> 0.03917119\n # -- Explained variance score -> 0.94296737\n # -- R2 score-> 0.94167403\n # - Testing the text classifier model...\n # - Computing evaluation metrics for the text classifier:\n # -- Disgust: MSE -> 0.69622562 RMSE -> 0.83440135 MAE -> 0.52792654\n # -- Surprise: MSE -> 0.70038943 RMSE -> 0.83689272 MAE -> 0.55796328\n # -- Neutral: MSE -> 0.49501726 RMSE -> 0.70357463 MAE -> 0.52640148\n # -- Anger: MSE -> 0.91329972 RMSE -> 0.95566716 MAE -> 0.63889260\n # -- Sad: MSE -> 0.62127248 RMSE -> 0.78820840 MAE -> 0.56550408\n # -- Happy: MSE -> 0.56738550 RMSE -> 0.75324996 MAE -> 0.54563259\n # -- Fear: MSE -> 0.65362400 RMSE -> 0.80847016 MAE -> 0.60561190\n # -- Global error: MSE -> 0.66388772 RMSE -> 0.81479305 MAE -> 0.56684749\n # -- Explained variance score -> 0.36242966\n # -- R2 score-> 0.35639843\n # ----------------------------------------- end train test model tuning\n \n \n \n trainset = self._getTrainSet()\n \n if use_full:\n print(\"- Using the full dataset for training...\")\n tmp = self._getTestSet()\n trainset = np.concatenate((trainset,tmp))\n \n \n # separate x and y from trainSet\n x,y = trainset[:,0],trainset[:,1:]\n \n # targets to numpy array \n y = np.array(y)\n \n # scale targets\n self.scaler_tc = preprocessing.StandardScaler().fit(y)\n y = self.scaler_tc.transform(y)\n \n\n x_emb_scaled = self._wordToEmbedding(x, is_training= True)\n \n self.model.fit(x_emb_scaled,y)\n \n if save_model: \n self._saveModel()\n self._saveScalerEmb()\n self._saveScalerTC()\n \n gc.collect()\n \n \n def test_TC(self, setType = \"test\"):\n print(\"- Testing the text classifier model...\")\n \n if self.model == None:\n self.loadModel()\n \n if self.scaler_tc == None:\n self.loadScalerTC()\n \n if setType == \"train\": testset = self._getTrainSet()\n else: testset = self._getTestSet()\n \n # separate x and y 
from testSet\n x,y = testset[:,0],testset[:,1:]\n \n # targets to numpy array \n y = np.array(y)\n \n # scale targets\n y = self.scaler_tc.transform(y)\n \n # process x\n x_emb_scaled = self._wordToEmbedding(x,is_testing=True)\n \n # make predictions\n y_pred = self.model.predict(x_emb_scaled)\n \n # print(y_pred.shape)\n # print(y_pred)\n # print(y.shape)\n # print(y)\n \n # measure the error\n self._computeMetrics(y_pred,y)\n gc.collect()\n \n def predict_Word2Sentiment(self, x):\n \n if self.model == None:\n self.loadModel()\n \n # process x\n x_emb = self._wordToEmbedding(x)\n \n # make predictions\n y = self.model.predict(x_emb)\n print(y.shape)\n \n print(y)\n return y, x_emb\n \n def predict_wordsList2Sentiment(self, words):\n print(words)\n words = self._preProcessSentence(words)\n print(len(words))\n print(words)\n x_emb = self._wordToEmbedding(words, from_speech= True)\n print(x_emb.shape)\n y = self.model.predict(x_emb)\n print(y.shape)\n \n y = np.mean(y, axis=0)\n \n return y\n \n \n def predict_Corpus2Sentiment(self,c):\n \n if not(self.usePunctuator):\n # corpus as a full sentence\n return self.predict_wordsList2Sentiment(c)\n \n sents = self._separeteSentences(text)\n \n if len(sents)==1:\n # no sentences separation needed detected\n return self.predict_wordsList2Sentiment(c)\n \n y_sents = [self.predict_wordsList2Sentiment(words) for words in sents]\n print(y_sents)\n y = np.mean(y_sents, axis=0)\n print(y)\n \n return y\n\n def get_rankedEmotions(self, y_score):\n emotions = self.emo_sensor_reader.getLabels()\n ranked_emo = {}\n for idx,y_val in enumerate(y_score):\n ranked_emo[emotions[idx]] = y_score[idx]\n \n print(ranked_emo)\n ranked_emo = sorted(ranked_emo.items(), key = lambda kv:(kv[1], kv[0]), reverse = True )\n print(ranked_emo)\n return ranked_emo\n \n # generic forward method for word/s and sentence/s\n def forward(self, x):\n \n startTime = time.time()\n \n y = self.predict_Corpus2Sentiment(x)\n y_ranked = self.get_rankedEmotions(y)\n \n print(\"End prediction of emotions, time: {} [s]\".format((time.time() -startTime)))\n return y,y_ranked\n \n \n\n# ---------------------------> [test section]\n \nnew = TC(0)\n\n# test sentence from asr emotions ranking\nif False:\n new.loadModel()\n new.usePunctuator = True \n text = \"today i was having fun playing with my cousin when a stranger came up into the house he was tall and thin he asked about his parents but they weren't at home he said to let them know about the visit \"\n new.get_rankedEmotions(new.predict_Corpus2Sentiment(text))\n\n\n# test identification of wrong word recognized \nif False:\n \n # sentence_ok = \"how are you? I am fine thanks\"\n sentence_notok = \"how are you? 
I pen thanks\"\n \n new.usePunctuator = False\n # sentence_notok = \"today i've played videogames all the day, so relaxing\"\n analyze_word = 4\n \n emb = new._filterRecognizedText(sentence_notok,analyze_word)\n \n # analyze_word = 6 \n \n # print(words_list[analyze_word])\n \n # x_emb = self._wordToEmbedding(words_list, from_speech= True)\n \n # for i in range(len(words_list)):\n # -------------------------------------------------------------------------------------------- \n # x_avg = np.delete(x_emb, i, axis = 0)\n # # print(x_avg.shape)\n # average_vec = np.mean(x_avg, axis = 0)\n # average_vec = average_vec.reshape(1,-1)\n # # print(x_emb.shape)\n # # print(average_vec.shape)\n \n # similarities = []\n # for i,emb_word in enumerate(x_emb):\n # similarity = cosine_similarity(emb_word.reshape(1,-1), average_vec)\n\n # similarities.append(similarity[0][0])\n \n # print(similarities)\n \n # -------------------------------------------------------------------------------------------\n # model = BertForMaskedLM.from_pretrained('bert-base-uncased')\n # model.eval()\n \n # model.to(self.device)\n \n # text = re.sub(r'[^\\w\\s]','',text) \n \n # words_list = word_tokenize(text)\n \n # words_list = [self.stemmer.stem(word) for word in words_list]\n \n # for word in words_list:\n # print(word)\n \n # # look at speech case in word embeeding \n \n # # for i in range(len(words_list)):\n # analyze_word = 6\n \n # words_list[analyze_word] = '[MASK]'\n \n # x_token = [self.tokenizer.tokenize(x_i) for x_i in words_list]\n \n # max_length_token = max([len(x_i) for x_i in x_token])\n # for x_i in x_token:\n # while(len(x_i) < max_length_token):\n # x_i.append('[PAD]')\n \n # print(x_token)\n \n # x_token_idx = [self.tokenizer.convert_tokens_to_ids(x_i) for x_i in x_token]\n \n # x_token_idx = T.tensor(x_token_idx).to(self.device)\n \n # with T.no_grad():\n # predictions = model(x_token_idx)\n \n # predicted_index = T.argmax(predictions[0, analyze_word]).item()\n # predicted_token = self.tokenizer.convert_ids_to_tokens([predicted_index])[0]\n \n # print(predicted_token)\n \n # ------------------------------------------------------------------------------------------- \n \n # print(emb.shape)\n\n # emb_ww = np.delete (emb,analyze_word, axis = 0)\n # print(emb_ww.shape)\n \n # average_vec = np.mean(emb_ww, axis = 0)\n\n \n\n # acc = 0\n # print(\"----------cos----------\")\n # for i,emb_word in enumerate(emb):\n # # print(emb_word.shape)\n # cs = T.cosine_similarity( T.tensor( np.expand_dims(emb[analyze_word],axis= 0)),T.tensor( np.expand_dims(emb_word,axis= 0) ) )\n # print(cs)\n # acc += float(cs[0])\n \n # print(acc)\n # print(\"--------------------\")\n \n # for emb_word in emb:\n # # print(emb_word.shape)\n # print(T.cosine_similarity( T.tensor( np.expand_dims(average_vec,axis= 0)),T.tensor( np.expand_dims(emb_word,axis= 0) ) ))\n \n\n # delta_vec = emb[analyze_word] - average_vec \n # print(delta_vec.shape)\n # delta = np.linalg.norm(delta_vec)\n # print(delta)\n \n \n\n# test single word prediction score\nif True:\n new.train_TC(save_model=True, use_full=True)\n\n\n new.test_TC(setType =\"train\")\n new.test_TC(setType = \"test\")\n \n # t1 = \"sun\"\n # t2 = \"injured\"\n \n # y1,x1 = new.predict_Word2Sentiment(t1)\n # y2,x2 = new.predict_Word2Sentiment(t2)\n \n # print(T.cosine_similarity(T.tensor(x1),T.tensor(x2)))\n # print(T.cosine_similarity(T.tensor(y1),T.tensor(y2)))","repo_name":"FabrCas/Theory_of_mind_study","sub_path":"Project: Theory of mind and emotional resonance 
study/textClassifier.py","file_name":"textClassifier.py","file_ext":"py","file_size_in_byte":27843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42288984037","text":"# Created by: René Vilar S.\n# Algorithmic Toolbox - Coursera 2021\n\n\ndef evaluate(a, b, op):\n if op == '+':\n return a + b\n elif op == '-':\n return a - b\n elif op == '*':\n return a * b\n else:\n assert False\n\n\ndef get_min_max(i, j, op, min_values, max_values):\n lowest_min = 10000\n highest_max = -10000\n\n for k in range(i, j):\n a = evaluate(max_values[i][k], max_values[k + 1][j], op[k])\n b = evaluate(max_values[i][k], min_values[k + 1][j], op[k])\n c = evaluate(min_values[i][k], max_values[k + 1][j], op[k])\n d = evaluate(min_values[i][k], min_values[k + 1][j], op[k])\n\n lowest_min = min(lowest_min, a, b, c, d)\n highest_max = max(highest_max, a, b, c, d)\n\n return lowest_min, highest_max\n\n\ndef get_maximum_value(dataset):\n op = dataset[1:len(dataset):2]\n d = dataset[0:len(dataset) + 1:2]\n n = len(d)\n\n min_values = [[0 for i in range(n)] for j in range(n)]\n max_values = [[0 for i in range(n)] for j in range(n)]\n\n for i in range(n):\n min_values[i][i] = int(d[i])\n max_values[i][i] = int(d[i])\n\n for s in range(1, n):\n for i in range(n - s):\n j = i + s\n min_values[i][j], max_values[i][j] = get_min_max(i, j, op, min_values, max_values)\n return max_values[0][n - 1]\n\n\nif __name__ == \"__main__\":\n print(get_maximum_value(input()))\n","repo_name":"Zzpecter/Coursera_AlgorithmicToolbox","sub_path":"week6/3_placing_parentheses.py","file_name":"3_placing_parentheses.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10299689649","text":"#coding = utf - 8\n\n\nimport fnmatch\n'''\n> fnmatch.fnmatchcase(filename,pattern):该函数与上一个函数功能大致相同,只是该函数区分大小写\n> fnmatch.filter(names,pattern):该函数对names列表进行过滤,返回names类标中匹配pattern的文件名\n 组成的子集合。\n'''\n\nnames = ['a.py','b.py','c.py','d.py']\n#对names列表进行过滤\nsub = fnmatch.filter(names,'[ac].py') #1代码\nprint(sub)\n\n'''\n上面���序定义了一个['a.py','b.py','c.py','d.py']集合,该集合中的4个元素都代表了\n指定文件(实际文件是否存在,fnmatch并不关心)。接下来程序在#1代码处调filter()函数\n对names进行过滤,过滤完成后只保留匹配[ac].py模式的文件名————要求文件名要么是\na.py,要么是c.py。\n'''\n\n\n\n\n","repo_name":"Naruterador/Coding","sub_path":"Python/python_self_study/crazy_python/chapter12_file_IO/fnmatch/os.path2_test.py","file_name":"os.path2_test.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"36328581570","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import ClienteCadForm, ServicesForm\nfrom .models import ClienteModel, Servicos\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef cliente_cad(request):\n page = 'clientes/cliente_cad.html'\n form = ClienteCadForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('cliente_list')\n\n return render(request, page, {'form': form})\n\n\ndef cliente_edit(request, cpf):\n page = 'clientes/cliente_edit.html'\n cliente = ClienteModel.objects.get(cpf=cpf)\n form = ClienteCadForm(request.POST or None, instance=cliente)\n if request.POST:\n if form.is_valid() and 'salvar' in request.POST:\n form.save()\n return redirect('cliente_list')\n if 'excluir' in request.POST:\n cliente.delete()\n return redirect('cliente_list')\n\n return render(request, page, 
{'form': form, 'cliente': cliente})\n\n\ndef cliente_list(request):\n page = 'clientes/cliente_list.html'\n clientes = ClienteModel.objects.all()\n clientes = clientes.order_by('nome')\n\n return render(request, page, {'clientes': clientes})\n\n\ndef cliente_detail(request, cpf):\n page = 'clientes/cliente_detail.html'\n cliente = ClienteModel.objects.filter(cpf=cpf)\n\n return render(request, page, {'cliente': cliente})\n\n\ndef services_cad(request):\n page = 'services/services_cad.html'\n form = ServicesForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('services_list')\n\n return render(request, page, {'form': form})\n\n\ndef services_edit(request, id):\n page = 'services/services_edit.html'\n services = Servicos.objects.get(id=id)\n form = ServicesForm(request.POST or None, instance=services)\n if request.POST:\n if form.is_valid() and 'salvar' in request.POST:\n form.save()\n return redirect('services_list')\n if 'excluir' in request.POST:\n services.delete()\n return redirect('services_list')\n\n return render(request, page, {'form': form, 'services': services})\n\n\ndef services_list(request):\n page = 'services/services_list.html'\n services = Servicos.objects.all()\n services = services.order_by('nome')\n\n return render(request, page, {'services': services})\n\n\ndef services_detail(request, id):\n page = 'services/services_detail.html'\n services = Servicos.objects.filter(id=id)\n\n return render(request, page, {'services': services})","repo_name":"caiquegoncalves21/OPE","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7348482710","text":"import filesReader\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n\ndef transform(dataset, n_features=5000):\n # 将原始文本转化为tf-idf特征矩阵\n vectorizer = TfidfVectorizer(analyzer=filesReader.split_into_words, max_features=n_features)\n X = vectorizer.fit_transform(dataset)\n return X, vectorizer\n\n\ndef train(X, vectorizer, true_k=14, showLable=False):\n # 模型训练\n km = KMeans(n_clusters=true_k, max_iter=300, n_init=1)\n km.fit(X)\n if showLable:\n print('Top terms per cluster:')\n order_centroids = km.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n for i in range(true_k):\n print(\"Cluster:\", i, end='')\n for ind in order_centroids[i, :10]:\n print(terms[ind] + ' ', end='')\n print()\n result = list(km.predict(X))\n print('Cluster distribution:')\n print(dict([(i, result.count(i)) for i in result]))\n return km.inertia_\n\n\ndef test():\n # 测试选择最优参数\n dataset = filesReader.contents('./answer')\n # print(\"documents:\", len(dataset))\n X, vectorizer = transform(dataset, n_features=5000)\n true_ks = []\n scores = []\n for i in range(10, 21):\n score = train(X, vectorizer, true_k=i)\n print(i, score)\n true_ks.append(i)\n scores.append(score)\n plt.figure(figsize=(8, 4))\n plt.plot(true_ks, scores, label=\"distortion\", color=\"red\", linewidth=1)\n plt.xlabel(\"number of clusters\")\n plt.ylabel(\"dsitortion\")\n plt.legend()\n plt.show()\n\n\ndef out():\n # 在最优参数下输出聚类结果\n dataset = filesReader.contents('./answer')\n X, vectorizer = transform(dataset, n_features=5000)\n score = train(X, vectorizer, true_k=14, showLable=True) / len(dataset)\n print(score)\n\n\n# 
test()\nout()\n","repo_name":"Titiuu/Topic-Detection","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"28374665960","text":"#\n#### Author: Hamad Al Marri \n#### Date: Feb 11th, 2020\n#\n#\n#\tThis program is free software: you can redistribute it and/or modify\n#\tit under the terms of the GNU General Public License as published by\n#\tthe Free Software Foundation, either version 3 of the License, or\n#\t(at your option) any later version.\n#\n#\tThis program is distributed in the hope that it will be useful,\n#\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n#\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#\tGNU General Public License for more details.\n#\n#\tYou should have received a copy of the GNU General Public License\n#\talong with this program. If not, see .\n#\n#\n#\n#\n# This is the entry point to Gamma editor. The gamma.py will load\n# config parameters and load the builder (UI structure of the main window).\n# This Application instance is the main root of gamma. It holds\n# references to everything needed for other plugins such as config, \n# window, builder, and plugins_manager.\n# Also it loads the eager plugins in self.plugins_manager.load_plugins()\n# which call activate for each plugin and store plugins references in\n# plugins_manager.plugins\n\nimport sys\n\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_version('GtkSource', \"4\")\nfrom gi.repository import Gio, Gtk, Gdk, GtkSource, GObject\n\nimport config\nimport signal_handler\nfrom plugins.plugins_manager import PluginsManager\n\n\nclass Application(Gtk.Application):\n\n\tdef __init__(self, *args, **kwargs):\n\t\t\t\t\n\t\t# make the package name as \"io.gitlab.hamadmarri.gamma\"\n\t\t# FLAGS_NONE means no passing arguments from command line, this\n\t\t# might be changed later to support new window, new file, or open a file\n\t\tsuper().__init__(*args, application_id=f\"io.gitlab.hamadmarri.gamma\", \n\t\t\t\t\t\tflags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE, **kwargs)\n\t\t\n\t\t\n\t\t# this line is important to mak gtk object(newer version of pygtk) to\n\t\t# include gtk sourceview.\n\t\tGObject.type_register(GtkSource.View)\n\t\t\n\t\tself.name = \"GammaApplication\"\n\t\tself.window = None\n\t\tself.builder = None\n\t\tself.is_debugging = False\n\t\t\n\t\t# config contains important paths and settings for ui, styles, plugins\n\t\tself.config = config.config_paths_and_settings\n\n\t\t# builder is the object responsible of\n\t\t# translating .ui xml files (widgets design/layout. see glade) to\n\t\t# be in the gtk objects form \n\t\tself.load_builder()\n\t\t\n\t\t# plugins_manager for anything related to plugins (eager plugins)\n\t\tself.plugins_manager = PluginsManager(self)\n\t\t\t\t\n\t\t# signal_handler is for handling general signals such as\n\t\t# key press, and basic window resizing paned\n\t\t# SignalHandler also makes it easier for other plugins to\n\t\t# process key bindings. 
It loop through all plugins and \n\t\t# call key_bindings function passing (event, keyval_name, ctrl, alt, shift)\n\t\t# which is an easy design for plugins to set there key bindings\n\t\tself.signal_handler = signal_handler.SignalHandler(self)\n\t\t\n\n\n\tdef load_builder(self):\n\t\tself.builder = Gtk.Builder()\n\t\t\n\t\t# load .ui file, its path is in config file\n\t\tself.builder.add_from_file(self.config[\"ui-path\"])\n\n\n\n\tdef set_handlers(self):\n\t\t# this line connects signals in handlers object to \n\t\t# some functions. \"handlers\" is set by SignalHandler and\n\t\t# plugins that need to bind signals to functions\n\t\tself.builder.connect_signals(self.signal_handler.handlers)\n\t\t\n\n\tdef do_startup(self):\n\t\tGtk.Application.do_startup(self)\n\n\t\n\tdef do_command_line(self, command_line):\n\t\targs = command_line.get_arguments()\n\n\t\tself.signal_handler.emit(\"log\", self, \"do_command_line:\" + str(args))\n\t\t\n\t\tif len(args) == 1:\n\t\t\tself.do_activate()\n\t\telif args[1] == \"--new-window\":\n\t\t\tself.do_activate(new_window=True)\n\t\telif args[1] == \"--verbose\":\n\t\t\tself.is_debugging = True\n\t\t\t\n\t\t\t# make sure at least the main window is open\n\t\t\tself.do_activate()\n\t\t\t\n\t\t\tif len(args) > 2:\n\t\t\t\t# open files\n\t\t\t\tself.open_files(args[2:])\n\t\telse:\n\t\t\t# make sure at least the main window is open\n\t\t\tself.do_activate()\n\n\t\t\t# open files\n\t\t\tself.open_files(args[1:])\n\t\t\t\n\t\treturn 0\n\t\n\t\t\t\n\t\t\n\tdef do_activate(self, new_window=False):\n\t\tif not self.window:\n\t\t\tself.show_first_window()\n\t\telif new_window:\n\t\t\tself.show_new_window()\n\n\t\t# I think there is a bug\n\t\t# eventhough header_left_side is set to visible\n\t\t# but it doesn't show up unless window.show_all() which\n\t\t# it changed to show() to make sourcemap (mini source view)\n\t\t# able to be invisible\n\t\tself.builder.get_object(\"header_left_side\").show_all()\n\t\t\n\t\n\tdef show_first_window(self):\n\t\t# get id=window (ui element in .ui) from builder\n\t\tself.window = self.builder.get_object(\"window\")\n\t\t\n\t\t# bind this builder to this window\n\t\tself.window.builder = self.builder\n\t\t\n\t\t# loading plugins calls their activate functions.\n\t\t# in plugins_manager.py, you can comment out plugins in\n\t\t# plugin_list array\n\t\tself.plugins_manager.load_plugins()\n\t\tself.set_handlers()\n\n\t\t# must set the parent application of \n\t\t# window to this app(self)\n\t\tself.window.props.application = self\n\t\tself.window.set_icon_name(\"io.gitlab.hamadmarri.gamma\")\n\t\tself.window.show()\n\t\tself.window.connect(\"focus_in_event\", self.window_event)\n\t\t\n\n\tdef show_new_window(self):\n\t\tself.load_builder()\n\t\t\n\t\t# get id=window (ui element in .ui) from builder\n\t\tself.window = self.builder.get_object(\"window\")\n\t\t\n\t\t# bind this builder to this window\n\t\tself.window.builder = self.builder\n\n\t\tself.plugins_manager.activate_plugins()\n\t\tself.set_handlers()\n\t\t\t\t\n\t\t# must set the parent application of \n\t\t# window to this app(self)\n\t\tself.window.props.application = self\n\t\tself.window.set_icon_name(\"io.gitlab.hamadmarri.gamma\")\n\t\tself.window.show()\n\t\tself.window.connect(\"focus_in_event\", self.window_event)\n\t\t\n\n\t\n\tdef window_event(self, w, e=None):\n\t\tself.window = w\n\t\tself.builder = w.builder\n\t\tself.signal_handler.emit(\"windo-focus-in\", w)\n\n\n\n\tdef open_files(self, filenames):\t\n\t\tself.signal_handler.emit(\"log\", self, \"do_open:\" + 
str(filenames))\n\t\tself.plugins_manager.THE(\"files_manager\", \"open_files\", {\"filenames\": filenames})\n\n\nif __name__ == \"__main__\":\n\tapp = Application()\n\tapp.run(sys.argv)\n\t\n\n","repo_name":"hamadmarri/gamma-text-editor","sub_path":"gamma.py","file_name":"gamma.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"30894456075","text":"from posts.models import Post\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, redirect\nfrom .post_base import PostBaseView\nfrom django.views.generic import DetailView \nfrom django.contrib.auth.decorators import login_required\n\n@login_required()\ndef post_detail(request, pk):\n \n post = Post.objects.get(pk=pk)\n if(request.method ==\"GET\"):\n context = {\n \"post\": post,\n }\n\n return render(\n request, \n \"posts/post_detail.html\",\n context = context,\n )\n if(request.method==\"POST\"):\n comment = post.comment_set.create(\n content=request.POST.get(\"content\")\n )\n\n return redirect(\n reverse(\n \"post-detail\", \n kwargs={\n 'pk':post.id,\n },\n ), \n )\n\n\n\n","repo_name":"toy-crane/fastblog","sub_path":"fastblog/posts/views/post_detail.py","file_name":"post_detail.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6763283834","text":"#!/usr/bin/env python3\nimport argparse\nfrom pathlib import Path\nimport random\n\nimport cv2\nimport numpy as np\n\nfrom colormotion import dataset\nfrom colormotion.nn.layers import load_weights\nfrom colormotion.nn.model.filters_optical_flow import model, warp_features\nfrom colormotion.user_guided import ab_and_mask_matrix\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Evaluate a colorization model using optical flow for coherent colorization of videos.')\n parser.add_argument('--save', action='store_true', help='save results to a video file')\n parser.add_argument('--ui', action='store_true', help='show UI')\n parser.add_argument('encoder', type=Path, help='encoder weights')\n parser.add_argument('decoder', type=Path, help='decoder weights')\n parser.add_argument('video', help='video file or webcam id')\n return parser.parse_args()\n\n\ndef capture_generator(capture):\n while True:\n success, frame = capture.read()\n if not success:\n return\n yield frame\n\n\ndef directory_generator(path):\n frames = sorted(Path(path).iterdir())\n for filename in frames:\n image = cv2.imread(str(filename))\n if image is not None:\n yield image\n\n\ndef open_video(video):\n if video.isdigit():\n # Ints are threated as camera indexes\n video = int(video)\n elif Path(video).is_dir():\n return directory_generator(video)\n return capture_generator(cv2.VideoCapture(video))\n\n\ndef main(args): # pylint: disable=too-many-locals,too-many-statements\n m = model()\n load_weights(m, args.encoder, by_name=True)\n load_weights(m, args.decoder, by_name=True)\n\n video = args.video\n capture = open_video(video)\n\n writer = None\n if args.save:\n truth = random.choice(['L', 'R'])\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n if video.isdigit():\n stem = video\n else:\n stem = Path(video).stem\n filename = 'output_{}_{}.avi'.format(stem, truth)\n print('Saving to {}'.format(filename))\n writer = cv2.VideoWriter(filename, fourcc, 30.0, (512, 256))\n truth = cv2.VideoWriter('truth_{}'.format(filename), fourcc, 30.0, (256, 256))\n colormotion = 
cv2.VideoWriter('colormotion_{}'.format(filename), fourcc, 30.0, (256, 256))\n\n def on_trackbar(value):\n on_trackbar.value = value / 100000\n\n on_trackbar(16)\n\n if args.ui:\n cv2.namedWindow('ColorMotion')\n cv2.createTrackbar('Mask percentage * 0.1%', 'ColorMotion', 16, 100, on_trackbar)\n\n l_tm1 = None\n prev = None\n interpolated_features_tm1 = None\n prev_mask = None\n # while True:\n for _, frame in zip(range(300), capture):\n frame = cv2.resize(frame, (256, 256), interpolation=cv2.INTER_AREA)\n l, ab = dataset.bgr_to_lab((frame / 255).astype(np.float32))\n if l_tm1 is None:\n # Set warped_features = encoded_features on the first frame\n _, warped_features, _ = m.predict([\n np.array([ab_and_mask_matrix(ab, on_trackbar.value)]), np.array([l]), np.empty((1, 32, 32, 512))])\n else:\n warped_features = warp_features(l_tm1, l, interpolated_features_tm1)[np.newaxis]\n\n mask = np.array([ab_and_mask_matrix(ab, on_trackbar.value)])\n if prev_mask is not None:\n prev_mask[:, :, 2] *= .8 # pylint: disable=unsupported-assignment-operation\n # mask_valid = mask[:, :, :, 2:3]\n # condition = np.stack((mask_valid, ) * 3, axis=-1)\n # mask = np.where(condition, mask, prev_mask)\n mask += prev_mask\n x, _, interpolated_features = m.predict([\n np.array([ab_and_mask_matrix(ab, on_trackbar.value)]), np.array([l]), warped_features])\n prev_mask = mask\n\n ab = x[0]\n if prev is not None:\n ab = (ab + 2 * prev) / 3\n prev = ab\n bgr = np.round(255 * dataset.lab_to_bgr(l, ab)).astype('uint8')\n if writer:\n output = np.concatenate((bgr, frame), axis=1)\n writer.write(output)\n truth.write(frame)\n colormotion.write(bgr)\n if args.ui:\n cv2.imshow('Original stream', frame)\n cv2.imshow('ColorMotion', bgr)\n if (cv2.waitKey(1) & 255) == ord('q'):\n break\n\n interpolated_features_tm1 = interpolated_features[0]\n l_tm1 = l\n\n\nif __name__ == '__main__':\n main(parse_args())\n","repo_name":"ColorMotion/ColorMotion","sub_path":"scripts/evaluation/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31480755161","text":"from tkinter import *\nimport tkinter.messagebox\n\ntk= Tk()\ntk.title(\"Tic Tac Toe by JOYDIP\")\nframe = Frame(tk)\nframe2= Frame(tk)\nframe3= Frame(tk)\n\nclick = TRUE\n\ndef check(button):\n global click\n if button[\"text\"] == \" \" and click == TRUE:\n button[\"text\"] = \"X\"\n click= FALSE\n elif button[\"text\"] == \" \" and click == FALSE:\n button[\"text\"] = \"O\"\n click= TRUE\n elif (button1[\"text\"] == \"X\" and button2[\"text\"] == \"X\" and button3[\"text\"] == \"X\" or\n button4[\"text\"] == \"X\" and button4[\"text\"] == \"X\" and button4[\"text\"] == \"X\" or\n button7[\"text\"] == \"X\" and button8[\"text\"] == \"X\" and button9[\"text\"] == \"X\" or\n button1[\"text\"] == \"X\" and button4[\"text\"] == \"X\" and button7[\"text\"] == \"X\" or\n button2[\"text\"] == \"X\" and button5[\"text\"] == \"X\" and button8[\"text\"] == \"X\" or\n button3[\"text\"] == \"X\" and button6[\"text\"] == \"X\" and button9[\"text\"] == \"X\" or\n button3[\"text\"] == \"X\" and button5[\"text\"] == \"X\" and button7[\"text\"] == \"X\" or\n button1[\"text\"] == \"X\" and button5[\"text\"] == \"X\" and button9[\"text\"] == \"X\"):\n tkinter.messagebox.showerror(\"Winner X\", \"Congrats\")\n elif (button1[\"text\"] == \"O\" and button2[\"text\"] == \"O\" and button3[\"text\"] == \"O\" or\n button4[\"text\"] == \"O\" and button4[\"text\"] == \"O\" and 
button4[\"text\"] == \"O\" or\n button7[\"text\"] == \"O\" and button8[\"text\"] == \"O\" and button9[\"text\"] == \"O\" or\n button1[\"text\"] == \"O\" and button4[\"text\"] == \"O\" and button7[\"text\"] == \"O\" or\n button2[\"text\"] == \"O\" and button5[\"text\"] == \"O\" and button8[\"text\"] == \"O\" or\n button3[\"text\"] == \"O\" and button6[\"text\"] == \"O\" and button9[\"text\"] == \"O\" or\n button3[\"text\"] == \"O\" and button5[\"text\"] == \"O\" and button7[\"text\"] == \"O\" or\n button1[\"text\"] == \"O\" and button5[\"text\"] == \"O\" and button9[\"text\"] == \"O\"):\n tkinter.messagebox.showerror(\"Winner O\", \"Congrats\")\n\nbutton = StringVar()\n\nbutton1= Button(frame, text=\" \", height = 4 , width = 4,command=lambda:check(button1))\nbutton2= Button(frame, text=\" \", height = 4 , width = 4,command=lambda:check(button2))\nbutton3= Button(frame, text=\" \", height = 4 , width = 4,command=lambda:check(button3))\n\nbutton1.pack(side=LEFT)\nbutton2.pack(side=LEFT)\nbutton3.pack(side=LEFT)\n\nbutton4= Button(frame2, text=\" \", height = 4 , width = 4,command=lambda:check(button4))\nbutton5= Button(frame2, text=\" \", height = 4 , width = 4,command=lambda:check(button5))\nbutton6= Button(frame2, text=\" \", height = 4 , width = 4,command=lambda:check(button6))\nbutton4.pack(side=LEFT)\nbutton5.pack(side=LEFT)\nbutton6.pack(side=LEFT)\n\nbutton7= Button(frame3, text=\" \", height = 4 , width = 4,command=lambda:check(button7))\nbutton8= Button(frame3, text=\" \", height = 4 , width = 4,command=lambda:check(button8))\nbutton9= Button(frame3, text=\" \", height = 4 , width = 4,command=lambda:check(button9))\nbutton7.pack(side=LEFT)\nbutton8.pack(side=LEFT)\nbutton9.pack(side=LEFT)\n\n\nframe.pack()\nframe2.pack()\nframe3.pack()\n\ntk.mainloop()\n","repo_name":"joydipnath/Tic-Tac-Toe-Game-using-python","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14277322704","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n p1 = list1\n p2 = list2\n tail = None\n head = None\n \n while (p1 is not None or p2 is not None):\n data = None\n if (p1 is not None and p2 is not None):\n if (p1.val <= p2.val):\n data = p1.val\n p1 = p1.next\n else:\n data = p2.val\n p2 = p2.next\n elif (p1 is not None):\n data = p1.val\n p1 = p1.next\n else:\n data = p2.val\n p2 = p2.next\n \n if (tail is None):\n head = self.insertAtEnd(tail, data)\n tail = head\n else:\n tail = self.insertAtEnd(tail, data)\n \n return head\n \n \n \n def insertAtEnd(self, tail, data):\n ntbi = ListNode(data)\n if (tail is not None):\n tail.next = ntbi\n return ntbi\n ","repo_name":"ps06756/Leetcode-Practice-Problems","sub_path":"Merge Two Sorted Lists/merge two sorted list.py","file_name":"merge two sorted list.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"71211029428","text":"import logging\nfrom logging.handlers import RotatingFileHandler\n\nfrom .config import Configuration\n\nlogger = logging.Logger(\"iTB2RF\", logging.DEBUG)\n\n\ndef setup_logger(config: Configuration):\n console_handler = logging.StreamHandler()\n 
console_handler.setLevel(config.loggingConfiguration.console.logLevel)\n console_handler.setFormatter(logging.Formatter(config.loggingConfiguration.console.logFormat))\n logger.addHandler(console_handler)\n\n file_handler = RotatingFileHandler(\n filename=config.loggingConfiguration.file.fileName,\n mode=\"a\",\n maxBytes=1 * 1024 * 1024,\n backupCount=2,\n encoding=\"utf_8\",\n delay=False,\n )\n file_handler.setLevel(config.loggingConfiguration.file.logLevel)\n file_handler.setFormatter(logging.Formatter(config.loggingConfiguration.file.logFormat))\n logger.addHandler(file_handler)\n","repo_name":"imbus/testbench2robotframework","sub_path":"testbench2robotframework/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"42188224702","text":"#시간 복잡도가 O(ElogV)이다.\n#힙(우선순위 큐를 구현)의 자료구조를 이용하여 특정 노드까지의 최단 거리에 대한 정보를 힙에 담아서 처리하므로 더욱 빠르다.\nimport heapq\nimport sys\nimput=sys.stdin.readline\nINF=int(1e9)\nn,m=map(int,input().split())\nstart=int(input())\ngraph=[[]for i in range(n+1)]\nvisited=[False]*(n+1)\ndistance=[INF]*(n+1)\n\nfor _ in range(m):\n a,b,c=map(int,input().split()) #모든 간선 정보를 입력받는다. a번 노드에서 b노드로 가는 비용이 c라는 의미\n graph[a].append((b,c))\n\ndef dijkstra(start):\n q=[]\n heapq.heappush(q,(0,start)) #시작 노드로 가기 위한 최단 경로는 0으로 설정하여 큐에 삽입\n distance[start]=0\n while q: #큐가 비어있지 않다면\n dist, now=heapq.heappop(q) #가장 최단 거리가 짧은 노드에 대한 정보 꺼내기\n if distance[now]= NUM_NODES:\n\t\tprint(\"All nodes started\")\n\t\tsys.exit(0)\n\tif i == MAX_SECONDS:\n\t\tprint(\"Failed to start all nodes\")\n\t\tsys.exit(1)\n\n\t# Sleep for 1 second.\n\ttime.sleep(1)\n","repo_name":"lwr20/calico-k8s-cluster","sub_path":"vagrant/scripts/wait_for_cluster.py","file_name":"wait_for_cluster.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"20197049820","text":"'''\nAuthors: Allison Kim and Madeleine Emmons\nDate: April 7, 2019\n\nThis program allows a user to extract information from a csv file containing\nbook titles and authors, and use either one of the information to create\na list in forward or reverse alphabetical order. Command line arguments\nin the correct format would print out sorted list.\n'''\n\nimport sys, csv\nimport operator\n\n'''\nThis function's parameters are a cvs filename that contains book titles and authors,\nand a Boolean that checks whether the list should be alphabetical in ascending or\ndescending order. Its output is a printed list of book titles, either in alphabetical\nor reverse alphabetical order.\n'''\ndef sort_books(file, how):\n with open(file, newline = \"\") as csvfile:\n list = csv.reader(csvfile)\n #Sorts the items in the file by book title, column 1.\n sorted_books = sorted(list, key=operator.itemgetter(0))\n if how:\n for book in sorted_books: \n print(book[0], sep = \"\\n\")\n else:\n #Prints the list in reverse order.\n sorted_books.reverse()\n for book in sorted_books: \n print(book[0], sep = \"\\n\")\n\n'''\nThis function's parameters are a cvs filename that contains book titles and authors,\nand a Boolean that checks whether the list should be alphabetical in ascending or\ndescending order. 
Its output is a printed list of authors, either in alphabetical\nor reverse alphabetical order.\n'''\ndef sort_authors(filename, boolean):\n names = []\n with open(filename) as csvfile:\n books = csv.reader(csvfile, delimiter=\",\")\n #Loops through the file and extracts the authors' names \n #from each row and adds them to a list. \n for row in books:\n name = row[2].split(\" (\")\n name = name[0]\n names.append(name)\n #Sorts the books in alphabetical order by last name. \n names = sorted(names, key = lambda x: x.split(\" \")[-1])\n #Prints the list in reverse order. \n if boolean == False:\n names_rev = []\n for y in names:\n names_rev.insert(0, y)\n for j in names_rev:\n print(j)\n else:\n for x in names:\n print(x) \n\ndef main():\n #Catches command line argument error:\n #should include all 3 or 4 requirements.\n if len(sys.argv) < 3 or len(sys.argv) > 4:\n print('Usage: blah blah blah', file=sys.stderr)\n \n else:\n file_object = sys.argv[1]\n\n try:\n action = sys.argv[2]\n \n #Makes sure csv file being called is valid.\n try:\n f = open(file_object)\n f.close()\n except FileNotFoundError:\n print('Usage: blah blah blah', file=sys.stderr)\n return\n\n except:\n print('Usage: blah blah blah', file=sys.stderr)\n return\n\n try:\n action = sys.argv[2]\n except:\n print('Usage: blah blah blah', file=sys.stderr)\n return\n \n try:\n #Using command line arguments, checks if list to be printed should be in\n #alphabetical or reverse alphabetical order.\n direction = sys.argv[3]\n if direction == \"reverse\":\n direction = False\n elif direction == \"forward\":\n direction = True\n else:\n print('Usage: blah blah blah', file=sys.stderr)\n return\n \n except:\n #The direction argument is not mandatory. Consider absent argument as\n #\"forward\".\n direction = True\n\n if action == \"books\":\n sort_books(file_object, direction)\n \n elif action == \"authors\":\n sort_authors(file_object, direction)\n \n else:\n print('Usage: blah blah blah', file=sys.stderr)\n \nif __name__ == '__main__':\n main()\n\n\n","repo_name":"carleton-cs257-spring18/books-emmons_kim","sub_path":"books/books1.py","file_name":"books1.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71703065268","text":"import pytest\nfrom django.core.management import call_command\n\nfrom books.models.library import Library\nfrom utils.assert_queries import assert_django_queries_manager\n\n\n@pytest.fixture(scope=\"session\")\ndef django_db_setup(django_db_setup, django_db_createdb, django_db_blocker):\n with django_db_blocker.unblock():\n if not Library.objects.count():\n call_command(\n \"generate_data\",\n libraries=3,\n books=100,\n avg_readers=3,\n max_readers=10,\n persons=20,\n )\n\n\n@pytest.fixture\ndef assert_django_queries():\n \"\"\"\n A fixture to check for actual database queries being run.\n\n\n To specify the query you can should use this format: {model_path}:{operation}\n model_path or operation could be omitted.\n operation is one of the following: INSERT, UPDATE, DELETE, SELECT,\n SAVEPOINT, RELEASE SAVEPOINT, ROLLBACK TO SAVEPOINT\n\n Usage:\n\n # unordered\n ```\n with assert_django_queries({\n \"api.Account\": 2, # 2 queries on this model, any type\n \"data.Occurrence:DELETE\": 1, # 1 DELETE query on this model\n }):\n # do stuff\n ```\n - In this mode you can use ASSERT_DJANGO_QUERIES_ANY or ASSERT_DJANGO_QUERIES_MULTIPLE_ANY\n - In this mode you can deduplicate same query by specifing the number of 
repetition.\n ex: [\n \"ggdjango_tests.A:INSERT\",\n \"ggdjango_tests.A:INSERT\",\n \"ggdjango_tests.A:INSERT\",\n ] => [\"ggdjango_tests.A:INSERT:3\",]\n - In this mode you can specify an id to `savepoint` operations to precise the order.\n ex: [\n \"1:SAVEPOINT\", # creation of a first savepoint\n \"ggdjango_tests.A:SELECT\",\n \"2:SAVEPOINT\", # creation of a second savepoint\n \"ggdjango_tests.A:INSERT\",\n \"1:RELEASE SAVEPOINT\", # first we close the first savepoint\n \"2:RELEASE SAVEPOINT\", # then the second\n ]\n\n # ordered\n ```\n with assert_django_queries([\n \"api.Account:SELECT\",\n \"api.Account:INSERT\",\n \":SAVEPOINT\",\n \"data.Occurrence:DELETE\",\n \":RELEASE SAVEPOINT\"\n ]):\n # do more stuff\n ```\n - In this mode you can set the kwargs `extra` if you want to allow extra queries.\n\n Edge cases not handled:\n - union queries\n - queries that use a subquery in their FROM clause\n \"\"\"\n return assert_django_queries_manager\n","repo_name":"GitGuardian/blog-playground","sub_path":"books/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"70658103989","text":"#\n# @lc app=leetcode.cn id=55 lang=python3\n#\n# [55] 跳跃游戏\n#\n\n# @lc code=start\n\"\"\"\n给定一个非负整数数组 nums ,你最初位于数组的 第一个下标 。\n\n数组中的每个元素代表你在该位置可以跳跃的最大长度。\n\n判断你是否能够到达最后一个下标。\n\n示例 1:\n\n输入:nums = [2,3,1,1,4]\n输出:true\n解释:可以先跳 1 步,从下标 0 到达下标 1, 然后再从下标 1 跳 3 步到达最后一个下标。\n示例 2:\n\n输入:nums = [3,2,1,0,4]\n输出:false\n解释:无论怎样,总会到达下标为 3 的位置。但该下标的最大跳跃长度是 0 , 所以永远不可能到达最后一个下标。\n\"\"\"\nfrom typing import List\n\n\n# 贪心\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n n, right_max = len(nums), 0\n for i in range(n):\n if i <= right_max:\n right_max = max(i + nums[i], right_max)\n if right_max >= n - 1:\n return True\n return False\n\n\n# @lc code=end\n","repo_name":"xtboooo/MyLeetcode","sub_path":"medium/55.跳跃游戏.py","file_name":"55.跳跃游戏.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39483647678","text":"import unittest\nfrom reverse_linked_list import Solution, ListNode\n\nclass Test(unittest.TestCase):\n\n def test_make_list(self):\n l = self.make_list([1,2,3,4,5])\n self.verify_list(l, [1,2,3,4,5])\n\n def test_reverse_middle(self):\n s = Solution()\n nodes = self.make_list([1,2,3,4,5])\n result = s.reverseBetween(nodes, 2, 4)\n self.verify_list(result, [1,4,3,2,5])\n\n def test_reverse_left(self):\n s = Solution()\n nodes = self.make_list([1,2,3,4,5])\n result = s.reverseBetween(nodes, 1, 3)\n self.verify_list(result, [3,2,1,4,5])\n\n def test_reverse_right(self):\n s = Solution()\n nodes = self.make_list([1,2,3,4,5])\n result = s.reverseBetween(nodes, 3, 5)\n self.verify_list(result, [1,2,5,4,3])\n\n def test_reverse_single_right(self):\n s = Solution()\n nodes = self.make_list([1,2,3,4,5])\n result = s.reverseBetween(nodes, 5, 5)\n self.verify_list(result, [1,2,3,4,5])\n\n def test_reverse_single_left(self):\n s = Solution()\n nodes = self.make_list([1,2,3,4,5])\n result = s.reverseBetween(nodes, 1, 1)\n self.verify_list(result, [1,2,3,4,5])\n\n def make_list(self, nodes):\n head = ListNode(val=1)\n cur = head\n for i in nodes[1:]:\n cur.next = ListNode(val=i)\n cur = cur.next\n return head\n\n def verify_list(self, result, expected):\n r_list = []\n c = result\n while c is not None:\n r_list.append(c.val)\n c = c.next\n\n self.assertEqual(r_list, 
expected)\n","repo_name":"dyronald/leetcode_problems","sub_path":"tests/test_reverse_linked_list.py","file_name":"test_reverse_linked_list.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21748865087","text":"# coding:utf-8\n\nfrom time import time\n\n# number = 100\ndef timeit_wrapper(func, *args, **kwargs):\n def wrapped():\n for i in range(100):\n start = time()\n re = func(*args, **kwargs)\n stop = time()\n print('第{0}次,{1}秒'.format(i, str(stop-start)))\n return re\n return wrapped","repo_name":"tang-xy/GeoTiffOnCeph","sub_path":"MyTimeit.py","file_name":"MyTimeit.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"26415267268","text":"from src.platform.glassfish.authenticate import checkAuth\nfrom src.platform.glassfish.interfaces import GINTERFACES\nfrom auxiliary import Auxiliary\nfrom re import findall\nfrom log import LOG\nimport json\nimport utility\n\nclass Auxiliary:\n\n def __init__(self):\n self.name = 'List deployed applications'\n self.versions = ['Any']\n self.flag = 'gf-list'\n\n def check(self, fingerprint):\n \"\"\"\n \"\"\"\n\n if fingerprint.title == GINTERFACES.GAD:\n return True\n\n return False\n\n def run(self, fingerengine, fingerprint):\n \"\"\"\n \"\"\"\n\n utility.Msg(\"Obtaining deployed applications...\")\n base = 'https://{0}:{1}'.format(fingerengine.options.ip,\n fingerprint.port)\n uri = '/management/domain/applications/list-applications'\n headers = { \"Accept\" : \"application/json\" }\n \n cookie = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title)\n if not cookie:\n utility.Msg(\"Could not get auth on %s:%s\" % (fingerengine.options.ip,\n fingerprint.port),\n LOG.ERROR)\n return\n\n if fingerprint.version in ['3.0']:\n base = base.replace('https', 'http')\n uri = '/management/domain/applications/application'\n return self._parse_old(base + uri, cookie)\n\n\n response = utility.requests_get(base+uri, auth=cookie, headers=headers)\n if response.status_code is 200:\n\n data = json.loads(response.content)\n if not 'properties' in data.keys():\n utility.Msg(\"No applications found.\")\n return\n\n utility.Msg(\"Discovered %d deployed apps\" % len(data['properties']))\n for entry in data['properties'].keys():\n utility.Msg(' /%s' % entry)\n\n def _parse_old(self, url, cookie):\n \"\"\" Of course 3.0 doesn't expose list-applications ...\n \"\"\"\n\n headers = {\n \"Accept\" : \"application/json\",\n \"X-Requested-By\" : \"requests\"\n }\n\n response = utility.requests_get(url, auth=cookie, headers=headers)\n if response.status_code is 200:\n\n data = json.loads(response.content)\n if not u\"Child Resources\" in data.keys():\n utility.Msg(\"No apps found\")\n return\n\n for entry in data[u\"Child Resources\"]:\n splt = entry.rsplit('/',1 )[1]\n utility.Msg(\" /%s\" % splt)\n","repo_name":"hatRiot/clusterd","sub_path":"src/platform/glassfish/auxiliary/list_wars.py","file_name":"list_wars.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"94"} +{"seq_id":"26079140878","text":"import pytest\nfrom formaster.storages.form_storage_implementation import \\\n FormStorageImplimentation\nfrom formaster.models.form import Form\n\n\n@pytest.mark.django_db\ndef test_delete_form(create_user, create_two_form_titles):\n # Arrange\n form_id = 1\n form_storage 
= FormStorageImplimentation()\n\n # Act\n form_storage.delete_form(form_id=form_id)\n\n # Assert\n with pytest.raises(Form.DoesNotExist):\n Form.objects.get(id=form_id)\n","repo_name":"gundetisagar/formaster","sub_path":"formaster/tests/storages/test_delete_form.py","file_name":"test_delete_form.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74895467509","text":"#!/usr/bin/env python3\n\n__description__='''\n\nCreate .csv states file for PastML.\n\n'''\n\n\nif __name__ == '__main__':\n\n import sys\n import argparse\n from readFilesFunctions import *\n\n app=argparse.ArgumentParser(description=__description__)\n app.add_argument(\"-msa\",type=str,help=\"MSA in fasta format.\")\n app.add_argument(\"-tree\",type=str,help=\"Tree in .nwk format.\")\n app.add_argument(\"-out\",type=str,help=\"Output file.\")\n args=app.parse_args()\n\n # Get MSA\n msa=readAlign(args.msa,\"fasta\")\n msadic=align2dic(msa)\n \n # Get tree names\n tree_names=getTreenames(args.tree,\"newick\")\n\n # Write output\n out=open(args.out,\"w\")\n # Header\n out.write(\"ID\")\n for name in msadic:\n seq=msadic[name]\n break\n for col in range(len(seq)):\n out.write(\",col\"+str(col))\n out.write(\"\\n\")\n # Gap/residue as binary 0/1\n for name in tree_names:\n seq=msadic[name]\n out.write(name)\n for col in seq:\n if col==\"-\":\n state=0\n else:\n state=1\n out.write(\",\"+str(state))\n out.write(\"\\n\")\n out.close()\n","repo_name":"edgano/nf_homoplasy","sub_path":"bin/pastml/get_states.py","file_name":"get_states.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13068837516","text":"from datetime import datetime, timedelta, timezone\nimport pytest\nfrom uuid import uuid4\nfrom random import randrange\n\nfrom opencivicdata.legislative.models import (\n LegislativeSession,\n EventAgendaItem,\n EventRelatedEntity,\n)\nfrom opencivicdata.core.models import Jurisdiction, Division\nfrom opencivicdata.legislative.models import EventDocument, BillAction, EventLocation\nimport requests\nfrom councilmatic_core.models import Membership\nfrom lametro.models import (\n LAMetroPerson,\n LAMetroEvent,\n LAMetroBill,\n LAMetroOrganization,\n LAMetroSubject,\n EventBroadcast,\n)\n\n\ndef get_uid_chunk(uid=None):\n \"\"\"\n Create the UID chunk like the one we append to slugs to ensure\n they're unique.\n \"\"\"\n if not uid:\n uid = str(uuid4())\n\n return uid[:13].replace(\"-\", \"\")\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef bill(db, legislative_session):\n class BillFactory:\n def build(self, **kwargs):\n bill_info = {\n \"id\": \"ocd-bill/2436c8c9-564f-4cdd-a2ce-bcfe082de2c1\",\n \"title\": \"APPROVE the policy for a Measure M Early Project Delivery Strategy\",\n \"created_at\": \"2017-06-09 13:06:21.10075-05\",\n \"updated_at\": \"2017-06-09 13:06:21.10075-05\",\n \"identifier\": \"2017-0686\",\n \"slug\": \"2017-0686\",\n \"classification\": [\"Report\"],\n \"legislative_session\": legislative_session,\n \"extras\": {\"restrict_view\": False},\n }\n\n bill_info.update(kwargs)\n\n bill = LAMetroBill.objects.create(**bill_info)\n\n return bill\n\n return BillFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef bill_action(db, bill, metro_organization):\n class BillActionFactory:\n def build(self, **kwargs):\n bill_action_info = {\n \"organization\": metro_organization.build(),\n \"description\": \"test 
action\",\n \"date\": \"2019-11-09\",\n \"order\": 999,\n }\n\n bill_action_info.update(kwargs)\n\n if not bill_action_info.get(\"bill\"):\n bill_action_info[\"bill\"] = bill.build()\n\n bill_action = BillAction.objects.create(**bill_action_info)\n\n return bill_action\n\n return BillActionFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef division(db):\n division_info = {\n \"id\": \"ocd-division/country:us/state:ca/county:los_angeles\",\n \"name\": \"LA\",\n }\n\n division = Division.objects.create(**division_info)\n\n return division\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef jurisdiction(db, division):\n jurisdiction_info = {\n \"id\": \"ocd-jurisdiction/country:us/state:ca/county:los_angeles/transit_authority\",\n \"division_id\": \"ocd-division/country:us/state:ca/county:los_angeles\",\n }\n\n jurisdiction = Jurisdiction.objects.create(**jurisdiction_info)\n\n return jurisdiction\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef legislative_session(db, jurisdiction):\n session_info = {\n \"identifier\": \"2017\",\n \"jurisdiction_id\": \"ocd-jurisdiction/country:us/state:ca/county:los_angeles/transit_authority\",\n \"name\": \"2017 Legislative Session\",\n }\n\n session = LegislativeSession.objects.create(**session_info)\n\n return session\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef event(db, jurisdiction):\n class EventFactory:\n def build(self, has_broadcast=True, **kwargs):\n event_info = {\n \"id\": \"ocd-event/17fdaaa3-0aba-4df0-9893-2c2e8e94d18d\",\n \"created_at\": \"2017-05-27 11:10:46.574-05\",\n \"updated_at\": \"2017-05-27 11:10:46.574-05\",\n \"name\": \"System Safety, Security and Operations Committee\",\n \"start_date\": \"2017-05-18 12:15\",\n \"slug\": uuid4(),\n \"jurisdiction\": jurisdiction,\n }\n\n event_info.update(kwargs)\n\n event = LAMetroEvent.objects.create(**event_info)\n\n # Get event from queryset so it has the start_time annotation from the manager\n metro_event = LAMetroEvent.objects.get(id=event.id)\n\n if metro_event.start_time < datetime.now(timezone.utc) and has_broadcast:\n EventBroadcast.objects.create(event=metro_event)\n\n return metro_event\n\n return EventFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef event_agenda_item(db, event):\n class EventAgendaItemFactory:\n def build(self, **kwargs):\n event_agenda_item_info = {\n \"order\": 1,\n }\n\n if not kwargs.get(\"event\"):\n kwargs[\"event\"] = event.build()\n\n event_agenda_item_info.update(kwargs)\n\n event_agenda_item = EventAgendaItem.objects.create(**event_agenda_item_info)\n\n return event_agenda_item\n\n return EventAgendaItemFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef event_related_entity(db, event_agenda_item):\n class EventRelatedEntityFactory:\n def build(self, **kwargs):\n event_related_entity_info = {}\n\n if not kwargs.get(\"agenda_item\"):\n kwargs[\"agenda_item\"] = event_agenda_item.build()\n\n event_related_entity_info.update(kwargs)\n\n event_related_entity = EventRelatedEntity.objects.create(\n **event_related_entity_info\n )\n\n return event_related_entity\n\n return EventRelatedEntityFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef event_document(db):\n class EventDocumentFactory:\n def build(self, **kwargs):\n event_document_info = {\n \"event_id\": \"ocd-event/17fdaaa3-0aba-4df0-9893-2c2e8e94d18d\",\n }\n\n event_document_info.update(kwargs)\n\n event_document = EventDocument.objects.create(**event_document_info)\n\n event_document.links.create(\n 
url=\"https://metro.legistar.com/View.ashx?M=A&ID=545192&GUID=19F05A99-F3FB-4354-969F-67BE32A46081\"\n )\n\n return event_document\n\n return EventDocumentFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef metro_person(db):\n class LAMetroPersonFactory:\n def build(self, **kwargs):\n uid = str(uuid4())\n\n person_info = {\n \"id\": \"ocd-person/\" + uid,\n \"name\": \"Wonder Woman\",\n \"slug\": \"wonder-woman-\" + get_uid_chunk(uid),\n }\n\n person_info.update(kwargs)\n\n person = LAMetroPerson.objects.create(**person_info)\n\n return person\n\n return LAMetroPersonFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef metro_organization(db):\n class LAMetroOrganizationFactory:\n def build(self, **kwargs):\n uid = str(uuid4())\n\n organization_info = {\n \"id\": \"ocd-organization/\" + uid,\n \"name\": \"Planning and Programming Committee \" + uid,\n \"slug\": \"planning-and-programming-committee-\" + get_uid_chunk(uid),\n }\n\n organization_info.update(kwargs)\n\n organization = LAMetroOrganization.objects.create(**organization_info)\n\n return organization\n\n return LAMetroOrganizationFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef membership(db, metro_organization, metro_person):\n class MembershipFactory:\n def build(self, **kwargs):\n related_org = metro_organization.build()\n related_person = metro_person.build()\n\n membership_info = {\n \"id\": randrange(10000),\n \"organization\": related_org,\n \"person\": related_person,\n \"start_date\": datetime.now().date().isoformat(),\n \"end_date\": (datetime.now() + timedelta(days=7)).date().isoformat(),\n }\n\n membership_info.update(kwargs)\n\n membership = Membership.objects.create(**membership_info)\n\n return membership\n\n return MembershipFactory()\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef metro_subject(db):\n class LAMetroSubjectFactory:\n def build(self, **kwargs):\n if \"name\" in kwargs:\n current_subject = kwargs.get(\"name\")\n else:\n current_subject = \"Metro Gold Line\"\n\n if \"guid\" in kwargs:\n guid = kwargs.get(\"guid\")\n else:\n guid = \"0000-0-0000\"\n\n subject_info = {\n \"name\": current_subject,\n \"guid\": guid,\n }\n\n subject_info.update(kwargs)\n\n subject = LAMetroSubject.objects.create(**subject_info)\n\n return subject\n\n return LAMetroSubjectFactory()\n\n\n@pytest.fixture\ndef concurrent_current_meetings(event):\n \"\"\"\n Two meetings scheduled to begin in the next five minutes.\n \"\"\"\n board_meeting_info = {\n \"id\": \"ocd-event/ef33b22d-b166-458f-b254-b81f656ffc09\",\n \"name\": \"Regular Board Meeting\",\n \"start_date\": LAMetroEvent._time_from_now(minutes=3)\n .replace(second=0, microsecond=0)\n .isoformat(),\n }\n board_meeting = event.build(**board_meeting_info)\n\n construction_meeting_info = {\n \"id\": \"ocd-event/FEC6A621-F5C7-4A88-B2FB-5F6E14FE0E35\",\n \"name\": \"Construction Committee\",\n \"start_date\": LAMetroEvent._time_from_now(minutes=3)\n .replace(second=0, microsecond=0)\n .isoformat(),\n }\n construction_meeting = event.build(**construction_meeting_info)\n\n return board_meeting, construction_meeting\n\n\n@pytest.fixture\n@pytest.mark.django_db\ndef event_location(db, jurisdiction):\n class EventLocationFactory:\n def build(self, **kwargs):\n related_jurisdiction = jurisdiction\n\n event_location_info = {\n \"name\": \"TEST\",\n \"jurisdiction\": related_jurisdiction,\n }\n\n event_location_info.update(**kwargs)\n\n event_location = EventLocation.objects.create(**event_location_info)\n\n return event_location\n\n return 
EventLocationFactory()\n\n\n@pytest.fixture\ndef mocked_streaming_meeting(mocker):\n mock_response = mocker.MagicMock(spec=requests.Response)\n mock_response.status_code = 200\n\n mocker.patch(\"lametro.models.requests.get\", return_value=mock_response)\n\n return mock_response\n","repo_name":"Metro-Records/la-metro-councilmatic","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"94"} +{"seq_id":"72713148470","text":"#!/usr/bin/python3\n\"\"\"test script for the State class\"\"\"\nimport unittest\nfrom models.base_model import BaseModel\nfrom models.state import State\nfrom models import storage\n\n\nclass TestState(unittest.TestCase):\n \"Test class for the State class\"\n def setUp(self):\n \"Reset the objects dictionary before each test\"\n storage.__objects = {}\n\n def test_inheritance(self):\n \"testing the inheritance functionality\"\n state = State()\n self.assertIsInstance(state, BaseModel)\n self.assertIsInstance(state, State)\n\n def test_attributes(self):\n \"testing the attributes\"\n state = State()\n self.assertTrue(hasattr(state, 'name'))\n\n def test_attribute_default(self):\n \"testing the default attributes\"\n state = State()\n self.assertEqual(state.name, \"\")\n\n def test_attribute_assignment(self):\n \"tetsing the assignment of class attributes\"\n state = State()\n state.name = \"California\"\n self.assertEqual(state.name, \"California\")\n\n def test_save_method(self):\n \"testing the inherited save method\"\n state = State()\n prev_updated_at = state.updated_at\n state.save()\n self.assertNotEqual(prev_updated_at, state.updated_at)\n\n def test_to_dict_method(self):\n \"testing the inherited to_dict method\"\n state = State()\n state.name = \"Ondo\"\n state.save()\n state_dict = state.to_dict()\n\n self.assertIsInstance(state_dict, dict)\n self.assertIn('__class__', state_dict)\n self.assertEqual(state_dict['__class__'], 'State')\n self.assertIn('id', state_dict)\n self.assertIn('created_at', state_dict)\n self.assertIn('updated_at', state_dict)\n self.assertIn('name', state_dict)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"TeeMeeLehin/AirBnB_clone","sub_path":"tests/test_models/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29815612829","text":"import xlrd\nimport csv\nimport re\nfrom library.FileProcessBasic import FileProcessBasic\nimport util\nimport os\nfrom datetime import datetime\nfrom xlrd import xldate_as_datetime, xldate_as_tuple\n\n\nclass Record:\n\tdef __init__(self,excel):\n\n\t\tself.table = excel.sheet_by_name(\"隧道\")\n\n\t\tself.header = {\n\t\t\"处理卡编号\":None,\n\t\t\"工程名称\": None,\n\t\t\"里程桩号(变更位置)\": None,\n\t\t\"变更类型\": None,\n\t\t\"原衬砌类别\": None,\n\t\t\"变更后衬砌类别\": None,\n\t\t\"变更原因\": None,\n\t\t\"处理意见\": None,\n\t\t\"处理卡签发日期\": None,\n\t\t\"变更令签发日期\": None,\n\t\t}\n\t\tself.start_row = 0\n\n\t\tself.get_header()\n\t\tself.dataset = self.get_dataset()\n\n\t# get header position\n\tdef get_header(self):\n\t\tn_rows = self.table.nrows\n\t\tn_col = self.table.ncols\n\n\t\tfor row in range(n_rows):\n\t\t\tcells = self.table.row_values(row)\n\n\t\t\tif cells[0] != '序号':\n\t\t\t\tcontinue\n\n\t\t\tself.start_row = row + 2\n\t\t\tfor col in range(n_col):\n\t\t\t\ttext = cells[col].replace('\\n','')\n\t\t\t\tif text in 
self.header:\n\t\t\t\t\tself.header[text] = col\n\t\t\tbreak\n\n\n\t# get all row data\n\tdef get_dataset(self):\n\t\tn_row = self.table.nrows\n\n\t\t# save all row data\n\t\tdataset = []\n\t\tfor row in range(self.start_row, n_row):\n\t\t\trow_data = self.get_data(row)\n\t\t\tdataset.append(row_data)\n\n\t\treturn dataset\n\n\n\t# get row data\n\tdef get_data(self, row):\n\t\tdata = self.header.copy()\n\t\tcells = self.table.row_values(row)\n\n\t\t# get cells directly\n\t\tfor key in data:\n\t\t\tindex = data[key]\n\t\t\tif index != None:\n\t\t\t\tcontent = cells[index]\n\t\t\t\tif isinstance(content, str):\n\t\t\t\t\tdata[key] = content.replace('\\n','')\n\t\t\t\telse:\n\t\t\t\t\tdata[key] = content\n\n\t\t# get CHAG_TYPE1, CHAG_TYPE1, CHAG_TYPE1\n\t\ttext = data[\"处理意见\"]\n\t\ttext_split = text.split(\"变更\",1)\n\t\tif len(text_split) == 2:\n\t\t\tbefore = re.findall('[a-zA-Z0-9]+',text_split[0])\n\t\t\tafter = re.findall('[a-zA-Z0-9]+',text_split[1])\n\n\t\t\tif len(before) != 0 and len(before[-1]) >= 3 :\n\t\t\t\tdata['原衬砌类别'] = before[-1]\n\t\t\tif len(after) != 0 and len(after[0]) >=3:\n\t\t\t\tdata['变更后衬砌类别'] = after[0]\n\n\t\t\tpos = text_split[1].find('类型')\n\t\t\tif pos >=2:\n\t\t\t\tdata['变更类型'] = text_split[1][pos-2 : pos]\n\t\t\telse:\n\t\t\t\tpos = text_split[0].find('类型')\n\t\t\t\tif pos >= 2:\n\t\t\t\t\tdata['变更类型'] = text_split[0][pos-2 : pos]\n\n\t\t# uniform date format\n\t\tdata['变更令签发日期'] = self.format_date(data['变更令签发日期'])\n\n\t\treturn data\n\n\n\t# format date \n\tdef format_date(self, content):\n\t\t# str type\n\t\tif isinstance(content, str):\n\t\t\ttime = re.findall('[0-9]+',content)\n\t\t\tif len(time) == 3:\n\t\t\t\tcontent = str(time[0])+\".\"+str(time[1])+\".\"+str(time[2])\n\t\t# float type\n\t\telif isinstance(content, float):\n\t\t\ttime = datetime(*xldate_as_tuple(content,0))\n\t\t\tyear = str(int(time.strftime('%Y')))\n\t\t\tmonth = str(int(time.strftime('%m')))\n\t\t\tday = str(int(time.strftime('%d')))\n\t\t\tcontent = year + \".\" + month + \".\" +day\n\n\t\treturn content\n\n\nclass Processor(FileProcessBasic):\n\n def save(self, output, record):\n output_path = os.path.join(output, \"CHAG.csv\")\n header = record.header.keys()\n util.check_output_file(output_path, header)\n\n with open(output_path, \"a+\", encoding=\"utf_8_sig\", newline=\"\") as f:\n \tfor data in record.dataset:\n \t\tw = csv.DictWriter(f, data.keys())\n \t\tw.writerow(data)\n\n def run(self, input_path, output_path):\n \t'''\n \tfile_to_process = set()\n \tfor file in os.listdir(input_path):\n \t\tabsolute_file_path = os.path.join(input_path, file)\n\n \t\tif file.endswith(\".xlsx\") or file.endswith(\".xls\"):\n \t\t\tfile_to_process.add(absolute_file_path)\n\n \tfor file in file_to_process:\n \t\texcel = xlrd.open_workbook(file)\n \t\trecord = Record(excel)\n\n \t\tself.save(output_path, record)\n \t\tprint(\"提取完成\" + file)\n \t'''\n \tif os.path.isfile(input_path):\n \t\tif input_path.endswith(\".xlsx\") or input_path.endswith(\".xls\"):\n \t\t\texcel = xlrd.open_workbook(input_path)\n \t\t\trecord = Record(excel)\n\n \t\t\tself.save(output_path, record)\n \t\t\tprint(\"提取完成\" + input_path)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\tinput_path = 'D:/Death in TJU/Junior_2nd/iS3 Lab2/tasks/土木数据/数据提取文件示例/源数据/施工变更'\n\toutput_path = \"D:/Death in TJU/Junior_2nd/iS3 Lab2/tasks/task3\"\n\tprocessor = Processor()\n\tprocessor.run(input_path, 
output_path)\n\t","repo_name":"Tim4Dec/iS3-Lab2","sub_path":"library/CHAG/FileProcess_CHAG.py","file_name":"FileProcess_CHAG.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"36323131761","text":"# coding=utf-8\n\nfrom sandcrawler.scraper import ScraperBase, SimpleScraperBase, CloudFlareDDOSProtectionMixin\n\n\nclass SpacemovCom(SimpleScraperBase):\n BASE_URL = 'https://spacemov.is'\n OTHER_URLS = ['https://spacemov.org', 'https://spacemov.io', 'http://spacemov.io','https://spacemov.tv', 'https://spacemovhd.com', 'https://www.spacemov.tv', 'https://www.spacemov.net', 'http://www.spacemov.ag', ]\n SCRAPER_TYPES = [ScraperBase.SCRAPER_TYPE_OSP, ]\n LANGUAGE = 'eng'\n LONG_SEARCH_RESULT_KEYWORD = '2016'\n MEDIA_TYPES = [ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV]\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING]\n\n\n def _fetch_search_url(self, search_term, media_type):\n return self.BASE_URL + '/search-query/{}/'.format(self.util.quote(search_term))\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_link = soup.find('a', text=u'Next →')\n return next_link['href'] if next_link else None\n\n def get(self, url, **kwargs):\n return super(self.__class__, self).get(url, allowed_errors_codes=[403, ], **kwargs)\n\n def _parse_search_result_page(self, soup):\n\n results = soup.select('div[class=\"movies-list movies-list-full\"] div.ml-item a.ml-mask')\n if not results and len(results) == 0:\n return self.submit_search_no_results()\n\n for results in results:\n\n result = results['href']\n title = results.text\n if not title:\n title = result.strip('/').split('/')[-1].replace('-',' ').split('full movie')[0]\n self.submit_search_result(\n link_url=result,\n link_title=title\n )\n\n\n def parse(self, parse_url, **extra):\n soup = self.get_soup(parse_url + '/watching/')\n self._parse_parse_page(soup)\n\n def _parse_parse_page(self, soup):\n #self.log.debug(soup)\n title = soup.select_one('h1').text.strip()\n index_page_title = self.util.get_page_title(soup)\n\n results = soup.select('#servers-list li[data-drive]')\n for result in results:\n movie_link = result['data-drive']\n self.submit_parse_result(\n index_page_title=index_page_title,\n link_url=movie_link,\n link_text=title,\n )","repo_name":"realchief/Scraping_BeautifulSoup_phantomjs","sub_path":"scrapers/spacemov_com.py","file_name":"spacemov_com.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"71305570549","text":"\"\"\"Defines patterns URL for learning_logs\"\"\"\n\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n # Home page\n path('', views.index, name='index'),\n # Output all topics\n path('topics/', views.topics, name='topics'),\n # Page with detailed information of the topic\n path('topics//', views.topic, name='topic'),\n # Page for adding a new topic\n path('new_topic/', views.new_topic, name='new_topic'),\n # Page for adding new entry\n path('new_entry//', views.new_entry, name='new_entry'),\n # Page for edit entries\n path('edit_entry//', views.edit_entry, name='edit_entry'),\n]\n","repo_name":"Hawool/Learning_Log","sub_path":"learning_log/learning_logs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14418426658","text":"\"\"\"\nCP1404/CP5632 - Practical\nAnswer the following questions:\n1. When will a ValueError occur? - When the numerator or denominator are not valid numbers\n2. When will a ZeroDivisionError occur? - When the input of 0 is put in the denominator\n3. Could you change the code to avoid the possibility of a ZeroDivisionError? - Except ZeroDivisionError: print(0)\n\"\"\"\n\ntry:\n numerator = int(input(\"Enter the numerator: \"))\n denominator = int(input(\"Enter the denominator: \"))\n fraction = numerator / denominator\n print(fraction)\nexcept ValueError:\n print(\"Numerator and denominator must be valid numbers!\")\nexcept ZeroDivisionError:\n print(\"0\")\nprint(\"Finished.\")","repo_name":"lachlanarboit/cp1404_practicals","sub_path":"prac_02/exceptions_demo.py","file_name":"exceptions_demo.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"16197509271","text":"import io\nimport sys\n\n# input here\n_INPUT = \"\"\"\\\n7\n1 2 3 4 5 6\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n\n\nN = int(input())\nA = [0] * 2 + list(map(int, input().split()))\n\nl = [0] * (N + 1)\nfor i in range(2, N + 1):\n l[A[i]] += 1\n\nfor i in range(1, N + 1):\n print(l[i])\n","repo_name":"n-nooobu/atcoder","sub_path":"abc163/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73088456630","text":"import asyncio\nfrom string import Template\n\nfrom discord.ext import commands\n\nfrom twitch_client import twitch_client\n\n\nclass Twitch(commands.Cog):\n\n \"\"\"Tools related to Twitch.\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def followers(self, ctx, channel: str):\n \"\"\"Returns how many followers a Twitch channel has.\"\"\"\n try:\n followers = await twitch_client.get_followers(channel)\n except IndexError:\n return await ctx.send(f\"Could not get followers for {channel}\")\n\n await ctx.send(followers)\n\n @commands.group(name=\"10k\", invoke_without_command=True)\n async def _10k(self, ctx):\n \"\"\"Set of 10k follower commands for Twitch.\"\"\"\n await ctx.send(embed=self.bot.error_embed(\n ctx,\n title=\"Invalid command passed\",\n description=\"An invalid subcommand was passed.\"\n ))\n\n @_10k.command(name=\"when\")\n async def whenis10k(self, ctx, channel: str, *, msg: str=None):\n \"\"\"\n Returns how far a Twitch channel is from 10k followers.\n\n Optionally can take a message format, formatted with ${left}.\n Defaults to format \"${left} until 10k\".\n \"\"\"\n try:\n followers = await twitch_client.get_followers(channel)\n except IndexError:\n return 
await ctx.send(f\"Could not get followers for {channel}\")\n\n left = 10000 - followers\n\n if msg:\n return await ctx.send(Template(msg).substitute(left=left))\n\n await ctx.send(f\"{left} until 10k\")\n\n @_10k.command(name=\"update\")\n async def update10k(self, ctx, channel: str, id: int, *, msg: str=None):\n \"\"\"Update 10k message by ID.\"\"\"\n try:\n followers = await twitch_client.get_followers(channel)\n except IndexError:\n return await ctx.send(f\"Could not get followers for {channel}\")\n\n left = 10000 - followers\n message = await ctx.get_message(id)\n\n if msg:\n return await message.edit(\n content=Template(msg).substitute(left=left)\n )\n\n await message.edit(content=f\"{left} until 10k\")\n\n @_10k.command(name=\"autoupdate\")\n async def autoupdate10k(self, ctx, channel, chan_id, msg_id, *, msg=None):\n \"\"\"Automatically update 10k message by ID every 5 minutes.\"\"\"\n try:\n chan_id = int(chan_id)\n msg_id = int(msg_id)\n except ValueError:\n return await ctx.send(\"Invalid input\")\n\n chan = self.bot.get_channel(chan_id)\n\n if not chan:\n return await ctx.send(\"Could not find discord channel\")\n\n message = await chan.get_message(msg_id)\n try:\n followers = await twitch_client.get_followers(channel)\n except IndexError:\n return await ctx.send(f\"Could not get followers for {channel}\")\n\n if chan_id in self.autoupdate:\n self.autoupdate[chan_id][msg_id] = {\"channel\": channel, \"msg\": msg}\n else:\n self.autoupdate[chan_id] = {\n msg_id: {\"channel\": channel, \"msg\": msg}\n }\n\n self.dump(\"autoupdate10k.json\", self.autoupdate)\n await ctx.send(\n f\"Updating message with ID {msg_id} with {msg} every 5 minutes\"\n )\n\n while True:\n followers = await twitch_client.get_followers(channel)\n left = 10000 - followers\n\n if msg:\n await message.edit(content=Template(msg).substitute(left=left))\n else:\n await message.edit(content=f\"{left} until 10k\")\n\n await asyncio.sleep(300)\n\n @_10k.command(name=\"delete\")\n async def delautoupdate(self, ctx, msg_id, channel_id=None):\n \"\"\"Remove message from 10k auto update.\"\"\"\n if not channel_id:\n channel_id = ctx.channel.id\n\n del self.autoupdate[channel_id][msg_id]\n\n if not self.autoupdate[channel_id]:\n del self.autoupdate[channel_id]\n\n self.dump(\"autoupdate10k.json\", self.autoupdate)\n await ctx.send(\n \"Removed message {} in channel {} from auto update\".format(\n msg_id, channel_id\n )\n )\n\n\ndef setup(bot):\n bot.add_cog(Twitch(bot))\n","repo_name":"zaxutic/Discord-FkPyramids","sub_path":"cogs/twitch.py","file_name":"twitch.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31974891885","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport thermal_monitor.config as config\nfrom thermal_monitor.thermal_frame import ThermalFrame\nimport thermal_monitor.utils as utils\n\n# path_mov = r\"C:\\Users\\RDCunha\\Documents\\GitHub\\CANDOR\\thermal_monitoring\\high_low_high_breathingl.mp4\"\npath_mov = r\"U:\\Users Common\\NLamb\\CANDOR\\captures\\ca_demo_v3_alex\\ca_video_thermal.mp4\"\ncount = 0 \nframe_counter = 0\nerror_counter = 0\ntotal_frame_counter = 500\nstart_frame_counter = 0\nstart_frame = 0\nsuccess = True\nfinal_face = []\nfinal_br = []\nframe_flag = True\n\nthermal_frame_queue = []\ntemperature_pool = {}\nbreath_rate_pool = {}\nbreath_curve_ax_pool = {}\ntimestamp = []\ntimestamp_frame = []\n\ndef visualize_bounding_boxes(annotation_frame, faces):\n 
for face in faces:\n cv2.rectangle(\n annotation_frame,\n tuple(face.bounding_box[:2]),\n tuple(face.bounding_box[2:]),\n utils.uuid_to_color(face.uuid, mode='bgr'),\n 1\n )\n cv2.putText(\n annotation_frame,\n face.uuid[:2],\n (face.bounding_box[0], face.bounding_box[1] - 2),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n utils.uuid_to_color(face.uuid, mode='bgr'),\n 1\n )\n\ndef visualize_breath_rates(annotation_frame, faces):\n face_uuids = [face.uuid for face in faces]\n keys = [*breath_rate_pool.keys()]\n for key in keys:\n if key not in face_uuids:\n breath_rate_pool.pop(key, None)\n for face in faces:\n if face.uuid not in breath_rate_pool or breath_rate_pool[face.uuid][0] >= config.BREATH_RATE_UPDATE_FRAMES:\n breath_rate = face.breath_rate\n if breath_rate is None:\n return\n breath_rate_pool[face.uuid] = [0, breath_rate]\n else:\n breath_rate = breath_rate_pool[face.uuid][1]\n final_br.append(round((breath_rate * 60), 3))\n breath_rate_pool[face.uuid][0] += 1\n cv2.putText(\n annotation_frame,\n str(breath_rate * 60)[:5] + ' bpm',\n (face.bounding_box[0], face.bounding_box[3] + 24),\n cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, \n utils.uuid_to_color(face.uuid, mode='bgr'), \n 1\n )\n\n# def visualize_breath_curves(faces):\n# if plot_update_counter < config.BREATH_CURVE_UPDATE_FRAMES:\n# plot_update_counter += 1\n# return\n# plot_update_counter = 0\n# if not breath_curve_figure_state:\n# if breath_curve_figure is not None:\n# breath_curve_figure.clear()\n# breath_curve_figure = plt.figure()\n# plt.show(block=False)\n# if set([face.uuid for face in faces]) != set(breath_curve_ax_pool.keys()):\n# for key, value in breath_curve_ax_pool.items():\n# value.remove()\n# breath_curve_ax_pool = {}\n# for index, face in enumerate(faces):\n# if face.uuid not in breath_curve_ax_pool:\n# ax = breath_curve_figure.add_subplot(len(faces), 1, index + 1, label=face.uuid)\n# breath_curve_ax_pool[face.uuid] = ax\n# else:\n# ax = breath_curve_ax_pool[face.uuid]\n# ax.clear()\n# ax.plot(*face.breath_samples, c=utils.uuid_to_color(face.uuid, ub=1))\n# ax.set_title(face.uuid[:4])\n# plt.draw()\n# plt.pause(0.001)\n# return plot_update_counter\n\ndef plot(var):\n plt.plot(np.linspace(0, len(var), len(var)), var)\n plt.title(\"Respiration Rate\")\n plt.xlabel(\"Frames\")\n plt.ylabel(\"Breath Frequency (Units?)\")\n plt.show()\n\n# print(\"Checking video path\")\ntry: \n video = cv2.VideoCapture(path_mov)\nexcept:\n print(\"Video does not exist\")\n\n\nwhile frame_counter < total_frame_counter:\n success, frame = video.read()\n\n if not success:\n error_counter += 1\n continue\n\n if start_frame > start_frame_counter:\n start_frame_counter += 1\n continue\n\n frame_counter += 1\n\n count += (1/60)\n timestamp.append(count)\n timestamp_frame.append(count)\n if frame_counter % config.MAX_CACHED_FRAMES == 0:\n timestamp_frame = []\n # print(\"Timestamp main:\")\n # print(timestamp)\n\n print('Visualizing estimation result. 
Press Ctrl + C to stop.')\n # print(\"entered run method\")\n thermal_frame = ThermalFrame(frame, timestamp_frame)\n # if thermal_frame._detect() is None:\n # frame_flag = False\n if len(thermal_frame_queue) > 0:\n thermal_frame.link(thermal_frame_queue[-1])\n if len(thermal_frame_queue) >= config.MAX_CACHED_FRAMES:\n thermal_frame_queue.pop(0)\n thermal_frame_queue[0].detach()\n thermal_frame_queue.append(thermal_frame)\n annotation_frame = thermal_frame.thermal_frame\n visualize_bounding_boxes(annotation_frame, thermal_frame.thermal_faces)\n visualize_breath_rates(annotation_frame, thermal_frame.thermal_faces)\n for face in thermal_frame.thermal_faces:\n final_face.append(face.breath_samples[1][-1]) \n # print(\"Breath samples\")\n # print(*face.breath_samples)\n # if not frame_flag:\n # print('No bounding box')\n # try:\n # final_face.append(final_face[-1])\n # except:\n # continue\n # else:\n # final_face.append(face.breath_samples[1][-1]) \n # visualize_breath_curves(thermal_frame.thermal_faces)\n # cv2.imshow('thermal monitoring', cv2.resize(annotation_frame, config.VISUALIZATION_RESOLUTION))\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n # print(\"Frames completed\", str(frame_counter))\n # frame_flag = True\n\n# print(final_face)\nprint(\"Final rate\")\nprint(final_br)\nprint(len(final_br))\n# plot(final_face)\nplot(final_br)\nvideo.release()\ncv2.destroyAllWindows()\n\n\nprint(\"completed\")\n# exit()\n","repo_name":"rdcunha27/thermal_face","sub_path":"customMonitor.py","file_name":"customMonitor.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17055299157","text":"import copy\n\nimport numpy as np\nimport cv2\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport mmcv\nfrom mmcv.cnn import Linear, bias_init_with_prob, build_activation_layer\nfrom mmcv.cnn.bricks.transformer import build_feedforward_network\nfrom mmcv.runner import auto_fp16, force_fp32\nfrom mmcv.utils import TORCH_VERSION, digit_version\nfrom mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.dense_heads import AnchorFreeHead\nfrom mmdet.models.utils import build_transformer\nfrom mmdet.models.utils.transformer import inverse_sigmoid\n\n\nclass MLP(nn.Module):\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\n@HEADS.register_module()\nclass RelationshipHead(nn.Module):\n def __init__(self,\n in_channels_o1,\n in_channels_o2=None,\n shared_param=True,\n loss_rel=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25)):\n super().__init__()\n\n self.MLP_o1 = MLP(in_channels_o1, in_channels_o1, 128, 3)\n self.shared_param = shared_param\n if shared_param:\n self.MLP_o2 = self.MLP_o1\n else:\n self.MLP_o2 = MLP(in_channels_o2, in_channels_o2, 128, 3)\n self.classifier = MLP(256, 256, 1, 3)\n self.loss_rel = build_loss(loss_rel)\n\n def forward_train(self, o1_feats, o1_assign_results, o2_feats, o2_assign_results, gt_adj):\n rel_pred = self.forward(o1_feats, o2_feats)\n losses = self.loss(rel_pred, gt_adj, o1_assign_results, 
o2_assign_results)\n return losses\n\n def get_relationship(self, o1_feats, o2_feats):\n rel_pred = self.forward(o1_feats, o2_feats)\n rel_results = rel_pred.squeeze(-1).sigmoid()\n rel_results = [_ for _ in rel_results]\n return rel_results\n\n def forward(self, o1_feats, o2_feats):\n # feats: D, B, num_query, num_embedding\n o1_embeds = self.MLP_o1(o1_feats[-1])\n o2_embeds = self.MLP_o2(o2_feats[-1])\n\n num_query_o1 = o1_embeds.size(1)\n num_query_o2 = o2_embeds.size(1)\n o1_tensor = o1_embeds.unsqueeze(2).repeat(1, 1, num_query_o2, 1)\n o2_tensor = o2_embeds.unsqueeze(1).repeat(1, num_query_o1, 1, 1)\n\n relationship_tensor = torch.cat([o1_tensor, o2_tensor], dim=-1)\n relationship_pred = self.classifier(relationship_tensor)\n\n return relationship_pred\n\n def loss(self, rel_preds, gt_adjs, o1_assign_results, o2_assign_results):\n B, num_query_o1, num_query_o2, _ = rel_preds.size()\n o1_assign = o1_assign_results[-1]\n o1_pos_inds = o1_assign['pos_inds']\n o1_pos_assigned_gt_inds = o1_assign['pos_assigned_gt_inds']\n\n if self.shared_param:\n o2_assign = o1_assign\n o2_pos_inds = o1_pos_inds\n o2_pos_assigned_gt_inds = o1_pos_assigned_gt_inds\n else:\n o2_assign = o2_assign_results[-1]\n o2_pos_inds = o2_assign['pos_inds']\n o2_pos_assigned_gt_inds = o2_assign['pos_assigned_gt_inds']\n\n targets = []\n for i in range(B):\n gt_adj = gt_adjs[i]\n target = torch.zeros_like(rel_preds[i].squeeze(-1), dtype=gt_adj.dtype, device=rel_preds.device)\n xs = o1_pos_inds[i].unsqueeze(-1).repeat(1, o2_pos_inds[i].size(0))\n ys = o2_pos_inds[i].unsqueeze(0).repeat(o1_pos_inds[i].size(0), 1)\n target[xs, ys] = gt_adj[o1_pos_assigned_gt_inds[i]][:, o2_pos_assigned_gt_inds[i]]\n targets.append(target)\n targets = torch.stack(targets, dim=0)\n\n targets = 1 - targets.view(-1).long()\n rel_preds = rel_preds.view(-1, 1)\n # weight = (1 - targets) * 3 + targets\n\n loss_rel = self.loss_rel(rel_preds, targets)\n\n if digit_version(TORCH_VERSION) >= digit_version('1.8'):\n loss_rel = torch.nan_to_num(loss_rel)\n\n return dict(loss_rel=loss_rel)\n","repo_name":"OpenGVLab/InternImage","sub_path":"autonomous_driving/openlane-v2/plugin/mmdet3d/baseline/models/heads/relationship_head.py","file_name":"relationship_head.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":2049,"dataset":"github-code","pt":"94"} +{"seq_id":"33350552511","text":"# -*- coding: utf-8 -*-\n# @Time : 18-9-5\n# @Author : baifendian_tyf\n# @File : alignement.py\n# @Software: PyCharm\n\nfrom __future__ import unicode_literals, print_function\nimport codecs\n\ndef alignment_fun1(ref, hyp, debug=True):\n r = ref.split()\n h = hyp.split()\n # costs will holds the costs, like in the Levenshtein distance algorithm\n costs = [[0 for inner in range(len(h) + 1)] for outer in range(len(r) + 1)]\n # backtrace will hold the operations we've done.\n # so we could later backtrace, like the WER algorithm requires us to.\n backtrace = [[0 for inner in range(len(h) + 1)] for outer in range(len(r) + 1)]\n\n OP_OK = 0\n OP_SUB = 1\n OP_INS = 2\n OP_DEL = 3\n\n # First column represents the case where we achieve zero\n # hypothesis words by deleting all reference words.\n for i in range(1, len(r) + 1):\n costs[i][0] = 1 * i\n backtrace[i][0] = OP_DEL\n\n # First row represents the case where we achieve the hypothesis\n # by inserting all hypothesis words into a zero-length reference.\n for j in range(1, len(h) + 1):\n costs[0][j] = 1 * j\n backtrace[0][j] = OP_INS\n\n # computation\n for i in range(1, 
len(r) + 1):\n for j in range(1, len(h) + 1):\n if r[i - 1] == h[j - 1]:\n costs[i][j] = costs[i - 1][j - 1]\n backtrace[i][j] = OP_OK\n else:\n substitutionCost = costs[i - 1][j - 1] + 1 # penalty is always 1\n insertionCost = costs[i][j - 1] + 1 # penalty is always 1\n deletionCost = costs[i - 1][j] + 1 # penalty is always 1\n\n costs[i][j] = min(substitutionCost, insertionCost, deletionCost)\n if costs[i][j] == substitutionCost:\n backtrace[i][j] = OP_SUB\n elif costs[i][j] == insertionCost:\n backtrace[i][j] = OP_INS\n else:\n backtrace[i][j] = OP_DEL\n\n # back trace though the best route:\n i = len(r)\n j = len(h)\n numSub = 0\n numDel = 0\n numIns = 0\n numCor = 0\n if debug:\n print(\"OP\\tREF\\tHYP\")\n lines = []\n while i > 0 or j > 0:\n if backtrace[i][j] == OP_OK:\n numCor += 1\n i -= 1\n j -= 1\n if debug:\n lines.append(\"OK\\t\" + r[i] + \"\\t\" + h[j])\n elif backtrace[i][j] == OP_SUB:\n numSub += 1\n i -= 1\n j -= 1\n if debug:\n lines.append(\"SUB\\t\" + r[i] + \"\\t\" + h[j])\n elif backtrace[i][j] == OP_INS:\n numIns += 1\n j -= 1\n if debug:\n lines.append(\"INS\\t\" + \"****\" + \"\\t\" + h[j])\n elif backtrace[i][j] == OP_DEL:\n numDel += 1\n i -= 1\n if debug:\n lines.append(\"DEL\\t\" + r[i] + \"\\t\" + \"****\")\n if debug:\n lines = reversed(lines)\n for line in lines:\n print(line)\n print(\"#cor \" + str(numCor))\n print(\"#sub \" + str(numSub))\n print(\"#del \" + str(numDel))\n print(\"#ins \" + str(numIns))\n # print ref\n # print hyp\n # print numSub, numDel , numIns,len(r),(numSub + numDel + numIns) / (float)(len(r))\n # return numSub, numDel, numIns, len(r), lines\n return numCor, lines\n\ndef alignment_fun2(str_a, str_b):\n lensum = float(len(str_a) + len(str_b))\n # 得到一个二维的数组,类似用dp[lena+1][lenb+1],并且初始化为0\n lengths = [[0 for j in range(len(str_b) + 1)] for i in range(len(str_a) + 1)]\n\n # enumerate(a)函数: 得到下标i和a[i]\n for i, x in enumerate(str_a):\n for j, y in enumerate(str_b):\n if x == y:\n lengths[i + 1][j + 1] = lengths[i][j] + 1\n else:\n lengths[i + 1][j + 1] = max(lengths[i + 1][j], lengths[i][j + 1])\n\n # 到这里已经得到最长的子序列的长度,下面从这个矩阵中就是得到最长子序列\n result = \"\"\n x, y = len(str_a), len(str_b)\n res_tuple = []\n while x != 0 and y != 0:\n # 证明最后一个字符肯定没有用到\n if lengths[x][y] == lengths[x - 1][y]:\n x -= 1\n elif lengths[x][y] == lengths[x][y - 1]:\n y -= 1\n else: # 用到的从后向前的当前一个字符\n assert str_a[x - 1] == str_b[y - 1] # 后面语句���真,类似于if(a[x-1]==b[y-1]),执行后条件下的语句\n result = str_a[x - 1] + result # 注意这一句,这是一个从后向前的过程\n # print x,y\n res_tuple.append((x, y))\n # print str_a[x-1],str_b[y-1]\n x -= 1\n y -= 1\n\n # 和上面的代码类似\n # if str_a[x-1] == str_b[y-1]:\n # result = str_a[x-1] + result #注意这一句,这是一个从后向前的过程\n # x -= 1\n # y -= 1\n count = 0\n length_max = len(str_a) if len(str_a) > len(str_b) else len(str_b)\n res_a = [-1 for i in range(length_max)]\n res_b = [-1 for i in range(length_max)]\n # res_tuple=res_tuple[::-1]\n res_a = res_b = ''\n a_index = [i[0] for i in res_tuple]\n b_index = [i[1] for i in res_tuple]\n x = ''\n y = ''\n\n index_map = range(len(str_a))\n\n for i in range(len(a_index) - 1):\n a = a_index[i] - a_index[i + 1]\n b = b_index[i] - b_index[i + 1]\n if a > b:\n str_b = str_b[:b_index[i + 1]] + '&' * (a - b) + str_b[b_index[i + 1]:]\n elif a < b:\n # print str_a[:a_index[i+1]]\n str_a = str_a[:a_index[i + 1]] + '&' * (b - a) + str_a[a_index[i + 1]:]\n # print str_a[a_index[i+1]:]\n # print(str_a)\n\n if len(str_a) > len(str_b):\n str_b += '&' * (len(str_a) - len(str_b))\n\n for i in range(len(str_a)):\n if str_a[i] == str_b[i]:\n count 
+= 1\n print(str_a[i], str_b[i])\n\n return count, str_a, str_b\n # print(res_a)\n # # print str_b\n # print(res_b)\n\ndef extract_info(ref, pre):\n\n numCor, str_a, str_b = alignment_fun2(ref, pre)\n print('numCor1: %d' % numCor)\n\n\nif __name__ == '__main__':\n\n f1 = '/home/tyf/api/api_test/testdata/测试文书/传唤证/常玮平传唤通知.jpg_2018-08-15_15-37-23.txt'\n f2 = '/home/tyf/api/api_test/testdata/测试文书/传唤证/工红芬.txt'\n\n fref = codecs.open(f1, 'r', encoding='utf-8').readlines()\n fpred = codecs.open(f2, 'r', encoding='utf-8').readlines()\n fref = [s.strip() for s in fref]\n fpred = [s.strip() for s in fpred]\n\n str1 = ''.join(fref)\n str2 = ''.join(fpred)\n # str1 = u'天气真好阿'\n # str2 = u'今天天气真好阿'\n # numCor, str_a, str_b = alignment_fun2(str1, str2)\n # print('numCor1: %d' % numCor)\n\n extract_info(str1, str2)\n\n # s1 = u' '.join([char for char in str1])\n # s2 = u' '.join([char for char in str2])\n # numCor, lines = alignment_fun1(s1, s2)\n # print('numCor2: %d' % numCor)\n\n\n","repo_name":"TanYufei/tools","sub_path":"wer/alignement.py","file_name":"alignement.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"8841147517","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom statsmodels.distributions.empirical_distribution import ECDF\n\n\n\ndef read_data():\n \"\"\"\n\n :rtype: 对应的收入为空的dataframe等\n \"\"\"\n np.set_printoptions(edgeitems=10)\n np.core.arrayprint._line_width = 180\n df = pd.read_csv(\"cs-training.csv\")\n df_mis_inc = df[df['MonthlyIncome'].isna()] # 只输出为true的df,而true就是MonthlyIncome为空的\n df_not_mis_inc = df[df['MonthlyIncome'].notna()] # 输出收入没丢的\n varNames = ['RevolvingUtilizationOfUnsecuredLines', 'age', 'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio',\n 'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate', 'NumberRealEstateLoansOrLines',\n 'NumberOfTime60-89DaysPastDueNotWorse', 'NumberOfDependents']\n return df_mis_inc, df_not_mis_inc, varNames\n\ndef visualizeECDF( variable, data):\n df = data[:] # 入参是一个向量。故[:]: for a (say) NumPy array, it will create a new view to the same data.\n ecdf = ECDF(df[variable])\n x = np.linspace(min(df[variable]), np.nanpercentile(df[variable], 99.9))\n y = ecdf(x) # y关于x的经验分布函数 公式详见https://en.wikipedia.org/wiki/Empirical_distribution_function\n plt.step(x, y)\n\ndef show():\n df_mis_inc, df_not_mis_inc, varNames=read_data()\n np.random.seed(100)\n fig, axes = plt.subplots(nrows=9, ncols=2) #9行两列的图\n fig.tight_layout()\n fig.set_figheight(45)\n fig.set_figwidth(15)\n plt.subplots_adjust(hspace=0.8)\n for i in [1, 3, 5, 7, 9, 11, 13, 15, 17]:\n ax = plt.subplot(9, 2, i)\n ax.set_title(varNames[(i - 1) // 2])\n visualizeECDF(varNames[(i - 1) // 2], df_not_mis_inc)\n ax = plt.subplot(9, 2, i + 1)\n ax.set_title(varNames[(i - 1) // 2])\n visualizeECDF(varNames[(i - 1) // 2], df_mis_inc)\n # debitRatio在df_not_mis_inc和df_mis_inc中的经验分布函数体现的差距较大\n # NumberOfDependencies的差距也大\n\n# DebtRatio is distributed Overall\ndef debitRatio():\n df = pd.read_csv(\"cs-training.csv\")\n perc = range(81)\n val = []\n for i in perc:\n val.append(np.percentile(df['DebtRatio'], i))\n plt.plot(perc, val, 'go-', linewidth=2, markersize=12)\n\n# 显示负债率最高的有收入的人 的 top1%的负债率曲线\n # 和收入缺失的人的 整体负债率 大致相当\n # 故用前者的收入来替代后者,就归0吧\ndef debtRatioAboutIncome():\n df_mis_inc, df_not_mis_inc, var_names = read_data()\n perc1 = [99.0, 99.1, 99.2, 99.3, 99.4, 99.5, 99.6, 99.7, 99.8, 99.9]\n perc2 = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]\n val1 = 
[]\n val2 = []\n for i in perc1:\n val1.append(np.percentile(df_not_mis_inc['DebtRatio'], i))# 收入无缺失且负债比率高于99%的人的经验曲线\n #给出来的值是超过i%的值,该值是属于df_not_mis_inc['DebtRatio']的一项值\n for i in perc2:\n val2.append(np.percentile(df_mis_inc['DebtRatio'], i)) # 收入缺失,负债比例从0到90的人的经验曲线\n plt.plot(perc1, val1)\n plt.plot(perc2, val2)\n plt.show()\n\ndef NumberOfDependents():\n df_mis_inc, df_not_mis_inc, var_names = read_data()\n df_not_mis_inc.hist(\"NumberOfDependents\")\n plt.xticks(np.arange(0,20,1))\n df_mis_inc.hist(\"NumberOfDependents\")\n plt.xticks(np.arange(0,20,1))\n plt.show()\n\ndef NumberOfDependents_about_income():\n df_mis_inc, df_not_mis_inc, var_names = read_data()\n print(df_mis_inc['NumberOfDependents'].value_counts())\n print(df_not_mis_inc.loc[df_not_mis_inc['NumberOfDependents']==0,[\"MonthlyIncome\"]].mean())\n print(df_not_mis_inc.loc[df_not_mis_inc['NumberOfDependents']==1,['MonthlyIncome']].mean())\n print(df_not_mis_inc.loc[df_not_mis_inc['NumberOfDependents']>1,[\"MonthlyIncome\"]].mean())\n\nif __name__ == '__main__':\n debtRatioAboutIncome()\n NumberOfDependents_about_income()\n\n\n\n","repo_name":"XuhuiJiang/GiveMeCredit","sub_path":"credit/missing_value_analysis.py","file_name":"missing_value_analysis.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29257685304","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 19 22:06:09 2018\n\n@author: yuxiang\n\"\"\"\n\nfrom numpy import exp,sqrt,log\nfrom scipy.stats import norm\n\ndef get_hist_volatility(ticker,startdate,enddate,frequency):\n import pandas as pd\n import numpy as np\n from yahoofinancials import YahooFinancials\n \n histprice = YahooFinancials(ticker)\n dfprice = pd.DataFrame(histprice.get_historical_price_data(startdate,enddate,frequency)[ticker]['prices'])\n dfprice.insert(0,'cusip',ticker) \n df=dfprice[['cusip','formatted_date','close']]\n \n df.loc[:,'lag_close']=df.close.shift(1)\n df['daily_return']=df.close/df.close.shift(1)-1\n df['volatility']=np.std(df.daily_return)\n \n v_daily=df.loc[0,'volatility']\n v_annual=v_daily*np.sqrt(365)\n return v_annual\n\ndef BS_price(call_or_put,S,K,r,v,T):\n d1=(log(S/K)+(r+0.5*v**2)*T)/(v*sqrt(T))\n d2=d1-v*sqrt(T)\n if(call_or_put=='call'):\n return S*norm.cdf(d1)-exp(-r*T)*K*norm.cdf(d2)\n else:\n return S*norm.cdf(-d1)-exp(-r*T)*K*norm.cdf(-d2)\n\ndef BS_vage(call_or_put,S,K,r,v,T):\n d1=(log(S/K)+(r+0.5*v**2)*T)/(v*sqrt(T))\n return S*norm.pdf(d1)*sqrt(T)\n\ndef get_implied_volatility(call_or_put,option_price,S,K,r,sigma,T):\n MAX_ITERATION=100\n EPS=1.0e-7\n\n for i in range(MAX_ITERATION): #i=1 sigma=0.5 call_or_put='call'\n sigma=sigma-(BS_price(call_or_put,S,K,r,sigma,T)-option_price)/BS_vage(call_or_put,S,K,r,sigma,T)\n if(abs(option_price-BS_price(call_or_put,S,K,r,sigma,T))ans:\r\n high = choice-1\r\n chances -=1\r\n count += 1\r\n print(\"Wrong! My number is smaller.\\n\")\r\n else:\r\n low = choice+1\r\n chances -=1\r\n count += 1\r\n print(\"Wrong! 
My number is higher.\\n\")\r\n\r\n","repo_name":"HelalChow/Pyhton-Basics","sub_path":"Homework/hw4/hc2324_hw4_q7.py","file_name":"hc2324_hw4_q7.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"16551924838","text":"import copy\nimport multiprocessing\nimport time\n\n\nclass Windowing(multiprocessing.Process):\n def __init__(self, aQueue, bQueue, window_size):\n super().__init__()\n\n self.aQueue = aQueue\n self.bQueue = bQueue\n self.window_size = window_size\n\n self.all_key_points = list()\n self.last_append = 0\n\n def run(self):\n while True:\n if not self.aQueue.empty(): # if there's input data\n self.window_frame(self.aQueue.get()) # call func\n\n def window_frame(self, frame):\n self.all_key_points.append(frame)\n self.last_append = time.time()\n\n # When WINDOW_SIZE frames are captured create job to classify\n if len(self.all_key_points) == self.window_size:\n self.bQueue.put(copy.copy(self.all_key_points)) # Record overlapping window\n # Save the last x entries for next window\n self.all_key_points = self.all_key_points[-int(self.window_size * 0.6):]\n","repo_name":"jsonnet/GR2AM","sub_path":"src/utils/gesture_preprocessing/windowing.py","file_name":"windowing.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7675589347","text":"# fibonacci generator\r\n\r\n\r\nprev = 0 # More meaningful identifiers\r\ncurr = 1\r\nanswer = 0\r\nwhile prev < 4 * 1000 * 1000: # 4000000 can be difficult to read\r\n temp = prev\r\n prev += curr # Means add curr to prev\r\n curr = temp\r\n if prev % 2 == 0:\r\n answer += prev\r\nprint(answer)\r\n","repo_name":"Justintc217/recursiveparadox","sub_path":"fibinacci gen euler2.py","file_name":"fibinacci gen euler2.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72294055348","text":"import matplotlib.pyplot as plt\nfrom preparacao_dados import dados\nfrom predicoes import dados_reais_x, dados_reais_y, dados_previsto_x, dados_previsto_y\n\n\"\"\"# Plotando gráficos para entender os dados\ndatas = list(dados['time'])\nvalores = list(dados['PriceUSD'])\nanos = [] # armazenando somente os anos das datas para melhor visualização no gráfico de barras\nfor data in datas:\n anos.append(data.year)\"\"\"\n\n# VISUALIZAÇÕES REAIS:\n\n\"\"\"# Grafico de linha\nplt.plot(datas, valores)\nplt.xlabel(\"Anos\")\nplt.ylabel(\"Preço (USD)\")\nplt.title(\"Valor do Bitcoin x Ano (Valor Real)\")\nplt.show()\n\n# Grafico de barra\nplt.bar(anos, valores)\nplt.xlabel(\"Anos\")\nplt.ylabel(\"Preço (USD)\")\nplt.title(\"Valor do Bitcoin x Ano (Valor Real)\")\nplt.show()\n\n# Grafico de pontos\nplt.scatter(datas, valores)\nplt.xlabel(\"Anos\")\nplt.ylabel(\"Preço (USD)\")\nplt.title(\"Valor do Bitcoin x Ano (Valor Real)\")\nplt.show()\"\"\"\n\n# VISUALIZAÇÕES PREDIÇÃO:\nx = []\nfor dados in dados_reais_x:\n x.append(dados)\nfor dados in dados_previsto_x:\n x.append(dados)\n\ny = []\nfor dados in dados_reais_y:\n y.append(dados)\nfor dados in dados_previsto_y:\n y.append(dados)\n\nplt.plot(x, y)\nplt.xlabel(\"Anos\")\nplt.ylabel(\"Preço (USD)\")\nplt.title(\"Valor do Bitcoin x Ano (Valor 
Real)\")\nplt.show()\n","repo_name":"luanfelixlima/Predicao_Bitcoin","sub_path":"visualizacoes.py","file_name":"visualizacoes.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20013497670","text":"from pyramid.view import view_config\nfrom kotti_settings.util import get_setting\n\n\n@view_config(name='contentpreview_settings',\n permission='edit',\n renderer='json')\ndef contentpreview_settings(context, request):\n return {'view_name': get_setting('view_name'),\n 'window_size': get_setting('window_size'),\n 'delay_show': get_setting('delay_show'),\n 'delay_hide': get_setting('delay_hide')}\n","repo_name":"j23d/kotti_contentpreview","sub_path":"kotti_contentpreview/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"40384012113","text":"import dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\n'''\n导入MiniBatchDictionaryLearning,MiniBatch是字典学习的一种方法,\n这种方法专门应用于大数据情况下字典学习。\n当数据量非常大时,严格对待每一个样本就会消耗大量的时间,\n而MiniBatch通过降低计算精度来换取时间利益,但是仍然能够通过大量的数据学到合理的词典。\n换言之,普通的DictionaryLearning做的是精品店,量少而精,但是价格高。\n'''\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\n'''\n导入图片复原函数reconstruct_from_patches_2d,它可以通过pitch复原一整张图片。\n'''\nfrom sklearn.feature_extraction.image import reconstruct_from_patches_2d\n'''\n导入测试工具nose下的异常抛出函数SkipTest\n'''\nfrom sklearn.utils.testing import SkipTest\n'''\n导入SciPy版本检测函数sp_version用于检测版本高低,版本低于0.12的SciPy没有我们需要的样本测试用例\n'''\nfrom sklearn.utils.fixes import sp_version\n\n#检测SciPy版本,如果版本太低就抛出一个异常。程序运行结束\nif sp_version < (0, 12):\n raise SkipTest(\"Skipping because SciPy version earlier than 0.12.0 and \"\n \"thus does not include the scipy.misc.face() image.\")\n#解决中文显示问题\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n#要进行去噪的图片索引\ndenoising_image_index = 0\n\n\nprint('开始数据预处理...')\n\n#读取并展示未加噪声的图片\npath_images_without_noise = 'Image_Denoising'\nimages = dataset.show_images(path_images_without_noise, 256)\n\n#为图片添加椒盐噪声并保存\nsp_noise_imgs = dataset.add_sp_and_save(images)\n\n#展示添加椒盐噪声之后的图片,并返回数据预处理之后的图片\n#sp_data中的每张图片shape:(256, 256)\n#经过了归一化:数据值在0到1之间\nsp_data = dataset.show_sp_noise_images(sp_noise_imgs)\nsp_data = np.array(sp_data) #(9, 256, 256)\nsp_patches = dataset.image_data_patch(sp_data) #图片格式的patch块shape(9, 62001, 8, 8)\n#取出第一张椒盐图片的所有patch (62001, 8, 8), reshape成适合训练的形状(62001, 64)\nsp_patches_data = sp_patches[denoising_image_index].reshape((62001, 8 * 8)) #适合训练的shape(62001, 64)\n\n#为图片添加高斯噪声并保存\ngaussian_noise_imgs = dataset.add_gaussian_and_save(images)\n\n#展示添加高斯噪声之后的图片,并返回数据预处理之后的图片\n#gaussian_data:(256, 256)\n#经过了归一化:数据值在0到1之间\ngaussian_data = dataset.show_gaussian_noise_images(gaussian_noise_imgs)\ngaussian_data = np.array(gaussian_data) #(9, 256, 256)\ngaussian_patches = dataset.image_data_patch(gaussian_data) #图片格式的patch块shape(9, 62001, 8, 8)\n#取出第一张高斯图片的所有patch (62001, 8, 8), reshape成适合训练的形状(62001, 64)\ngaussian_patches_data = gaussian_patches[denoising_image_index].reshape((62001, 8 * 8)) #适合训练的shape(62001, 64)\n\n#对原图进行数据预处理\nimages_data = dataset.pretrain_images(images)\nimages_data = np.array(images_data) #(9, 256, 256)\nimages_patches = dataset.image_data_patch(images_data) #图片格式的patch块shape(9, 62001, 8, 8)\n#取出第一张原始图片的所有patch (62001, 8, 8), reshape成适合训练的形状(62001, 64)\nimages_patches_data = images_patches[denoising_image_index].reshape((62001, 8 * 8)) 
#适合训练的shape(62001, 64)\n\nprint('完成数据预处理...')\n\n\n\nprint('开始展示图片...')\n\n#看看原图的一张图片是什么样的\nplt.figure()\nplt.imshow(images_data[denoising_image_index], cmap='gray')\nplt.title('原图')\nplt.show()\nprint(images_data[denoising_image_index].shape) #看看图片的形状\n\n#看看添加高斯噪声之后的一张图片是什么样的\nplt.figure()\nplt.imshow(gaussian_data[denoising_image_index], cmap='gray')\ngaussian_psnr = dataset.psnr(gaussian_data[denoising_image_index], images_data[denoising_image_index])\nplt.title('添加了高斯噪声的图像\\npsnr : ' + str(round(gaussian_psnr, 2)))\nplt.show()\nprint(gaussian_data[denoising_image_index].shape) #看看图片的形状\n\n#看看添加椒盐噪声之后的一张图片是什么样的\nplt.figure()\nplt.imshow(sp_data[denoising_image_index], cmap='gray')\nsp_psnr = dataset.psnr(sp_data[denoising_image_index], images_data[denoising_image_index])\nplt.title('添加了椒盐噪声的图像\\npsnr : ' + str(round(sp_psnr, 2)))\nplt.show()\nprint(sp_data[denoising_image_index].shape) #看看图片的形状\n\nprint('完成展示图片...')\n\n\n\nprint('开始单层字典学习...')\n\n#针对高斯噪声的单层字典学习去噪\ndataset.gaussian_single_layer_dictionarylearning(gaussian_patches_data, images_patches_data, 0.1)\n\n#针对椒盐噪声的单层字典学习去噪\ndataset.sp_single_layer_dictionarylearning(sp_patches_data, images_patches_data, 0.1)\n\nprint('完成单层字典学习...')\n\n\n\nprint('开始深度字典学习...')\n\n#针对高斯噪声的深层字典学习去噪\ndataset.gaussian_deepdictionarylearning(gaussian_patches_data, images_patches_data, 0.01, 0.1)\n\n#针对椒盐噪声的深度字典学习去噪\ndataset.sp_deepdictionarylearning(sp_patches_data, images_patches_data, 0.01, 0.1)\n\nprint('完成深度字典学习...')","repo_name":"wangaolong/Image_Denoising","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"zh","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"14834672007","text":"\"\"\"\nIl seguente file crea l'ontologia sulla base dei dataset creati in precedenza dagli altri script.\n\"\"\"\n\nimport csv\n\nfrom rdflib import Graph, Literal, Namespace, URIRef, BNode\nfrom rdflib.namespace import RDF\n\n\ng = Graph()\n\nsas = Namespace(\"http://www.standardaccidentstructure.org/ontology/\")\nbase_uri = \"http://www.standardaccidentstructure.org/resource/\"\ng.bind(\"sas\", sas)\n\nsinistri = list()\nluoghi = list()\nveicoli = list()\npersone = list()\n\npersoneCopia = list()\narrayPersona = []\narrayCorrispondenze = []\n\n#Dataset luoghi: elaborazione e caricamento su un dizionario\nprint(\"\\n\\nCreo le istanze di luogo\")\nwith open('../datiElaborati/luoghi.csv') as luogo:\n lettore = csv.DictReader(luogo)\n for riga in lettore:\n uri_luogo = base_uri + 'luogo/' + riga['ID']\n #print(\"\\rCreo l'istanza luogo: \" + uri_luogo)\n classeLuogo = sas.Luogo\n g.add([URIRef(uri_luogo), RDF.type, classeLuogo]) #Creo l'istanza della classe\n\n #Popolo l'istanza con gli attributi\n\n \"\"\" \n #Le URI delle citta prese in considerazione, su dbpedia sono strutturate in questo modo\n #uri_palermo = \"http://dbpedia.org/resource/Palermo\"\n #uri_roma = \"http://dbpedia.org/resource/Roma\"\n #uri_bergamo = \"http://dbpedia.org/resource/Bergamo\"\n #uri_matera = \"http://dbpedia.org/resource/Matera\"\n \"\"\"\n\n g.add([URIRef(uri_luogo), sas.citta, URIRef(\"http://dbpedia.org/resource/\"+riga['Citta'])])\n g.add([URIRef(uri_luogo), sas.via, Literal(riga['Via'])])\n g.add([URIRef(uri_luogo), sas.fondo_stradale, Literal(riga['FondoStradale'])])\n g.add([URIRef(uri_luogo), sas.pavimentazione, Literal(riga['Pavimentazione'])])\n g.add([URIRef(uri_luogo), sas.illuminazione, Literal(riga['Illuminazione'])])\n g.add([URIRef(uri_luogo), sas.coordinate, 
Literal(riga['Coordinate'])])\n\n\n#Dataset veicoli: elaborazione e caricamento su un dizionario\nprint(\"Creo le istanze di veicolo\")\nwith open('../datiElaborati/veicoli.csv') as veicolo:\n lettore = csv.DictReader(veicolo)\n count = 0 \n for riga in lettore:\n\n #Alcuni ID sono duplicati perche si riferiscono allo stesso sinistri quindi aggiungo un numero univoco\n uri_veicolo = base_uri + 'veicolo/' + str(count) + \"-\" + riga['ID'] \n count = count + 1\n #print(\"\\rCreo l'istanza veicolo: \" + uri_veicolo)\n classeVeicolo = sas.Veicolo\n g.add([URIRef(uri_veicolo), RDF.type, classeVeicolo]) #Creo l'istanza della classe\n\n #Popolo l'istanza con gli attributi\n g.add([URIRef(uri_veicolo), sas.modello, Literal(riga['Modello'])])\n g.add([URIRef(uri_veicolo), sas.targa, Literal(riga['Targa'])])\n g.add([URIRef(uri_veicolo), sas.tipo_veicolo, Literal(riga['Tipo veicolo'])])\n\n#Dataset persone: elaborazione e caricamento su un dizionario\nprint(\"Creo le istanze di persona\")\nwith open('../datiElaborati/persone.csv') as persona:\n lettore = csv.DictReader(persona)\n count = 0 \n for riga in lettore:\n\n #Alcuni ID sono duplicati perche si riferiscono allo stesso sinistri quindi aggiungo un numero univoco\n uri_persona = base_uri + 'persona/' + str(count) + \"-\" + riga['ID'] \n count = count + 1\n #print(\"\\rCreo l'istanza persona: \" + uri_persona)\n classePersona = sas.Persona\n g.add([URIRef(uri_persona), RDF.type, classePersona]) #Creo l'istanza della classe\n\n #Popolo l'istanza con gli attributi\n g.add([URIRef(uri_persona), sas.tipo_persona, Literal(riga['TipoPersona'])])\n g.add([URIRef(uri_persona), sas.sesso, Literal(riga['Sesso'])])\n g.add([URIRef(uri_persona), sas.tipo_lesione, Literal(riga['TipoLesione'])])\n\n #Collego la persona al veicolo che stava conducendo\n uri_veicolo = base_uri + 'veicolo/' + str(count) + \"-\" + riga['ID'] \n g.add([URIRef(uri_persona), sas.viaggia, URIRef(uri_veicolo)])\n\n arrayPersona.append(uri_persona)\n \n# Dataset sinistri: elaborazione e caricamento su un dizionario\nprint(\"Creo le istanze di sinistro\")\nwith open('../datiElaborati/sinistri.csv') as sinistro:\n lettore = csv.DictReader(sinistro)\n for riga in lettore:\n uri_sinistro = base_uri + 'sinistro/' + riga['ID']\n print(riga['ID'])\n classeSinistro = sas.Sinistro\n g.add([URIRef(uri_sinistro), RDF.type, classeSinistro]) #Creo l'istanza della classe\n\n #Popolo l'istanza con gli attributi\n g.add([URIRef(uri_sinistro), sas.data, Literal(riga['Data'])])\n g.add([URIRef(uri_sinistro), sas.ora, Literal(riga['Ora'])])\n g.add([URIRef(uri_sinistro), sas.tipo, Literal(riga['Tipo'])])\n g.add([URIRef(uri_sinistro), sas.causa, Literal(riga['Causa'])])\n g.add([URIRef(uri_sinistro), sas.meteo, Literal(riga['Meteo'])])\n g.add([URIRef(uri_sinistro), sas.visibilita, Literal(riga['Visibilita'])])\n g.add([URIRef(uri_sinistro), sas.illesa, Literal(riga['N.Illesi'])])\n g.add([URIRef(uri_sinistro), sas.ferita, Literal(riga['N.Feriti'])])\n g.add([URIRef(uri_sinistro), sas.prognosi_riservata, Literal(riga['N.PrognosiRiservata'])])\n g.add([URIRef(uri_sinistro), sas.deceduta, Literal(riga['N.Deceduti'])])\n\n #Collego il sinistro ad un luogo definito in precedenza\n uri_luogo = base_uri + 'luogo/' + riga['ID']\n g.add([URIRef(uri_sinistro), sas.localizzato, URIRef(uri_luogo)])\n\n #Collego il sinistro alle persone coinvolte\n for persona in arrayPersona:\n if persona.split(\"-\")[1] == riga['ID']:\n arrayCorrispondenze.append(persona)\n\n for persona in arrayCorrispondenze:\n #print(\"Collego 
la persona: \" + persona + \" al sinistro \" + riga['ID'])\n g.add([URIRef(uri_sinistro), sas.coinvolge, URIRef(persona)])\n arrayPersona.remove(persona)\n \n arrayCorrispondenze = []\n \n#Serializzazione dell'ontologia e salvataggio\nprint(\"Serializzo l'ontologia. Attendi...\")\ng.serialize(destination='../standardaccidentstructure.ttl', format='turtle')\nprint(\"\\nOntologia creata con SUCCESSO\\n\")\n","repo_name":"SimoneBonsignore/progetto_opendata","sub_path":"script/data_processer.py","file_name":"data_processer.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30754355500","text":"'''\nThis is the main decision making node.\n'''\nimport rclpy\nfrom rclpy.node import Node\nfrom std_msgs.msg import Bool, String\nimport redis\nimport serial\nimport time\n\nclass LDMNode(Node):\n\n def __init__(self):\n super().__init__('ldm_node')\n # -- Vars.\n self.publish_rate = 3 # Seconds\n self.audio_decision = False\n self.video_decision = False\n # -- Objects\n self.subscriber_audio = self.create_subscription(\n Bool, 'decision/audio', self._set_audio_decision_callback, 10)\n self.subscriber_video = self.create_subscription(\n Bool, 'decision/video', self._set_video_decision_callback, 10)\n self.publisher = self.create_publisher(\n Bool, 'ldm_unit/should_serve', 10)\n self.timer = self.create_timer(\n self.publish_rate, self._trigger_callback)\n self.ser = serial.Serial(\n port='/dev/ttyUSB0', #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0\n baudrate = 115200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1)\n ## Robot vars. \n self.previous_decision = True \n\n def _set_audio_decision_callback(self, msg):\n self.audio_decision = msg.data\n\n def _set_video_decision_callback(self, msg):\n self.video_decision = msg.data\n\n def _trigger_callback(self):\n msg = Bool()\n # decision = self.audio_decision and self.video_decision\n decision = self.video_decision\n msg.data = decision\n #if decision != self.previous_decision:\n self._serial_transmit(self.previous_decision)\n self.previous_decision = decision\n self.publisher.publish(msg)\n\n def _serial_transmit(self, decision : bool):\n ser = self.ser\n if decision:\n \n ser.write(str.encode(\"YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY\\n\"))\n # time.sleep(1)\n # ser.write(str.encode(\"YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY\"))\n # time.sleep(.25)\n # ser.write(str.encode(\"YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY\"))\n # time.sleep(1)\n # ser.write(str.encode(\"YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY\"))\n # time.sleep(.25)\n self.get_logger().info(\n \"SENDING SHOULD SERVE --------> YES\")\n else:\n\n ser.write(str.encode(\"NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN\\n\"))\n # time.sleep(1)\n # ser.write(str.encode(\"NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN\"))\n # time.sleep(.25)\n # ser.write(str.encode(\"NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN\"))\n # time.sleep(1)\n # ser.write(str.encode(\"NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN\"))\n # time.sleep(.25)\n self.get_logger().info(\n \"SENDING SHOULD SERVE --------> NO\")\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = LDMNode()\n rclpy.spin(node)\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector 
destroys the node object)\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mumair01/CS-classes-public","sub_path":"cs-133-hri-nemo-server-robot/src/src/ldm_unit/ldm_unit/ldm_node.py","file_name":"ldm_node.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9763255223","text":"import menu\n\ndef try_except():\n number = input(\"Enter an integer: \")\n\n try:\n number = int(number)\n return \"Successfully converted string to integer: {}\".format(number)\n except ValueError:\n print(\"handled a ValueError\")\n\ndef try_except_else():\n number = input(\"Enter an integer: \")\n\n try:\n number = int(number)\n except ValueError:\n print(\"handled a ValueError\")\n else:\n return \"Else was called because no exception was thrown\"\n\n return \"only executed because 'else' was not called due to exception\"\n\ndef try_except_except():\n number = input(\"Enter an integer: \")\n\n try:\n if \".\" in number:\n number = float(number)\n else:\n number = int(number)\n\n repeat_string = number * \"repeat\"\n return repeat_string\n except ValueError:\n print(\"handled a ValueError\")\n except:\n print(\"Something other than a ValueError happened\")\n\ndef try_except_try_except():\n number = input(\"Enter a number: \")\n\n try:\n number = int(number)\n return \"Successfully converted string to integer: {}\".format(number)\n except ValueError:\n try:\n number = float(number)\n return \"Successfully converted string to float: {}\".format(number)\n except ValueError:\n print(\"handle ValueError\")\n\ndef try_except_finally():\n number = input(\"Enter an integer: \")\n\n try:\n number = int(number)\n except ValueError:\n print(\"handled a ValueError\")\n finally:\n print(\"This will always print no matter what\")\n\n return \"Made it here just fine\"\n\nif __name__ == '__main__':\n # Bad Options\n options = {\n \"try/except\": try_except,\n \"try/except/else\": \"this is not a function\",\n \"try/except/except\": try_except_except,\n \"try/except/finally\": 1\n }\n\n # Good options\n # options = {\n # \"try/except\": try_except,\n # \"try/except/else\": try_except_else,\n # \"try/except/except\": try_except_except,\n # \"try/except/try/except\": try_except_try_except,\n # \"try/except/finally\": try_except_finally\n # }\n\n menu = menu.Menu(options)\n menu.startMenu()\n","repo_name":"jonathanhockman/lc101","sub_path":"chapter7/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"38013334031","text":"import argparse\nimport itertools\nimport json\nimport logging\nimport site\nimport textwrap\nfrom typing import Any\n\nfrom gql import Client, gql\nfrom gql.transport.aiohttp import AIOHTTPTransport\nfrom tabulate import tabulate\n\nINDENT_SIZE = 4\nREGISTERED_TABLES = {\n \"students\": [\n \"firstName\",\n \"lastName\",\n \"email\",\n \"gender\",\n \"aidApproved\",\n \"familyIncome\",\n ]\n}\n\nVF_FIELDS = [\"aidApproved\", \"familyIncome\"]\n\n\nclass Site:\n def __init__(self, url, site_name=None) -> None:\n if site_name:\n self.site_name = site_name\n\n self.transport = AIOHTTPTransport(url=url)\n self.client = Client(transport=self.transport, fetch_schema_from_transport=True)\n return\n\n def query(self, table, fields) -> Any:\n query = self.generate_query(table, fields)\n\n if query:\n result = 
self.client.execute(query)\n\n if result:\n return self.postprocess(result)\n\n def query_aid(self):\n if self.site_name != \"postgres\":\n return\n\n query_string = textwrap.dedent(\n \"\"\"\n query getStudentAid {\n allStudentAids { \n nodes {\n studentId\n aidApproved\n familyIncome\n }\n }\n }\n \"\"\"\n )\n\n query = gql(query_string)\n result = self.client.execute(query)\n\n if result:\n rows = result[\"allStudentAids\"][\"nodes\"]\n records = dict()\n\n for row in rows:\n id = row[\"studentId\"]\n row.pop(\"studentId\")\n records[id] = row\n\n return records\n\n def generate_query(self, table, fields):\n\n if self.site_name == \"sqlite\":\n fields_pad = 2\n elif self.site_name == \"postgres\":\n fields_pad = 3\n\n fields_string = textwrap.indent(\n \"\\n\".join([\"id\", *fields]), \" \" * (fields_pad * INDENT_SIZE)\n )\n\n if self.site_name == \"sqlite\":\n query_string = textwrap.dedent(\n \"\"\"\n query getStudents {{\n {table} {{ \n {fields} \n }}\n }}\n \"\"\"\n ).format(\n fields=fields_string,\n table=table,\n )\n\n elif self.site_name == \"postgres\":\n table = \"all\" + table[0].upper() + table[1:]\n\n query_string = textwrap.dedent(\n \"\"\"\n query getStudents {{\n {table} {{\n nodes {{ \n {fields} \n }}\n }}\n }}\n \"\"\"\n ).format(\n fields=fields_string,\n table=table,\n )\n\n if query_string:\n return gql(query_string)\n\n def postprocess(self, result):\n try:\n if self.site_name == \"sqlite\":\n return result[\"students\"]\n elif self.site_name == \"postgres\":\n return result[\"allStudents\"][\"nodes\"]\n\n return result\n except Exception as exc:\n logging.warning(exc_info=exc)\n\n\nclass Middleware:\n def __init__(self, *sites) -> None:\n self.sites = sites\n\n def query(self, table, fields):\n vf_fields = set(fields) & set(VF_FIELDS)\n\n result = list(\n filter(\n lambda v: bool,\n list(\n itertools.chain(\n *[\n site.query(\n table,\n list(filter(lambda v: v not in VF_FIELDS, fields)),\n )\n for site in self.sites\n ]\n )\n ),\n )\n )\n\n if len(vf_fields):\n postgres_site = list(\n filter(lambda v: v.site_name == \"postgres\", self.sites)\n )[0]\n\n vf_result = postgres_site.query_aid()\n\n for row in result:\n rest = vf_result[row[\"id\"]]\n rest_subset = dict((k, rest.get(k)) for k in vf_fields)\n row.update(rest_subset)\n\n return result\n\n\nmiddleware = Middleware(\n Site(\"http://localhost:8888/graphql\", \"sqlite\"),\n Site(\"http://localhost:5000/graphql\", \"postgres\"),\n)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Distributed database simulation with Postgres and SQLite\"\n )\n\n parser.add_argument(\"-t\", \"--table\", type=str, help=\"Table name\", required=True)\n parser.add_argument(\n \"-f\", \"--field\", nargs=\"+\", help=\"Fields to fetch\", required=True\n )\n\n args = parser.parse_args()\n\n tables = REGISTERED_TABLES.keys()\n\n if not args.table in tables:\n print(\"No such table\", args.table)\n exit(1)\n\n fields = REGISTERED_TABLES.get(args.table)\n\n if not fields:\n print(\"Table {} has no registered fields\".format(args.table))\n exit(1)\n\n for field in args.field:\n if not field in fields:\n print(\"No such field\", field)\n exit(1)\n\n result = middleware.query(\n args.table,\n args.field,\n )\n\n header = result[0].keys()\n rows = [row.values() for row in result]\n\n print(tabulate(rows, 
header))\n","repo_name":"aphilas/ddbs-sim","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30986806603","text":"# BBD's Krita Script Starter Feb 2018\nfrom krita import Extension\nfrom krita import *\nimport pprint\nimport os\nimport sys\nimport xml.etree.ElementTree as ET\n\nEXTENSION_ID = 'pykrita_berenvoedsel'\nMENU_ENTRY = 'berenvoedsel'\n\n\ndef sleep_qt(value):\n \"\"\"Do a sleep of `value` milliseconds\n\n use of python timer.sleep() method seems to be not recommanded in a Qt application.. ??\n \"\"\"\n loop = QEventLoop()\n QTimer.singleShot(value, loop.quit)\n loop.exec()\n pass\n\nclass Berenvoedsel(Extension):\n\n def __init__(self, parent):\n # Always initialise the superclass.\n # This is necessary to create the underlying C++ object\n super().__init__(parent)\n\n def setup(self):\n pass\n\n\n def createActions(self, window):\n action = window.createAction(EXTENSION_ID, MENU_ENTRY, \"tools/scripts\")\n # parameter 1 = the name that Krita uses to identify the action\n # parameter 2 = the text to be added to the menu entry for this script\n # parameter 3 = location of menu entry\n action.triggered.connect(self.action_triggered)\n\n def berenvoedsel_function(self):\n for doc in Krita.instance().documents():\n filepath= doc.fileName()\n #retrieve name\n path_split = os.path.split(str(filepath))\n filename = path_split[1]\n size = len(filename)\n nokra = filename[:size - 4]\n #split text around '_'\n split_file_underscore = nokra.split('_')\n #pad out incremented file number\n filenumber = int(split_file_underscore[-1]) + 1\n filenumber_pad = str(filenumber).zfill(3)\n #join path together with everything we've gathered\n incrementname = f'{path_split[0]}\\\\{nokra[:-3]}{filenumber_pad}.kra'\n doc.saveAs(incrementname)\n doc.close()\n\n print (incrementname)\n\n while os.path.isfile(incrementname) == False:\n sleep_qt(200)\n\n docs = Krita.instance().openDocument(incrementname)\n Krita.instance().activeWindow().addView(docs)\n frame_end = Krita.instance().activeDocument().animationLength()\n\n\n settingRead = Krita.instance().readSetting('','ExportConfiguration-ANIMATION_EXPORT','')\n\n\n\n tree = str(settingRead)\n root = ET.fromstring(tree)\n\n render_split = os.path.split(str(incrementname))\n rendername = render_split[1]\n print (rendername)\n rsize = len(rendername)\n kraless = rendername[:rsize - 4]\n print (kraless)\n\n path_name = render_split[1]\n psize = len(path_name)\n path_name = path_name[:psize - 8]\n\n #parameters written in variables\n\n basename = kraless + '_'\n basename = str(basename)\n\n directory = f\"../../Demo/{path_name}/{kraless}\"\n\n for x in root[5].iter():\n ffmpeg_path = x.text\n ffmpeg_path = str(ffmpeg_path)\n\n video_name = f\"{kraless}.mp4\"\n video_path = f\"{directory}/{video_name}\"\n\n frame_end = str(frame_end)\n last_document_path = str(incrementname)\n\n storedict = {'basename': basename,\n 'custom_ffmpeg_options': '-crf 23 -preset medium -profile:v baseline -pix_fmt yuv420p',\n 'delete_sequence': 'false',\n 'directory': directory,\n 'encode_video': 'true',\n 'ffmpeg_path': ffmpeg_path,\n 'filename': video_path,\n 'first_frame': '0',\n 'frame_export/CICPCompatiblePrimaries': '1',\n 'frame_export/CICPCompatibleTransferFunction': '2',\n 'frame_export/ColorDepthID': 'U8',\n 'frame_export/ColorModelID': 'RGBA',\n 'frame_export/HDRSupported': 'false',\n 'frame_export/ImageContainsTransparency': 
'false',\n 'frame_export/sRGB': 'true',\n 'frame_mimetype': 'image/png',\n 'framerate': '24',\n 'height': '1789',\n 'include_audio': 'false',\n 'last_document_path': last_document_path,\n 'last_frame': frame_end,\n 'only_unique_frames': 'true',\n 'sequence_start': '0',\n 'video_mimetype': 'video/mp4',\n 'width': '1960'}\n\n for element in root.iterfind(\"param\"):\n\n current_type = element.attrib[\"type\"]\n current_key = element.attrib[\"name\"]\n dict_value = storedict[current_key]\n\n if (current_type == \"string\" and current_key != \"basename\"):\n dict_value = f\"\"\n\n element.text = dict_value\n\n str_xml = str(ET.tostring(root, encoding='unicode',method=\"xml\"))\n #include header\n str_xml = f\"\\n{str_xml}\"\n #fix fucked up unicode representations of characters\n str_xml = str_xml.replace(\"<\",\"<\")\n str_xml = str_xml.replace(\">\",\">\")\n\n Krita.instance().writeSetting('','ExportConfiguration-ANIMATION_EXPORT',str_xml)\n\n sleep_qt(300)\n Krita.instance().action('render_animation_again').trigger()\n\n pass\n\n def action_triggered(self):\n self.berenvoedsel_function()\n pass # your active code goes here.\n","repo_name":"ProtofrogUH/Trucendoos","sub_path":"Krita_Plugins/Berenvoedsel/berenvoedsel/berenvoedsel.py","file_name":"berenvoedsel.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"296701457","text":"import requests\nimport pandas as pd\nimport altair as alt\n\nurl = 'https://api.covid19api.com/summary'\nsdf = pd.DataFrame(requests.get(url).json().get('Countries')).sort_values(by=['TotalConfirmed'], ascending=False)\nprint(sdf.head())\ntarget = ['TotalRecovered','TotalConfirmed','TotalDeaths']\ntargetColors = ['green','orange','red']\nbar_chart = alt.Chart(sdf[sdf.TotalDeaths > 10]).transform_fold(\n target,\n as_ = ['Category','Count']\n).mark_bar(size=10).encode(\n x=alt.X('Country',sort='-y'),\n y='Count:Q',\n order=alt.Order(# Sort the segments of the bars by this field\n 'Category:N',\n sort='descending'\n ), \n color = alt.Color('Category:N',\n scale = alt.Scale(domain=target, range=targetColors),\n legend = alt.Legend(title=\"Category\")\n )\n).properties(\n width=800,\n height=400,\n title='Covid-19 cases and deaths'\n)\nbar_chart.display() \n","repo_name":"malminhas/covid","sub_path":"getSummary.py","file_name":"getSummary.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"33305930865","text":"import numpy as np\nimport pytest\n\nfrom landlab import RasterModelGrid\nfrom landlab.components import FlowAccumulator, HeightAboveDrainageCalculator\n\n\ndef test_route_to_multiple_error_raised():\n mg = RasterModelGrid((10, 10))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n z += mg.x_of_node + mg.y_of_node\n fa = FlowAccumulator(mg, flow_director=\"MFD\")\n fa.run_one_step()\n\n channel__mask = mg.zeros(at=\"node\")\n\n with pytest.raises(NotImplementedError):\n HeightAboveDrainageCalculator(mg, channel_mask=channel__mask)\n\n\ndef test_warn_drainage_pits():\n mg = RasterModelGrid((4, 4))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n elev = np.array([[2, 1, 1, 2], [3, 2, 2, 3], [4, 1, 3, 4], [5, 3, 4, 4]])\n z[:] = elev.reshape(len(z))\n\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n fa.run_one_step()\n\n channel__mask = mg.zeros(at=\"node\")\n channel__mask[[2, 6]] = 1\n hd = HeightAboveDrainageCalculator(mg, 
channel_mask=channel__mask)\n\n with pytest.warns(UserWarning):\n hd.run_one_step()\n","repo_name":"landlab/landlab","sub_path":"tests/components/hand_calculator/test_hand.py","file_name":"test_hand.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":324,"dataset":"github-code","pt":"94"} +{"seq_id":"9156652237","text":"# given a webpage, scrap all the wallpaper links and download them\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef scrap_wallpaper(movie):\n\n # URL of the HTML page\n url = \"https://www.ghibli.jp/works/\" + movie + \"/\"\n\n output_folder = \"wallpaper/\" + movie\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n\n # Send a GET request to the URL\n response = requests.get(url)\n if response.status_code == 200:\n soup = BeautifulSoup(response.text, \"html.parser\")\n \n # Find all image links within the \"gallery\" class\n image_links = soup.select(\".gallery .panelarea\")\n\n # Download and save each image\n for img_link in image_links:\n img_url = img_link[\"href\"]\n title = img_link[\"title\"]\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n img_filename = f\"{title}.jpg\"\n img_path = os.path.join(output_folder, img_filename)\n \n with open(img_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n \n print(f\"Downloaded {img_filename}\")\n else:\n print(f\"Failed to download {img_url}\")\n else:\n print(\"Failed to retrieve the HTML content.\")\n\n print(\"Download complete!\")","repo_name":"boyuchen21/ghibli","sub_path":"scrap_wallpaper.py","file_name":"scrap_wallpaper.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18888006914","text":"from django.conf.urls import patterns, include, url, handler404, handler500\nfrom django.contrib import admin\nfrom views import index, contact, student_list, student_detail\nfrom feedbacks.views import FeedbackView\n\nhandler404 = 'pybursa.views.page_not_found_custom' \nhandler500 = 'pybursa.views.page_error_found_custom'\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', index, name='index'),\n url(r'^contact/$', contact, name='contact'),\n url(r'^feedback/$', FeedbackView.as_view(), name='feedback'),\n url(r'^student_list/$', student_list, name='student_list'),\n url(r'^student_detail/$', student_detail, name='student_detail'),\n url(r'^quadratic/', include('quadratic.urls'), name='results'),\n url(r'^courses/', include('courses.urls', namespace=\"courses\")),\n url(r'^students/', include('students.urls', namespace=\"students\")),\n url(r'^coaches/', include('coaches.urls', namespace=\"coaches\")),\n # url(r'^blog/', include('blog.urls')),\n url(r'^polls/', include('polls.urls', namespace=\"polls\")),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"Kovekser/Pybursa_project","sub_path":"pybursa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18760656936","text":"from typing import List\n\n\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n intervals.sort(key=lambda x: x[0])\n result = [intervals[0]]\n\n\n for interval in intervals:\n if result[-1][1] >= interval[0]:\n result[-1][1] = max(interval[1],result[-1][1])\n else:\n result.append(interval)\n return 
result\n\n\nSolution().merge([[1,3],[2,6],[8,10],[15,18]])\nSolution().merge( [[1,4],[4,5]])\n\n","repo_name":"ddobokki/coding-test-practice","sub_path":"leet_code/leet_56.py","file_name":"leet_56.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42788845348","text":"class PMV_Class:\n def __init__(self, DirectoryFile_Info, Configuration, Music_Info, CH_Settings, Video_Select, URL_Data):\n self.DirectoryFile_Info = DirectoryFile_Info\n self.Configuration = Configuration\n self.Music_Info = Music_Info\n self.CH_Settings = CH_Settings\n self.Video_Select = Video_Select\n self.URL_Data = URL_Data\nclass URL_Data:\n def __init__(self, videoURLs, musicURL):\n self.videoURLs = videoURLs\n self.musicURL = musicURL\nclass SectionClass:\n def __init__(self, videoURLs, musicURL):\n self.videoURLs = videoURLs\n self.musicURL = musicURL\nclass Music_Info:\n def __init__(self, musicName, musicType, songStart, songEnd, musicVideoBool, musicVideoOccuranceFactor, trimSong,\n origSoundScale, origSoundFromSection):\n self.musicName = musicName\n self.musicType = musicType\n self.songStart = songStart\n self.songEnd = songEnd\n self.musicVideoBool = musicVideoBool\n self.musicVideoOccuranceFactor = musicVideoOccuranceFactor\n self.trimSong = trimSong\n self.origSoundScale = origSoundScale\n self.origSoundFromSection = origSoundFromSection\n\n\nclass CH_Settings:\n def __init__(self, make_CH_Vid, nSections, requiredDiff, minSections, beatSelect, animationDuration, sdfactor,\n yPosScale, useRankMethod, rollingSections, circleSizeScale, beatEndPosScale):\n self.make_CH_Vid = make_CH_Vid\n self.nSections = nSections\n self.requiredDiff = requiredDiff\n self.minSections = minSections\n self.useRankMethod = useRankMethod\n self.rollingSections = rollingSections\n self.sdfactor = sdfactor\n self.beatSelect = beatSelect\n self.animationDuration = animationDuration\n self.yPosScale = yPosScale\n self.circleSizeScale = circleSizeScale\n self.beatEndPosScale = beatEndPosScale\n\nclass DirectoryFile_Info:\n def __init__(self, finalVidName, vidDownloadDir, pythonDir, introVidDir,\n musicDir, musicVidDir, musicFilePath, finalVidDir, ModelStorageDir):\n self.finalVidName = finalVidName\n self.vidDownloadDir = vidDownloadDir\n self.pythonDir = pythonDir\n self.introVidDir = introVidDir\n self.musicDir = musicDir\n self.musicVidDir = musicVidDir\n self.musicFilePath = musicFilePath\n self.finalVidDir = finalVidDir\n self.ModelStorageDir = ModelStorageDir\n\nclass Configuration:\n def __init__(self, startEndTime, sd_scale, nSplits, randomise, granularity,\n min_length, videoNumber, minVidLength, maxVidLength, cropVidBool,\n cropVidFraction, resize, flipBool,\n addIntro, userName, UseClassifyModel, sectionArray):\n self.startTime = startEndTime[0]\n self.subtractEnd = startEndTime[1]\n self.sd_scale = sd_scale\n self.nSplits = nSplits\n self.randomise = randomise\n self.granularity = granularity\n self.min_length = min_length\n self.videoNumber = videoNumber\n self.minVidLength = minVidLength\n self.maxVidLength = maxVidLength\n self.cropVidBool = cropVidBool\n self.cropVidFraction = cropVidFraction\n self.resize = resize\n self.flipBool = flipBool\n self.addIntro = addIntro\n self.userName = userName\n self.UseClassifyModel = UseClassifyModel\n self.sectionArray = sectionArray #\"other\", \"cunnilingus\", \"titfuck\", \"blowjob_handjob\", \"sex\", \"finish\"\n\nclass Video_Select:\n def __init__(self, includeChannels, 
includeCategories, includePornstars, excludeCategories,\n excludePornstars, excludeChannels, includeSelectNumber, excludeSelectNumber,\n orCategories, orPornstars, classifiedOnly):\n self.includeChannels = includeChannels\n self.includeCategories = includeCategories\n self.includePornstars = includePornstars\n self.includeSelectNumber = includeSelectNumber\n self.excludeCategories = [x for x in excludeCategories if x not in includeCategories]\n self.excludePornstars = [x for x in excludePornstars if x not in includePornstars]\n self.excludeChannels = [x for x in excludeChannels if x not in includeChannels]\n self.excludeSelectNumber = excludeSelectNumber\n self.orCategories =orCategories\n self.orPornstars = orPornstars\n self.classifiedOnly = classifiedOnly\n","repo_name":"AutoPMVs/PMV_Generator_Prod","sub_path":"PMV_Fns/PMV_Class_Setup.py","file_name":"PMV_Class_Setup.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"17092378200","text":"from miditok.constants import CHORD_MAPS\nfrom miditok import Structured\nfrom pathlib import Path\nfrom music_python.MIDIDataset import MIDIDataset\nimport os\n\nclass Preprocessing:\n\n def __init__(self, midiName):\n pitch_range = range(21, 109)\n beat_res = {(0, 4): 8, (4, 12): 4}\n nb_velocities = 32\n additional_tokens = {\n 'Chord': True, 'Rest': True, 'Tempo': False,\n 'rest_range': (2, 4), # (half,4beats)\n 'Program': False,\n \"chord_maps\": CHORD_MAPS,\n \"chord_tokens_with_root_note\": True,\n }\n special_tokens = [\"PAD\", \"BOS\", \"EOS\"]\n \n self.tokenizer = Structured(pitch_range, beat_res, nb_velocities, additional_tokens, special_tokens=special_tokens)\n self.midiName = midiName\n self.dirname = os.path.dirname(__file__)\n self.midisFolder= self.dirname +'/midi_input_files/'\n self.preprocessFolder = self.dirname + '/preprocessed/' + self.midiName # have a folder for each midi file to not overwrite content\n self.target_path = self.midisFolder + midiName + \".mid\"\n self.bpeFolder = self.preprocessFolder + '/bpe'\n self.tokens_paths = list(Path(self.bpeFolder).glob(\"**/*.json\"))\n\n def getTokenizer(self):\n return self.tokenizer\n \n def loadTokenizerFromJSON(self):\n print(\"Loading tokenizer data...\")\n self.tokenizer = Structured(params=Path(self.bpeFolder+\"/BPEparams.json\"))\n return self.tokenizer\n\n # Used when there is a midi upload...\n # Checks if the midi file has already been preprocssed.\n def checkAlreadyPreprocessed(self):\n return os.path.exists(self.bpeFolder)\n\n # Temporary - sanity check to see if files exist\n def checkFolder(self,path):\n files = os.listdir(path)\n for file_name in files:\n print(file_name)\n \n def loadData(self):\n print(\"Loading prompt token data...\")\n # Only use the token path that has the midi name, not the BPE params\n self.tokens_paths = [path for path in self.tokens_paths if path.name == self.midiName +'.json']\n # Load dataset\n data = MIDIDataset(\n self.tokens_paths, max_seq_len=512, min_seq_len=200, # min_seq_len=384\n )\n return data\n\n def preprocessMidi(self): \n # Creates the tokenizer convert MIDIs to tokens\n Path(self.preprocessFolder).mkdir(exist_ok=True, parents=True)\n \n tokens_path = Path(self.preprocessFolder)\n\n # Look through the midi folder and find all the midi files and make it into an array\n midi_paths = list(Path(self.midisFolder).glob('**/*.mid')) + list(Path(self.midisFolder).glob('**/*.midi')) \n\n # Only have the selected midi in the array\n 
for i in range(len(midi_paths) - 1, -1, -1):\n if str(midi_paths[i]) != str(Path(self.target_path)):\n print(str(midi_paths[i]) + \" deleted\")\n del midi_paths[i]\n\n\n print(midi_paths)\n self.tokenizer.tokenize_midi_dataset(midi_paths, tokens_path)\n\n # Learn and apply BPE to data we just tokenized\n tokens_bpe_path = Path(self.bpeFolder)\n tokens_bpe_path.mkdir(exist_ok=True, parents=True)\n self.tokenizer.learn_bpe(\n vocab_size=512,\n tokens_paths=list(tokens_path.glob(\"**/*.json\")),\n start_from_empty_voc=False,\n )\n self.tokenizer.apply_bpe_to_dataset(\n tokens_path,\n tokens_bpe_path,\n )\n print(\"applied bpe\")\n # Loads tokens and create data loaders for training\n # tokens_paths = list(Path('/content/drive/MyDrive/Datasets/example').glob(\"**/*.json\"))\n # dataset = MIDIDataset(\n # self.tokens_paths, max_seq_len=512, min_seq_len=200, # min_seq_len=384\n # )\n self.tokenizer.save_params(Path(self.bpeFolder + '/BPEparams.json'))\n # print(dataset)\n\n# if __name__ == \"__main__\":\n# midiName = \"alla-turca\"\n# preprocess = Preprocessing(midiName)\n# if(not preprocess.checkAlreadyPreprocessed()):\n# preprocess.preprocessMidi()\n# tokenise = preprocess.loadTokenizerFromJSON()\n","repo_name":"rnat697/74_auto_complete_for_music","sub_path":"pyqt_demo_code/music_python/prepocessing.py","file_name":"prepocessing.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70798013110","text":"# SPDX-License-Identifier: GPL-2.0-or-later\n\nfrom __future__ import annotations\n\nimport itertools as it\nimport typing\nimport warnings\n\nfrom typing import Iterable, List, Optional, Tuple\n\nimport bpy\n\nfrom bpy.props import (BoolProperty,\n CollectionProperty,\n IntProperty,\n PointerProperty,\n StringProperty)\n\nfrom bpy.types import Image, PropertyGroup\n\nfrom . import tiled_storage\n\nfrom .utils.image import (clear_channel,\n copy_image_channel,\n copy_image_channel_to_rgb,\n delete_udim_files,\n SplitChannelImageRGB)\nfrom .utils.layer_stack_utils import get_layer_stack_from_prop\nfrom .utils.naming import unique_name_in\n\nfrom .channel import Channel\nfrom .material_layer import MaterialLayer\nfrom .preferences import get_addon_preferences\nfrom .udim_layout import UDIMLayout\n\n\nclass SplitChannelImageProp(SplitChannelImageRGB, PropertyGroup):\n \"\"\"A wrapper around bpy.types.Image. Each RGB channel may be\n allocated to a layer or channel. 
This is used by layers that\n share their images with other layers or by baked layer channels.\n \"\"\"\n\n # The value of r, g, and b when they are not allocated\n unallocated_value = \"\"\n\n image: PointerProperty(\n type=bpy.types.Image\n )\n name: StringProperty(\n name=\"Name\",\n update=SplitChannelImageProp._name_update\n )\n identifier: StringProperty(\n name=\"Identifier\",\n description=\"A unique identifier\",\n default=\"\"\n )\n r: StringProperty(\n name=\"Red Channel\",\n default=\"\"\n )\n g: StringProperty(\n name=\"Green Channel\",\n default=\"\"\n )\n b: StringProperty(\n name=\"Blue Channel\",\n default=\"\"\n )\n\n def __eq__(self, other):\n if isinstance(other, SplitChannelImageRGB):\n return self.identifier == other.identifier\n\n return super().__eq__(other)\n\n def delete(self):\n image = self.image\n\n if image is None:\n return\n if image.source == 'TILED':\n im = self.image_manager\n if im.udim_layout.is_temp_image(image):\n delete_udim_files(image)\n\n # Remove hidden images or images that are not saved\n if (image.name.startswith(\".\")\n or (not image.filepath_raw and not image.packed_files)):\n bpy.data.images.remove(image)\n\n self.image_manager.remove_from_tiled_storage(image)\n\n self.image = None\n\n def release_image(self) -> Optional[Image]:\n \"\"\"Disassociate the underlying bpy.types.Image from this\n instance, setting self.image to None. Returns the\n bpy.types.Image or None if this instance has no image.\n \"\"\"\n image = self.image\n if image is None:\n return None\n self.image_manager.remove_from_tiled_storage(image)\n self.image = None\n return image\n\n def allocate_all_to_layer(self, layer: MaterialLayer) -> None:\n self.allocate_all_to(layer.identifier)\n\n def allocate_single_to_layer(self, layer: MaterialLayer, ch: int) -> None:\n self.allocate_single_to(layer.identifier, ch)\n\n def allocate_to_layer(self, layer: MaterialLayer, ch: int = -1) -> None:\n self.allocate_to(layer.identifier, ch)\n\n def allocate_to_channel_bake(self, channel, ch: int = -1) -> None:\n if channel.layer is not None:\n channel_str = f\"{channel.layer.identifier}.{channel.name}\"\n else:\n channel_str = f\"{channel.layer_stack.identifier}.{channel.name}\"\n self.allocate_to(channel_str, ch)\n\n def initialize_as_layer_image(self,\n name: str,\n image_manager: ImageManager) -> None:\n \"\"\"Initialize the image so that it can be used to store\n MaterialLayer image data.\n \"\"\"\n if self.image is not None:\n raise RuntimeError(\"Already initialized\")\n\n im = image_manager\n\n # TODO Move to ImageManager\n if im.uses_tiled_images:\n self.image = im.udim_layout.create_tiled_image(\n name, is_data=True, is_float=im.use_float)\n else:\n self.image = bpy.data.images.new(name,\n im.image_width, im.image_height,\n alpha=False, is_data=True,\n float_buffer=im.use_float)\n\n self.name = self.image.name\n self.identifier = im.create_identifier()\n\n # Alter the image data so that the image can be packed\n if not im.uses_tiled_images:\n self.image.pixels[0] = 0.0\n self.image.pack()\n\n def initialize_as_bake_image(self,\n image_manager: ImageManager,\n is_data: bool,\n is_float: bool,\n size: Tuple[int, int]) -> None:\n \"\"\"Initialize the image so that it can be used for baking\n MaterialLayer channels.\"\"\"\n if self.image is not None:\n raise RuntimeError(\"Already initialized\")\n\n im = image_manager\n\n name = \".pml_bake_image\"\n\n if im.uses_tiled_images:\n # TODO Use the size argument\n self.image = im.udim_layout.create_tiled_image(\n name, is_data=is_data, 
is_float=is_float,\n temp=True)\n else:\n self.image = bpy.data.images.new(name, size[0], size[1],\n alpha=False,\n is_data=is_data,\n float_buffer=is_float)\n\n self.name = self.image.name\n self.identifier = im.create_identifier()\n # TODO check name not in image_manager.bake_images\n\n @property\n def image_manager(self) -> ImageManager:\n return get_layer_stack_from_prop(self).image_manager\n\n @property\n def layer_stack(self):\n return get_layer_stack_from_prop(self)\n\n @staticmethod\n def _name_update(self_, _context):\n self = self_\n if self.image.name == self.name:\n return\n self.image.name = self.name\n if self.name != self.image.name:\n self.name = self.image.name\n\n\nclass ImageManager(bpy.types.PropertyGroup):\n \"\"\"Manages all the images used by a layer stack. This includes the\n images that store the alpha value for layers as well as the images\n produced by baking layers.\n \"\"\"\n\n layer_images: CollectionProperty(\n type=SplitChannelImageProp,\n name=\"Layer Images\",\n description=\"Images that store the value of paint layers\"\n )\n # Image used for painting on the active layer. This will be None if\n # the active layer has no image (e.g. if it's a fill layer), an\n # image containing a copy of the layer's image data if the layer\n # uses a shared image, or the same image used by the layer\n # otherwise.\n active_image: PointerProperty(\n type=bpy.types.Image,\n name=\"Active Image\",\n description=\"An image containing the data of the active paint layer\",\n )\n # Property for msgbus subscriptions to use, since subscribing to\n # active_image directly doesn't seem to work\n active_image_change: PointerProperty(\n type=bpy.types.Image,\n name=\"\"\n )\n image_width: IntProperty(\n name=\"Width\",\n description=\"Horizontal resolution of image-based layers\",\n min=1, soft_max=2**14, default=1024,\n subtype='PIXEL'\n )\n image_height: IntProperty(\n name=\"Height\",\n description=\"Vertical resolution of image-based layers\",\n min=1, soft_max=2**14, default=1024,\n subtype='PIXEL'\n )\n use_float: BoolProperty(\n name=\"32-bit Float\",\n description=\"Layers use images with 32-bit float bit depth\"\n )\n layers_share_images: BoolProperty(\n name=\"Layers Share Images\",\n description=\"Upto three layers are stored in a single image. \"\n \"Uses much less memory, but changing the active layer \"\n \"becomes slower\",\n )\n\n bake_images: CollectionProperty(\n type=SplitChannelImageProp,\n name=\"Bake Images\",\n description=\"Images that channels can be baked to\"\n )\n bake_samples: IntProperty(\n name=\"Bake Samples\",\n description=\"Number of samples to use when baking layers\",\n default=4, min=1, soft_max=128\n )\n bake_size_percent: IntProperty(\n name=\"Bake Size\", subtype='PERCENTAGE',\n description=\"\",\n default=100, min=1, soft_max=100\n )\n bake_float_always: BoolProperty(\n name=\"Always Bake as Float\",\n description=\"Always use 32-bit float images when baking layers\",\n default=False\n )\n bake_srgb_never: BoolProperty(\n name=\"Never Bake to sRGB\",\n description=\"Always bake images as non-color data. 
Reduces the number \"\n \"of shader image units used when using tiled storage\",\n default=False,\n get=lambda self: (self.uses_tiled_storage\n and self.get(\"bake_srgb_never\", False)),\n set=lambda self, value: self.__setitem__(\"bake_srgb_never\", value)\n )\n bake_shared: BoolProperty(\n name=\"Shared Bake Images\",\n description=\"Pack multiple scalar channels into the same image\",\n default=True\n )\n bake_skip_simple: BoolProperty(\n name=\"Skip Simple\",\n description=\"Don't bake channels with values that are relatively \"\n \"inexpensive to compute\",\n default=True\n )\n\n # Props for when using tiled storage (copies all images to a UDIM\n # and uses that in the shader instead of indivdual images)\n uses_tiled_storage: BoolProperty(\n name=\"Use Tiled Storage\",\n description=(\"Only needed if shader compilation fails due to \"\n \"exceeding the shader image unit limit.\"\n \"Copies the images used by the add-on to a tiled image \"\n \"to bypass the image limit. Significantly increases \"\n \"memory usage.\"),\n default=False,\n update=lambda self, _: self._uses_tiled_storage_update()\n )\n tiles_srgb: PointerProperty(\n type=tiled_storage.TiledStorage,\n name=\"sRGB Bake Tiles\",\n description=\"TiledStorage for sRGB images\"\n )\n tiles_data: PointerProperty(\n type=tiled_storage.TiledStorage,\n name=\"Data Bake Tiles\",\n description=\"TiledStorage for non-color images\"\n )\n\n # UDIM Layout used if the image manager is initialized with\n # tiled=True\n udim_layout: PointerProperty(\n type=UDIMLayout,\n name=\"UDIM Layout\"\n )\n\n # Name of the image to use when a blank image is needed\n _BLANK_IMAGE_NAME = \".pml_blank_image\"\n\n def initialize(self, image_width: int = 1024, image_height: int = 1024,\n use_float: bool = False, tiled: bool = False) -> None:\n \"\"\"Initialize the instance. This should be called before the\n image manager is used.\n Params:\n image_width: The width (in px) of layer images.\n image_height: The height (in px) of layer images.\n use_float: If True then float images are used for layers.\n tiled: If True then layers use tiled images.\n \"\"\"\n self.image_width = image_width\n self.image_height = image_height\n self.use_float = use_float\n\n self[\"uses_tiled_images\"] = tiled\n if tiled:\n self.udim_layout.initialize()\n\n prefs = get_addon_preferences()\n\n # N.B. Sharing is not supported for tiled images\n self.layers_share_images = (prefs.layers_share_images\n and not self.uses_tiled_images)\n\n layer_stack = self.layer_stack\n if layer_stack is None:\n raise RuntimeError(\"ImageManager instance must be a property of a\"\n \" LayerStack\")\n\n self[\"active_layer_id\"] = \"\"\n\n if self.blank_image is None:\n self._create_blank_image()\n\n if layer_stack.active_layer is not None:\n self.set_active_layer(layer_stack.active_layer)\n\n if prefs.use_tiled_storage_default and not self.uses_tiled_images:\n self.uses_tiled_storage = True\n\n def delete(self) -> None:\n \"\"\"Deletes the image manager. 
This removes all images created\n by the manager from the blend file.\"\"\"\n self._delete_tmp_active_image(self.active_layer)\n\n self.delete_tiled_storage()\n\n for img in self.layer_images:\n img.delete()\n self.layer_images.clear()\n\n for img in self.bake_images:\n img.delete()\n self.bake_images.clear()\n\n self.udim_layout.delete()\n\n def on_load(self) -> None:\n \"\"\"Called by the layer stack instance when a blend file is\n loaded.\n \"\"\"\n self.tiles_srgb.on_load()\n self.tiles_data.on_load()\n\n def active_image_name(self, layer: MaterialLayer) -> str:\n \"\"\"If a temporary active image is needed to paint on layer\n (i.e if the layer uses a shared image) then this function\n returns the name the image should have.\n Returns:\n The name of the active image as a string\n \"\"\"\n layer_stack_id = self.layer_stack.identifier\n layer_id = layer.identifier\n\n return f\".plm_active_image.{layer_stack_id}.{layer_id}\"\n\n def _add_layer_image(self) -> SplitChannelImageProp:\n layer_image = self.layer_images.add()\n name = unique_name_in(bpy.data.images, format_str=\".pml_layer_data.{}\")\n layer_image.initialize_as_layer_image(name, self)\n\n self.update_tiled_storage((layer_image.image,))\n\n return layer_image\n\n def _create_blank_image(self) -> bpy.types.Image:\n \"\"\"Creates and returns the image used by the blank_image\n property. If the image already exists then the existing\n image is returned instead.\n \"\"\"\n existing = bpy.data.images.get(self._BLANK_IMAGE_NAME)\n if existing is not None:\n return existing\n\n image = bpy.data.images.new(name=self._BLANK_IMAGE_NAME,\n width=32, height=32,\n float_buffer=False,\n is_data=True)\n\n if not image.name == self._BLANK_IMAGE_NAME:\n image.name = self._BLANK_IMAGE_NAME\n if not image.name == self._BLANK_IMAGE_NAME:\n warnings.warn(\"Unable to correctly name blank_image. 
name=\"\n f\"{image.name}, want {self._BLANK_IMAGE_NAME}\")\n return image\n\n def create_identifier(self) -> str:\n \"\"\"Creates a unique (in this ImageManager) identifier for a\n SplitChannelImageProp.\n \"\"\"\n # All SplitChannelImageProp used by this ImageManager\n all_split_images = it.chain(self.layer_images, self.bake_images)\n\n identifiers = {x.identifier for x in all_split_images}\n return unique_name_in(identifiers)\n\n def _get_unused_layer_image_channel(self):\n \"\"\"Finds a layer image with an unused channel; if none can be\n found then a new image is created.\n\n Returns:\n A tuple containing the layer image and the free channel's index\n \"\"\"\n for layer_image in self.layer_images:\n if not layer_image.is_full:\n return (layer_image, layer_image.get_unused_channel())\n\n new_layer_image = self._add_layer_image()\n\n return (new_layer_image, 0)\n\n def _get_unused_layer_image(self) -> SplitChannelImageProp:\n \"\"\"Finds a layer image with all of its channels free; if none\n can be found then a new image is created.\n\n Returns:\n The layer image\n \"\"\"\n for layer_image in self.layer_images:\n if layer_image.is_empty:\n return layer_image\n\n new_layer_image = self._add_layer_image()\n\n return new_layer_image\n\n def allocate_image_to_layer(self, layer: MaterialLayer) -> None:\n \"\"\"Allocates an image for the layer to store its alpha value in\n If layers_share_images is True then only a single channel of the\n image is allocated.\n This sets the 'image' and 'image_channel' properties on the layer.\n 'image' is the Blender image used by the layer.\n 'image_channel' is the index of the channel of 'image' used (-1)\n if all channels are used.\n \"\"\"\n if layer.has_image:\n self.deallocate_layer_image(layer)\n assert not layer.has_image\n\n if self.layers_share_images:\n layer_img, ch = self._get_unused_layer_image_channel()\n\n layer_img.allocate_single_to_layer(layer, ch)\n else:\n layer_img = self._get_unused_layer_image()\n layer_img.allocate_all_to_layer(layer)\n ch = -1\n layer.image = layer_img.image\n layer.image_channel = ch\n\n def deallocate_layer_image(self, layer: MaterialLayer) -> None:\n \"\"\"If layer has an image or image channel allocated to it then\n the image is deallocated. 
This function sets the properties\n 'image' and 'image_channel' on the layer.\n Does nothing if no image is allocated to the layer\n \"\"\"\n if not layer.has_image:\n return\n\n layer_image = self.layer_images.get(layer.image.name)\n if layer_image is None:\n return\n\n # TODO check that the layer_image is actually allocated to layer\n if not layer.has_shared_image:\n layer_image.deallocate_all()\n else:\n layer_image.deallocate_single(layer.image_channel)\n clear_channel(layer.image, layer.image_channel)\n\n layer.image = None\n layer.image_channel = -1\n\n if layer_image.is_empty:\n self._delete_layer_image(layer_image)\n\n def create_bake_image(self,\n is_data: bool,\n is_float: bool,\n size: Optional[Tuple[int, int]] = None\n ) -> SplitChannelImageProp:\n \"\"\"Creates and stores an image used when baking layers.\"\"\"\n\n if size is None:\n size = self.bake_size\n\n bake_image = self.bake_images.add()\n bake_image.initialize_as_bake_image(self,\n is_data=is_data,\n is_float=is_float,\n size=size)\n return bake_image\n\n def allocate_bake_image(self,\n channel: Channel,\n image: SplitChannelImageProp,\n image_ch: int) -> None:\n \"\"\"Allocates a channel(s) of a bake image to a material channel.\"\"\"\n if image.name not in self.bake_images:\n raise RuntimeError(\"image not found in bake_images collection\")\n if channel.is_baked:\n self.deallocate_bake_image(channel)\n\n if image.channel_allocated(image_ch):\n raise ValueError(\"image channel has already been allocated\")\n\n image.allocate_to_channel_bake(channel, image_ch)\n channel.set_bake_image(image.image, image_ch)\n\n def deallocate_bake_image(self, channel: Channel) -> None:\n \"\"\"Deallocates a material channel's bake image (if any).\"\"\"\n image, image_ch = channel.bake_image, channel.bake_image_channel\n if image is None:\n return\n if image.name not in self.bake_images:\n # image may have been renamed\n bake_image = next((x for x in self.bake_images\n if x.image is image), None)\n if bake_image is None:\n return\n bake_image.name = image.name\n else:\n bake_image = self.bake_images[image.name]\n\n bake_image.deallocate(image_ch)\n if bake_image.is_empty:\n self._delete_bake_image(bake_image)\n\n channel.set_bake_image(None)\n assert not channel.is_baked\n\n def _delete_bake_image(self, image: SplitChannelImageProp) -> None:\n idx = self.bake_images.find(image.name)\n if idx < 0:\n raise ValueError(\"image not found in bake_images\")\n\n image.delete()\n self.bake_images.remove(idx)\n\n def _delete_layer_image(self, image: SplitChannelImageProp) -> None:\n idx = self.layer_images.find(image.name)\n if idx < 0:\n raise ValueError(\"image not found in layer_images\")\n\n image.delete()\n self.layer_images.remove(idx)\n\n def get_image_by_id(self,\n identifier: str) -> Optional[SplitChannelImageProp]:\n \"\"\"Returns a SplitChannelImageProp (used for layer images and\n bake images) with the given identifier.\"\"\"\n return next((x for x in it.chain(self.layer_images, self.bake_images)\n if x.identifier == identifier), None)\n\n def release_image(self, image: Image) -> None:\n \"\"\"Disassociate image from this image manager. 
The image will\n not be deleted when this image manager is deleted.\n \"\"\"\n\n for img_coll in (self.layer_images, self.bake_images):\n identifiers = [x.identifier for x in img_coll if x.image is image]\n for identifier in identifiers:\n split_image = self.get_image_by_id(identifier)\n split_image.release_image()\n split_image.delete()\n\n img_coll.remove(img_coll.find(split_image.name))\n\n def reload_tmp_active_image(self) -> None:\n \"\"\"If a temporary active image is being used instead of the\n active layer's 'image' property then this loads the active\n layer's data into all the RGB channels of the active image.\n\n Any changes made to the active image but not written back\n to the layer will be lost.\n \"\"\"\n # The active layer\n active = self.active_layer\n if active is None:\n return\n\n if (active is not None\n and active.uses_image\n and active.has_shared_image):\n\n copy_image_channel_to_rgb(active.image,\n active.image_channel,\n self.active_image,\n copy_alpha=True)\n\n def reload_active_layer(self) -> None:\n \"\"\"Reloads the active image from the active layer.\"\"\"\n self._set_active_layer(self.active_layer)\n\n def _create_tmp_active_image(self,\n layer: MaterialLayer) -> bpy.types.Image:\n \"\"\"Create an image suitable for painting on for the given layer\n and fill its RGB channels with the layer's alpha value.\n \"\"\"\n image_name = self.active_image_name(layer)\n\n if image_name in bpy.data.images:\n self._delete_tmp_active_image(layer)\n\n new_active_img = bpy.data.images.new(\n image_name,\n self.image_width, self.image_height,\n alpha=False, is_data=True,\n float_buffer=self.use_float)\n\n # Copy the image channel that the layer stores its alpha in\n # to all rgb channels of new_active_img\n copy_image_channel_to_rgb(layer.image,\n layer.image_channel,\n new_active_img,\n copy_alpha=True)\n new_active_img.pack()\n\n return new_active_img\n\n def _delete_tmp_active_image(self, layer: MaterialLayer) -> None:\n \"\"\"Deletes any active image made for the given layer. 
Does\n nothing if there is no active image for the layer.\n \"\"\"\n if layer is None:\n return\n\n image_name = self.active_image_name(layer)\n\n image = bpy.data.images.get(image_name)\n if image is not None:\n bpy.data.images.remove(image)\n\n @property\n def _is_using_tmp_active_image(self) -> bool:\n tmp_image_name = self.active_image_name(self.active_layer)\n return (self.active_image is not None\n and self.active_image.name == tmp_image_name)\n\n def _replace_active_image(self,\n layer: MaterialLayer,\n old_layer: MaterialLayer) -> None:\n\n # Only deletes active images made by _create_tmp_active_image\n self._delete_tmp_active_image(old_layer)\n\n if not layer.uses_image:\n new_active_img = None\n\n elif not layer.has_shared_image:\n # Use the actual image that the layer stores its data in\n new_active_img = layer.image\n\n else:\n # Use a new image that is not referenced by the layer and\n # fill it with the layer's image data\n\n new_active_img = self._create_tmp_active_image(layer)\n\n if new_active_img is self.active_image:\n # No changes if the image is already active\n return\n\n self.active_image = new_active_img\n\n bpy.msgbus.publish_rna(key=self.active_image)\n bpy.msgbus.publish_rna(\n key=self.path_resolve(\"active_image_change\", False))\n\n def _set_active_layer(self,\n new_layer: MaterialLayer) -> None:\n \"\"\"Changes the active layer from old_layer to new_layer\"\"\"\n\n # The currently active layer\n old_layer = self.active_layer\n\n if (old_layer is not None\n and old_layer.has_image\n and old_layer.has_shared_image\n and self._is_using_tmp_active_image):\n\n copy_image_channel(self.active_image,\n 0,\n old_layer.image,\n old_layer.image_channel)\n\n if (self.uses_tiled_storage\n and old_layer is not None\n and old_layer.has_image):\n self.update_tiled_storage((old_layer.image,))\n\n self._replace_active_image(new_layer, old_layer)\n\n def set_active_layer(self, layer: MaterialLayer) -> None:\n \"\"\"Sets the active layer. 
This will also set the active_image\n property to an appropriate value for the layer.\n If currently using a temp active image then its data will be\n written back to the previous active layer.\n \"\"\"\n # The identifier of the currently active layer\n current_id = self[\"active_layer_id\"]\n\n if layer.identifier == current_id:\n return\n\n self._set_active_layer(layer)\n\n self[\"active_layer_id\"] = layer.identifier\n\n def set_paint_canvas(self, context=None) -> None:\n \"\"\"Sets the image paint canvas based on this image_manager's\n active layer.\n \"\"\"\n if context is None:\n context = bpy.context\n\n paint_settings = context.scene.tool_settings.image_paint\n\n paint_settings.mode = 'IMAGE'\n\n active_layer = self.active_layer\n\n paint_settings.canvas = (active_layer.find_secondary_image()\n if self.active_image is None\n else self.active_image)\n\n def resize_all_layers(self, width: int, height: int) -> None:\n \"\"\"Resize all layer images created by this image manager.\"\"\"\n for image in self.layer_images:\n bl_image = image.image\n bl_image.scale(width, height)\n\n active_image = self.active_image\n if active_image is not None:\n if tuple(active_image.size) != (width, height):\n active_image.scale(width, height)\n\n # Need to edit pixel data after scale or texture paint may\n # display blank tiles when trying to paint (cause unknown).\n active_image.pixels[0] = active_image.pixels[0]\n active_image.update()\n\n self.image_width = width\n self.image_height = height\n\n def _uses_tiled_storage_update(self):\n \"\"\"Called when the uses_tiled_storage prop changes.\"\"\"\n if self.uses_tiled_storage:\n # Initialize tiled storage using all this image_manager's\n # images\n self.update_tiled_storage_all()\n else:\n # Clear tiled storage\n self.delete_tiled_storage()\n self.layer_stack.node_manager.rebuild_node_tree()\n\n def delete_tiled_storage(self) -> None:\n \"\"\"Clears all TiledStorage instances used by this image manager.\n Can be called even if the instances are uninitialized.\n \"\"\"\n self.tiles_srgb.delete()\n self.tiles_data.delete()\n\n def find_in_tiled_storage(self,\n image: Image\n ) -> Tuple[tiled_storage.TiledStorage, int]:\n \"\"\"Searches for image in this ImageManager's TiledStorage\n instances returning the instance and the tile number of image.\n Params:\n image: a bpy.types.Image\n Returns:\n A tuple, (TiledStorage instance, tile_number) or (None, -1)\n if the image was not found.\n \"\"\"\n if image in self.tiles_srgb:\n return self.tiles_srgb, self.tiles_srgb.get_image_tile_num(image)\n if image in self.tiles_data:\n return self.tiles_data, self.tiles_data.get_image_tile_num(image)\n return None, -1\n\n def remove_from_tiled_storage(self, image: Image) -> None:\n \"\"\"Remove an image from tiled storage.\"\"\"\n if self.tiles_srgb and image in self.tiles_srgb:\n self.tiles_srgb.remove_image(image)\n if self.tiles_data and image in self.tiles_data:\n self.tiles_data.remove_image(image)\n\n def update_tiled_storage_all(self) -> None:\n \"\"\"Updates the tiled storage with all the layer images and\n bake images of this image manager. Will initialize the\n TiledStorage instances if necessary.\n \"\"\"\n images = self.layer_images_blend + self.bake_images_blend\n self.update_tiled_storage(images)\n\n def update_tiled_storage(self,\n modified_images: Optional[Iterable[Image]] = None\n ) -> None:\n \"\"\"Updates the tiled storage with modified_images and clears\n any tiles that are no longer valid. 
If this image manager does\n not use tiled storage then this method does nothing. Will\n initialize the TiledStorage instances if necessary.\n \"\"\"\n if not self.uses_tiled_storage:\n return\n\n if not self.tiles_srgb:\n self.tiles_srgb.initialize(is_data=False)\n if not self.tiles_data:\n self.tiles_data.initialize(is_data=True)\n\n if modified_images is None:\n modified_images = []\n elif not isinstance(modified_images, typing.Collection):\n modified_images = list(modified_images)\n\n self.tiles_srgb.update_from(modified_images)\n self.tiles_data.update_from(modified_images)\n\n def update_udim_images(self) -> None:\n \"\"\"Ensures all of this ImageManager's layer images have the\n same tile layout given by self.udim_layout.\n \"\"\"\n for img in self.layer_images_blend:\n self.udim_layout.update_tiles(img)\n\n @property\n def active_layer(self) -> Optional[MaterialLayer]:\n \"\"\"The active MaterialLayer of this ImageManager. May possibly\n be different from the active layer of the layer stack.\n \"\"\"\n active_id = self[\"active_layer_id\"]\n return (None if not active_id\n else self.layer_stack.get_layer_by_id(active_id))\n\n @active_layer.setter\n def active_layer(self, value: Optional[MaterialLayer]):\n self.set_active_layer(value)\n\n @property\n def bake_size(self) -> Tuple[int, int]:\n \"\"\"The size (in pixels) of the images used for baking. Tuple\n of 2 integers (width, height). Always multiples of 32.\"\"\"\n ratio = self.bake_size_percent / 100\n width = int(self.image_width * ratio) // 32 * 32\n height = int(self.image_height * ratio) // 32 * 32\n return (max(width, 32), max(height, 32))\n\n @property\n def blank_image(self) -> Optional[bpy.types.Image]:\n \"\"\"A blank solid black image.\"\"\"\n # TODO maybe store reference as id_prop instead of accessing\n # by name\n image = bpy.data.images.get(self._BLANK_IMAGE_NAME)\n if image is None:\n self._create_blank_image()\n image = bpy.data.images.get(self._BLANK_IMAGE_NAME)\n return image\n\n @property\n def layer_images_blend(self) -> List[bpy.types.Image]:\n \"\"\"The bpy.types.Image values of 'layer_images' as a list.\"\"\"\n return [x.image for x in self.layer_images]\n\n @property\n def bake_images_blend(self) -> List[bpy.types.Image]:\n \"\"\"The bpy.types.Image values of 'bake_images' as a list.\"\"\"\n return [x.image for x in self.bake_images]\n\n @property\n def layer_size(self) -> Tuple[int, int]:\n \"\"\"\"The size (in pixels) of the images for image-based layers.\"\"\"\n return (self.image_width, self.image_height)\n\n @property\n def layer_stack(self):\n return get_layer_stack_from_prop(self)\n\n @property\n def uses_tiled_images(self) -> bool:\n \"\"\"True if layers use tiled images (UDIMs).\n Not to be confused with uses_tiled_storage.\n \"\"\"\n return self.get(\"uses_tiled_images\", False)\n\n\nclasses = (SplitChannelImageProp, ImageManager)\n\nregister, unregister = bpy.utils.register_classes_factory(classes)\n","repo_name":"avelgest/principled-material-layers","sub_path":"principled_material_layers/image_manager.py","file_name":"image_manager.py","file_ext":"py","file_size_in_byte":33915,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"70908735669","text":"from django.urls import path\nfrom apps.store.views import (\n SearchPopularAPIView,\n AutoCompleteSearchView,\n CategoriesAPIView,\n AdCreateAPIView,\n RetrieveUpdateDeleteAPIView,\n UserAdsListAPIView,\n)\n\nurlpatterns = [\n path(\"search/populars/\", SearchPopularAPIView.as_view(), 
name=\"popular_search\"),\n path(\n \"search/complete/\", AutoCompleteSearchView.as_view(), name=\"autocomplete_search\"\n ),\n path(\"categories-with-childs/\", CategoriesAPIView.as_view(), name=\"categories\"),\n path(\"ads/\", AdCreateAPIView.as_view(), name=\"ad_create\"),\n path(\n \"ads//\",\n RetrieveUpdateDeleteAPIView.as_view(),\n name=\"retrieve-update-delete\",\n ),\n path(\n \"my-ads/\",\n UserAdsListAPIView.as_view(),\n name=\"my-ads\",\n ),\n]\n","repo_name":"Ilyosbek07/77.UZ","sub_path":"apps/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"32935768088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport sys\nimport glob\nimport serial\nimport time\nimport urllib.request\n\n\ndef serial_ports():\n\tif sys.platform.startswith('win'):\n\t\tports = ['COM%s' % (i + 1) for i in range(256)]\n\telif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n\t\t# this excludes your current terminal \"/dev/tty\"\n\t\tports = glob.glob('/dev/ttyACM*')\n\t\tprint(ports)\n\telif sys.platform.startswith('darwin'):\n\t\tports = glob.glob('/dev/tty.usbmodem*')\n\telse:\n\t\traise EnvironmentError('Unsupported platform')\n\n\tresult = ports\n\treturn result\n\n\n# types: Sonar - sonar arduino, Box - box controlling arduino\n# returns serial connection\ndef connect_to():\n\tarduinos = serial_ports()\n\tprint(arduinos)\n\tser = []\n\tfor i in range(len(arduinos)):\n\t\tser.append(serial.Serial(arduinos[i], 115200))\n\t\ttime.sleep(1)\n\t\tser[i].write(\"?\".encode())\n\t\t# time.sleep(0.1)\n\t\ttypes = ser[i].readline().strip().decode(\"utf-8\")\n\t\tprint(types)\n\t\tif types == \"MOT\":\n\t\t\tmot = ser[i]\n\t\t# if types == \"Box\":\n\t\t# \tbox = ser[i]\n\treturn mot\n\ndef connect_to_box():\n\tarduinos = serial_ports()\n\tprint(arduinos)\n\tser = []\n\tfor i in range(len(arduinos)):\n\t\tser.append(serial.Serial(arduinos[i], 115200))\n\t\ttime.sleep(1)\n\t\tser[i].write(\"?\".encode())\n\t\t# time.sleep(0.1)\n\t\ttypes = ser[i].readline().strip().decode(\"utf-8\")\n\t\tprint(types)\n\t\tif types == \"Box\":\n\t\t\tbox = ser[i]\n\t\t# if types == \"Box\":\n\t\t# \tbox = ser[i]\n\treturn box\n\n\ndef open_doar(i, j, ser):\n\tif i == 0:\n\t\tnum = j\n\telse:\n\t\tnum = j + 4\n\tser.write(str(num).encode())\n\tdoor = ser.readline().strip().decode(\"utf-8\")\n\n\ndef read_values():\n\twhile 1:\n\t\ttry:\n\t\t\tfp2 = urllib.request.urlopen(\"http://0.0.0.0:5000/ANGLE/\")\n\t\t\tmybytes2 = fp2.read()\n\t\t\tmystr2 = mybytes2.decode(\"utf8\")\n\t\t\tprint(mystr2)\n\t\t\tfp2.close()\n\t\t\tmystr2 = mystr2.split(\".\")[0]\n\t\t\tangle = float(mystr2)\n\t\t\tprint(angle)\n\t\t\treturn angle\n\t\texcept:\n\t\t\treturn 0\n\n\ndef motion(ser, direction):\n\tstart = time.time()\n\tstartAngle = read_values()\n\tif direction == \"U\":\n\t\tser.write(str(2).encode())\n\tif direction == \"D\":\n\t\tser.write(str(1).encode())\n\tif direction == \"S\":\n\t\tser.write(str(0).encode())\n\tif direction == \"C\":\n\t\tser.write(str(3).encode())\n\tif direction == \"R\":\n\t\tser.write(str(4).encode())\n\tif direction == \"L\":\n\t\tser.write(str(5).encode())\n\tstring = \"Direction is: \" + str(direction) + \", Start time: \" + str(start) + \", Angle is: \" + str(startAngle) + \";\"\n\tf=open(\"log.txt\",\"a\")\n\tprint(string, file=f)\n\tf.close()\n\nif __name__ == \"__main__\":\n\tprint(\"Connecting\")\n\tser = 
connect_to(\"GPS\")\n\tprint(\"connected to \" + str(ser))\n","repo_name":"clevtech/KazPostBot","sub_path":"lib/arduino_speak.py","file_name":"arduino_speak.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18760638106","text":"from typing import List\n\n\nclass Solution:\n def rotate(self, matrix: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n rotation_matrix = [[0] * len(matrix[0]) for _ in range(len(matrix))]\n N = len(matrix)\n for x in range(N):\n for y in range(N):\n rotation_matrix[y][x] = matrix[N - x - 1][y]\n\n for x in range(N):\n for y in range(N):\n matrix[y][x] = rotation_matrix[y][x]\n","repo_name":"ddobokki/coding-test-practice","sub_path":"leet_code/leet_48.py","file_name":"leet_48.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22247082681","text":"import os\nfrom functools import partial\nfrom json import dump, load\n\nimport numpy as np\nimport PySide6.QtWidgets as QtW\n\nimport ScenarioGUI.global_settings as global_vars\nfrom ScenarioGUI.gui_classes.gui_combine_window import JsonDict\n\nfrom ..starting_closing_tests import close_tests, start_tests\n\n\ndef test_add_other_version_functions(qtbot):\n \"\"\"\n test if the GUI handles wrong load and save inputs correctly\n\n Parameters\n ----------\n qtbot: qtbot\n bot for the GUI\n \"\"\"\n\n # init gui window\n main_window = start_tests(qtbot)\n\n def other_version_import(data: JsonDict) -> JsonDict:\n for dic in data[\"values\"]:\n dic[\"float_b\"] = dic[\"float_b\"] + 10\n return data\n\n main_window.add_other_version_import_function(\"v0.0.1\", other_version_import)\n # set filenames\n filename_1 = f\"test_1.{global_vars.FILE_EXTENSION}\"\n # delete files if they already exists\n if os.path.exists(main_window.default_path.joinpath(filename_1)): # pragma: no cover\n os.remove(main_window.default_path.joinpath(filename_1))\n\n def get_save_file_name(*args, **kwargs):\n \"\"\"getSaveFileName proxy\"\"\"\n return kwargs[\"return_value\"]\n\n QtW.QFileDialog.getSaveFileName = partial(\n get_save_file_name, return_value=(f\"{main_window.default_path.joinpath(filename_1)}\", f\"{global_vars.FILE_EXTENSION} (.{global_vars.FILE_EXTENSION})\")\n )\n main_window.fun_save_as()\n for thread in main_window.saving_threads:\n thread.start()\n thread.wait()\n assert thread.calculated\n assert filename_1 in main_window.dia.windowTitle()\n old_value = main_window.gui_structure.float_b.get_value()\n qtbot.wait(1000)\n\n with open(main_window.default_path.joinpath(filename_1)) as file:\n saving = load(file)\n\n saving[\"version\"] = \"0.0.1\"\n\n with open(main_window.default_path.joinpath(filename_1), \"w\") as file:\n dump(saving, file, indent=1)\n\n close_tests(main_window, qtbot)\n main_window = start_tests(qtbot)\n main_window.add_other_version_import_function(\"v0.0.1\", other_version_import)\n\n assert not np.isclose(main_window.gui_structure.float_b.get_value(), old_value + 10)\n QtW.QFileDialog.getOpenFileName = partial(get_save_file_name, return_value=(f\"{main_window.default_path.joinpath(filename_1)}\",\n f\"{global_vars.FILE_EXTENSION} (.{global_vars.FILE_EXTENSION})\"))\n main_window.fun_load()\n assert filename_1 in main_window.dia.windowTitle()\n assert np.isclose(main_window.gui_structure.float_b.get_value(), old_value + 10)\n close_tests(main_window, 
qtbot)\n","repo_name":"tblanke/ScenarioGUI","sub_path":"tests/test_main_window_functions/test_add_other_version_import.py","file_name":"test_add_other_version_import.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"31744485446","text":"import pandas as pd\nimport os\n\ndef process_wolt_order_data():\n # Change to Data Directory\n folder_path = r'H:\\Shared drives\\99 - Data\\01 - Source Data\\03 - Wolt\\CSV Files'\n os.chdir(folder_path)\n \n # Create an empty list to store data frames\n dataframe = []\n \n # Loop through all files in the directory\n for filename in os.listdir(folder_path):\n # Check if file is a CSV file\n if filename.endswith('.csv'):\n # Load CSV file into a data frame\n filepath = os.path.join(folder_path, filename)\n df = pd.read_csv(filepath)\n # Append data frame to list\n dataframe.append(df)\n \n # Concatenate all data frames in the list\n df = pd.concat(dataframe)\n \n # Convert 'Order placed' to datetime\n df['Order placed'] = pd.to_datetime(df['Order placed'], format='%m/%d/%y, %I:%M %p')\n \n # Extract date and time components\n df['OrderDate'] = df['Order placed'].dt.date\n df['OrderTime'] = df['Order placed'].dt.time\n \n # Convert 'Delivery time' to datetime\n df['Delivery time'] = pd.to_datetime(df['Delivery time'], format='%m/%d/%y, %I:%M %p')\n \n # Extract time component from 'Delivery time'\n df['OrderEndTime'] = df['Delivery time'].dt.time\n \n # Add Order Duration\n df['OrderDuration'] = ''\n \n # Combine date and time columns to create datetime objects\n df['OrderStartTime'] = pd.to_datetime(df['OrderDate'].astype(str) + ' ' + df['OrderTime'].astype(str))\n \n # Clean Restaurant Names\n cleaned_names = pd.read_csv(r'H:\\Shared drives\\99 - Data\\03 - Rx Name List\\Full Rx List, with Cleaned Names.csv')\n df['Location'] = df['Venue']\n df['Location'] = df['Location'].str.replace('ö', 'o').str.replace('ü', 'u')\n df = pd.merge(df, cleaned_names[['Location', 'Cleaned Name']], on='Location', how='left')\n df = df.rename(columns={'Cleaned Name': 'Cleaned Location'})\n df['Location'] = df['Cleaned Location']\n df = df.drop(columns=['Cleaned Location'])\n \n # Clean Channel Partner Names\n df['Channel'] = 'Wolt'\n \n # Clean Order ID\n df['OrderID'] = df['Order number']\n \n # Clean Statuses\n df['OrderStatus'] = df['Delivery status'].str.title()\n \n # Clean Other Entries\n df['DeliveryType'] = df['Delivery type'].str.title().str.replace('Takeaway', 'Pickup').str.replace('Homedelivery', 'Delivery')\n df['GrossAOV'] = df['Price']\n df['RatingScore'] = df['Review score']\n df['OrderStartTime'] = df['OrderTime']\n \n # Sort The DataFrame\n df = df.sort_values(['OrderDate', 'OrderTime'])\n \n # Create Primary Key\n df['OrderDate'] = pd.to_datetime(df['OrderDate'])\n df['PrimaryKey'] = df['OrderID'] + ' - ' + df['Location'] + ' - ' + df['OrderDate'].dt.strftime('%Y-%m-%d')\n \n # Order the DataFrame\n wolt_order_list = ['PrimaryKey', 'Location', 'OrderID', 'OrderDate', 'OrderTime', 'Channel', 'OrderStatus', 'DeliveryType', 'GrossAOV', 'OrderDuration', 'RatingScore']\n df = df[wolt_order_list]\n \n # Save The DataFrame\n os.chdir(r'H:\\Shared drives\\99 - Data\\00 - Cleaned Data')\n df = df.to_csv('Wolt Data.csv', 
index=False)\n\nprocess_wolt_order_data()","repo_name":"GerryPidgeon/tu_master_data_file","sub_path":"wolt.py","file_name":"wolt.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39072500008","text":"import requests\nimport json\nfrom python_1inch import OneInchExchange\n\nfrom .market_base import Market, Coin, Cup, CupEntry\n\n\nclass Oneinch(Market):\n\n def __init__(self) -> None:\n super().__init__('1inch')\n self.exchange = OneInchExchange(address=None)\n self.exchange.get_tokens()\n\n def make_name_for_market(self, coin: Coin, base_coin: Coin) -> str:\n return f'{coin.get_name(self)}/{base_coin.get_name()}'\n\n def _get_price_quote(\n self, from_token_symbol: str,\n to_token_symbol: str,\n amount: int) -> tuple:\n quote_dict = self.exchange.get_quote(\n from_token_symbol=from_token_symbol,\n to_token_symbol=to_token_symbol,\n amount=amount\n )\n toTokenAmount = self.exchange.convert_amount_to_decimal(\n token_symbol=to_token_symbol,\n amount=quote_dict['toTokenAmount']\n )\n fromTokenAmount = self.exchange.convert_amount_to_decimal(\n token_symbol=from_token_symbol,\n amount=quote_dict['fromTokenAmount']\n )\n return (fromTokenAmount, toTokenAmount)\n\n def _add_coin_to_tokenbook(self, coin: Coin):\n url = '{}/{}/{}/quote'.format(\n self.exchange.base_url,\n self.exchange.version,\n self.exchange.chain_id)\n url = url + '?fromTokenAddress={}&toTokenAddress={}&amount={}'.format(\n coin.address,\n self.exchange.tokens['USDT']['address'],\n 10_000_000_000_000_000)\n response = requests.get(url)\n token = json.loads(response.text)['fromToken']\n coin.put_new_name(\n name=token['symbol'],\n market=self\n )\n self.exchange.tokens[coin.get_upper_name(self)] = token\n\n def get_cup(self, coin: Coin, base_coin: Coin, depth: int = 1) -> Cup:\n target_base_amount = 510\n if coin.address and (\n coin.get_upper_name(self) not in self.exchange.tokens):\n self._add_coin_to_tokenbook(coin)\n base_amount, coin_amount = self._get_price_quote(\n from_token_symbol=base_coin.get_upper_name(self),\n to_token_symbol=coin.get_upper_name(self),\n amount=target_base_amount\n )\n ask_price = base_amount / coin_amount\n asks = [CupEntry(float(ask_price), float(coin_amount)), ]\n\n coin_amount_int = int(coin_amount)\n coin_amount, base_amount = self._get_price_quote(\n from_token_symbol=coin.get_upper_name(self),\n to_token_symbol=base_coin.get_upper_name(self),\n amount=coin_amount_int\n )\n bid_price = base_amount / coin_amount\n bids = [CupEntry(float(bid_price), float(coin_amount)), ]\n return Cup(asks, bids)\n\n def make_link_to_market(self, coin: Coin, base_coin: Coin) -> str:\n market_name = \\\n f'{coin.get_upper_name(self)}/{base_coin.get_upper_name(self)}'\n return f'https://app.1inch.io/#/1/classic/swap/{market_name}'\n","repo_name":"lermanMax/coin_control_bot","sub_path":"services/api_oneinch.py","file_name":"api_oneinch.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72794159349","text":"from __future__ import annotations\nimport argparse\nimport copy\nfrom datetime import datetime\nfrom enum import Enum\nfrom dataclasses import dataclass, field\nfrom time import sleep\nfrom typing import Tuple, TypeVar, Type, Iterable, ClassVar\nimport random\nimport requests\nimport os\nimport threading\n\n# maximum and minimum values for our heuristic scores (usually represents an end of game 
condition)\nMAX_HEURISTIC_SCORE = 2000000000\nMIN_HEURISTIC_SCORE = -2000000000\n\n# Define weights for each unit type\nweights = {\n 'Virus': 3,\n 'Tech': 3,\n 'Firewall': 3,\n 'Program': 3,\n 'AI': 9999\n}\n\nclass TimeLimitExceededException(Exception):\n pass\n\n##############################################################################################################\n# LOGGING\n\nlogfile = open('templog.txt', \"w\")\n\n##############################################################################################################\n\nclass UnitType(Enum):\n \"\"\"Every unit type.\"\"\"\n AI = 0\n Tech = 1\n Virus = 2\n Program = 3\n Firewall = 4\n\nclass Player(Enum):\n \"\"\"The 2 players.\"\"\"\n Attacker = 0\n Defender = 1\n\n def next(self) -> Player:\n \"\"\"The next (other) player.\"\"\"\n if self is Player.Attacker:\n return Player.Defender\n else:\n return Player.Attacker\n\nclass GameType(Enum):\n AttackerVsDefender = 0\n AttackerVsComp = 1\n CompVsDefender = 2\n CompVsComp = 3\n\n##############################################################################################################\n\n@dataclass(slots=True)\nclass Unit:\n player: Player = Player.Attacker\n type: UnitType = UnitType.Program\n health : int = 9\n # class variable: damage table for units (based on the unit type constants in order)\n damage_table : ClassVar[list[list[int]]] = [\n [3,3,3,3,1], # AI\n [1,1,6,1,1], # Tech\n [9,6,1,6,1], # Virus\n [3,3,3,3,1], # Program\n [1,1,1,1,1], # Firewall\n ]\n # class variable: repair table for units (based on the unit type constants in order)\n repair_table : ClassVar[list[list[int]]] = [\n [0,1,1,0,0], # AI\n [3,0,0,3,3], # Tech\n [0,0,0,0,0], # Virus\n [0,0,0,0,0], # Program\n [0,0,0,0,0], # Firewall\n ]\n\n def is_alive(self) -> bool:\n \"\"\"Are we alive ?\"\"\"\n return self.health > 0\n\n def mod_health(self, health_delta : int):\n \"\"\"Modify this unit's health by delta amount.\"\"\"\n self.health += health_delta\n if self.health < 0:\n self.health = 0\n elif self.health > 9:\n self.health = 9\n\n def to_string(self) -> str:\n \"\"\"Text representation of this unit.\"\"\"\n p = self.player.name.lower()[0]\n t = self.type.name.upper()[0]\n return f\"{p}{t}{self.health}\"\n \n def __str__(self) -> str:\n \"\"\"Text representation of this unit.\"\"\"\n return self.to_string()\n \n def damage_amount(self, target: Unit) -> int:\n \"\"\"How much can this unit damage another unit.\"\"\"\n amount = self.damage_table[self.type.value][target.type.value]\n if target.health - amount < 0:\n return target.health\n return amount\n\n def repair_amount(self, target: Unit) -> int:\n \"\"\"How much can this unit repair another unit.\"\"\"\n amount = self.repair_table[self.type.value][target.type.value]\n if target.health + amount > 9:\n return 9 - target.health\n return amount\n\n##############################################################################################################\n\n@dataclass(slots=True)\nclass Coord:\n \"\"\"Representation of a game cell coordinate (row, col).\"\"\"\n row : int = 0\n col : int = 0\n\n def col_string(self) -> str:\n \"\"\"Text representation of this Coord's column.\"\"\"\n coord_char = '?'\n if self.col < 16:\n coord_char = \"0123456789abcdef\"[self.col]\n return str(coord_char)\n\n def row_string(self) -> str:\n \"\"\"Text representation of this Coord's row.\"\"\"\n coord_char = '?'\n if self.row < 26:\n coord_char = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"[self.row]\n return str(coord_char)\n\n def to_string(self) -> str:\n \"\"\"Text 
representation of this Coord.\"\"\"\n return self.row_string()+self.col_string()\n \n def __str__(self) -> str:\n \"\"\"Text representation of this Coord.\"\"\"\n return self.to_string()\n \n def clone(self) -> Coord:\n \"\"\"Clone a Coord.\"\"\"\n return copy.copy(self)\n\n def iter_range(self, dist: int) -> Iterable[Coord]:\n \"\"\"Iterates over Coords inside a rectangle centered on our Coord.\"\"\"\n for row in range(self.row-dist,self.row+1+dist):\n for col in range(self.col-dist,self.col+1+dist):\n yield Coord(row,col)\n\n def iter_adjacent(self) -> Iterable[Coord]:\n \"\"\"Iterates over adjacent Coords.\"\"\"\n yield Coord(self.row-1,self.col)\n yield Coord(self.row,self.col-1)\n yield Coord(self.row+1,self.col)\n yield Coord(self.row,self.col+1)\n\n @classmethod\n def from_string(cls, s : str) -> Coord | None:\n \"\"\"Create a Coord from a string. ex: D2.\"\"\"\n s = s.strip()\n for sep in \" ,.:;-_\":\n s = s.replace(sep, \"\")\n if (len(s) == 2):\n coord = Coord()\n coord.row = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\".find(s[0:1].upper())\n coord.col = \"0123456789abcdef\".find(s[1:2].lower())\n return coord\n else:\n return None\n\n##############################################################################################################\n\n@dataclass(slots=True)\nclass CoordPair:\n \"\"\"Representation of a game move or a rectangular area via 2 Coords.\"\"\"\n src : Coord = field(default_factory=Coord)\n dst : Coord = field(default_factory=Coord)\n\n def to_string(self) -> str:\n \"\"\"Text representation of a CoordPair.\"\"\"\n return self.src.to_string()+\" \"+self.dst.to_string()\n \n def __str__(self) -> str:\n \"\"\"Text representation of a CoordPair.\"\"\"\n return self.to_string()\n\n def clone(self) -> CoordPair:\n \"\"\"Clones a CoordPair.\"\"\"\n return copy.copy(self)\n\n def iter_rectangle(self) -> Iterable[Coord]:\n \"\"\"Iterates over cells of a rectangular area.\"\"\"\n for row in range(self.src.row,self.dst.row+1):\n for col in range(self.src.col,self.dst.col+1):\n yield Coord(row,col)\n\n @classmethod\n def from_quad(cls, row0: int, col0: int, row1: int, col1: int) -> CoordPair:\n \"\"\"Create a CoordPair from 4 integers.\"\"\"\n return CoordPair(Coord(row0,col0),Coord(row1,col1))\n \n @classmethod\n def from_dim(cls, dim: int) -> CoordPair:\n \"\"\"Create a CoordPair based on a dim-sized rectangle.\"\"\"\n return CoordPair(Coord(0,0),Coord(dim-1,dim-1))\n \n @classmethod\n def from_string(cls, s : str) -> CoordPair | None:\n \"\"\"Create a CoordPair from a string. 
ex: A3 B2\"\"\"\n s = s.strip()\n for sep in \" ,.:;-_\":\n s = s.replace(sep, \"\")\n if (len(s) == 4):\n coords = CoordPair()\n coords.src.row = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\".find(s[0:1].upper())\n coords.src.col = \"0123456789abcdef\".find(s[1:2].lower())\n coords.dst.row = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\".find(s[2:3].upper())\n coords.dst.col = \"0123456789abcdef\".find(s[3:4].lower())\n return coords\n else:\n return None\n\n##############################################################################################################\n\n@dataclass(slots=True)\nclass Options:\n \"\"\"Representation of the game options.\"\"\"\n dim: int = 5\n max_depth : int | None = 5\n min_depth : int | None = 2\n max_time : float | None = 6.0\n game_type : GameType = GameType.AttackerVsDefender\n alpha_beta : bool | None = True\n max_moves : int | None = 100\n randomize_moves : bool = True\n broker : str | None = None\n heuristic_type: str | None = \"e2\"\n\n##############################################################################################################\n\n@dataclass(slots=True)\nclass Stats:\n \"\"\"Representation of the global game statistics.\"\"\"\n evaluations_per_depth : dict[int,int] = field(default_factory=dict)\n total_seconds: float = 0.0\n non_root_nodes: int = 0\n non_leaf_nodes: int = 0\n\n##############################################################################################################\n\n@dataclass(slots=True)\nclass Game:\n \"\"\"Representation of the game state.\"\"\"\n board: list[list[Unit | None]] = field(default_factory=list)\n h_player: Player = Player.Attacker\n next_player: Player = Player.Attacker\n turns_played : int = 1\n options: Options = field(default_factory=Options)\n stats: Stats = field(default_factory=Stats)\n _attacker_has_ai : bool = True\n _defender_has_ai : bool = True\n\n def __post_init__(self):\n \"\"\"Automatically called after class init to set up the default board state.\"\"\"\n dim = self.options.dim\n self.board = [[None for _ in range(dim)] for _ in range(dim)]\n md = dim-1\n self.set(Coord(0,0),Unit(player=Player.Defender,type=UnitType.AI))\n self.set(Coord(1,0),Unit(player=Player.Defender,type=UnitType.Tech))\n self.set(Coord(0,1),Unit(player=Player.Defender,type=UnitType.Tech))\n self.set(Coord(2,0),Unit(player=Player.Defender,type=UnitType.Firewall))\n self.set(Coord(0,2),Unit(player=Player.Defender,type=UnitType.Firewall))\n self.set(Coord(1,1),Unit(player=Player.Defender,type=UnitType.Program))\n self.set(Coord(md,md),Unit(player=Player.Attacker,type=UnitType.AI))\n self.set(Coord(md-1,md),Unit(player=Player.Attacker,type=UnitType.Virus))\n self.set(Coord(md,md-1),Unit(player=Player.Attacker,type=UnitType.Virus))\n self.set(Coord(md-2,md),Unit(player=Player.Attacker,type=UnitType.Program))\n self.set(Coord(md,md-2),Unit(player=Player.Attacker,type=UnitType.Program))\n self.set(Coord(md-1,md-1),Unit(player=Player.Attacker,type=UnitType.Firewall))\n #initialize the turns_played to start from 1\n self.turns_played = 0\n def clone(self) -> Game:\n \"\"\"Make a new copy of a game for minimax recursion.\n\n Shallow copy of everything except the board (options and stats are shared).\n \"\"\"\n new = copy.copy(self)\n new.board = copy.deepcopy(self.board)\n return new\n\n def is_empty(self, coord : Coord) -> bool:\n \"\"\"Check if contents of a board cell of the game at Coord is empty (must be valid coord).\"\"\"\n return self.board[coord.row][coord.col] is None\n\n def get(self, coord : Coord) -> Unit | None:\n \"\"\"Get contents 
of a board cell of the game at Coord.\"\"\"\n if self.is_valid_coord(coord):\n return self.board[coord.row][coord.col]\n else:\n return None\n\n def set(self, coord : Coord, unit : Unit | None):\n \"\"\"Set contents of a board cell of the game at Coord.\"\"\"\n if self.is_valid_coord(coord):\n self.board[coord.row][coord.col] = unit\n\n def remove_dead(self, coord: Coord):\n \"\"\"Remove unit at Coord if dead.\"\"\"\n unit = self.get(coord)\n if unit is not None and not unit.is_alive():\n self.set(coord,None)\n if unit.type == UnitType.AI:\n if unit.player == Player.Attacker:\n self._attacker_has_ai = False\n else:\n self._defender_has_ai = False\n\n def mod_health(self, coord : Coord, health_delta : int):\n \"\"\"Modify health of unit at Coord (positive or negative delta).\"\"\"\n target = self.get(coord)\n if target is not None:\n target.mod_health(health_delta)\n self.remove_dead(coord)\n\n def is_valid_move(self, coords : CoordPair) -> bool:\n \"\"\"Validate a move expressed as a CoordPair. TODO: WRITE MISSING CODE!!!\"\"\"\n if not self.is_valid_coord(coords.src) or not self.is_valid_coord(coords.dst):\n return False\n\n # Check if move is to an adjacent cell\n if coords.dst != coords.src and coords.dst not in coords.src.iter_adjacent():\n return False\n \n unit = self.get(coords.src)\n if unit is None or unit.player != self.next_player:\n return False\n \n dstunit = self.get(coords.dst)\n\n #Check if an AI, a Firewall or a Program \n if unit.type.value == 0 or unit.type.value == 3 or unit.type.value == 4:\n # Check if the move is valid for the specific units\n if unit.player == Player.Attacker:\n # The attacker’s AI, Firewall and Program can only move up or left.\n if coords.dst.row == coords.src.row+1 or coords.dst.col == coords.src.col+1:\n # If engaged in combat, should still be able to attack and repair.\n if dstunit is not None and dstunit.player == Player.Defender:\n return True\n return False\n else:\n # The defender’s AI, Firewall and Program can only move down or right.\n if coords.dst.row == coords.src.row-1 or coords.dst.col == coords.src.col-1:\n # If engaged in combat, should still be able to attack and repair.\n if dstunit is not None and dstunit.player == Player.Attacker:\n return True\n return False\n \n # Check if wants to move but is engaged in combat\n if self.get(coords.dst) == None:\n for coord in coords.src.iter_adjacent():\n if self.get(coord) is not None and self.get(coord).player != self.next_player:\n return False\n \n return True\n\n def perform_move(self, coords : CoordPair) -> Tuple[bool,str]:\n \"\"\"Validate and perform a move expressed as a CoordPair. 
TODO: WRITE MISSING CODE!!!\"\"\"\n if self.is_valid_move(coords):\n #self destruct\n unit = self.get(coords.src)\n target = self.get(coords.dst)\n if coords.src == coords.dst:\n area = coords.src.iter_range(1)\n for units in area:\n if coords.src == units:\n continue\n temp = self.get(units)\n if temp == None:\n continue\n temp.mod_health(-2)\n\n self.remove_dead(units)\n unit.mod_health(-9)\n self.remove_dead(coords.src)\n return (True, coords.to_string())\n else:\n #standard movement or interaction\n if target == None:\n self.set(coords.dst,self.get(coords.src))\n self.set(coords.src,None)\n return (True, coords.to_string())\n elif target.player == unit.player:\n if unit.repair_amount(target) == 0 or target.health == 9:\n return (False, \"invalid move\")\n target.mod_health(unit.repair_amount(target))\n return (True, coords.to_string())\n else:\n #if combat, they damage each other\n target.mod_health(-(unit.damage_amount(target)))\n unit.mod_health(-(target.damage_amount(unit)))\n #redundant if not target.is_alive():\n self.remove_dead(coords.dst)\n #redundant if not unit.is_alive():\n self.remove_dead(coords.src)\n return (True, coords.to_string())\n\n return (False,\"invalid move\")\n\n def next_turn(self):\n \"\"\"Transitions game to the next turn.\"\"\"\n self.next_player = self.next_player.next()\n self.turns_played += 1\n\n def get_output(self) -> str:\n output = \"\"\n output += f\"Next player: {self.next_player.name}\\n\"\n output += f\"Turns played: {self.turns_played}\\n\"\n\n return output\n\n def get_board(self) -> str:\n \"\"\"Pretty text representation of the game.\"\"\"\n dim = self.options.dim\n output = \"\"\n coord = Coord()\n output += \"\\n \"\n for col in range(dim):\n coord.col = col\n label = coord.col_string()\n output += f\"{label:^3} \"\n output += \"\\n\"\n for row in range(dim):\n coord.row = row\n label = coord.row_string()\n output += f\"{label}: \"\n for col in range(dim):\n coord.col = col\n unit = self.get(coord)\n if unit is None:\n output += \" . \"\n else:\n output += f\"{str(unit):^3} \"\n output += \"\\n\"\n return output\n\n def __str__(self) -> str:\n \"\"\"Default string representation of a game.\"\"\"\n return self.get_output() + self.get_board()\n \n def is_valid_coord(self, coord: Coord) -> bool:\n \"\"\"Check if a Coord is valid within out board dimensions.\"\"\"\n dim = self.options.dim\n if coord.row < 0 or coord.row >= dim or coord.col < 0 or coord.col >= dim:\n return False\n return True\n\n def read_move(self) -> CoordPair:\n \"\"\"Read a move from keyboard and return as a CoordPair.\"\"\"\n while True:\n s = input(F'Player {self.next_player.name}, enter your move: ')\n coords = CoordPair.from_string(s)\n if coords is not None and self.is_valid_coord(coords.src) and self.is_valid_coord(coords.dst):\n return coords\n else:\n print('Invalid coordinates! 
Try again.')\n \n def human_turn(self):\n \"\"\"Human player plays a move (or get via broker).\"\"\"\n if self.options.broker is not None:\n print(\"Getting next move with auto-retry from game broker...\")\n while True:\n mv = self.get_move_from_broker()\n if mv is not None:\n (success,result) = self.perform_move(mv)\n print(f\"Broker {self.next_player.name}: \",end='')\n print(result)\n if success:\n self.next_turn()\n break\n sleep(0.1)\n else:\n while True:\n mv = self.read_move()\n (success,result) = self.perform_move(mv)\n if success:\n logfile.write(\"\\n\")\n logfile.write(f\"Turn #{self.turns_played}: \\n\")\n logfile.write(f\"Player {self.next_player.name}: \")\n print(f\"Player {self.next_player.name}: \",end='')\n logfile.write(result + \"\\n\")\n print(result + \"\\n\")\n\n logfile.write(self.get_board() + \"\\n\\n\")\n self.next_turn()\n break\n else:\n print(\"The move is not valid! Try again.\")\n\n def computer_turn(self) -> CoordPair | None:\n \"\"\"Computer plays a move.\"\"\"\n self.h_player = self.next_player\n mv = self.suggest_move()\n if mv is not None:\n (success,result) = self.perform_move(mv)\n if success:\n logfile.write(\"\\n\")\n logfile.write(f\"Turn #{self.turns_played}: \\n\")\n logfile.write(f\"Computer {self.next_player.name}: \")\n print(f\"Computer {self.next_player.name}: \",end='')\n logfile.write(result + \"\\n\")\n print(result + \"\\n\")\n\n logfile.write(self.get_board() + \"\\n\\n\")\n self.next_turn()\n return mv\n\n def player_units(self, player: Player) -> Iterable[Tuple[Coord,Unit]]:\n \"\"\"Iterates over all units belonging to a player.\"\"\"\n for coord in CoordPair.from_dim(self.options.dim).iter_rectangle():\n unit = self.get(coord)\n if unit is not None and unit.player == player:\n yield (coord,unit)\n\n def is_finished(self) -> bool:\n \"\"\"Check if the game is over.\"\"\"\n return self.has_winner() is not None\n\n def has_winner(self) -> Player | None:\n \"\"\"Check if the game is over and returns winner\"\"\"\n if self.options.max_moves is not None and self.turns_played >= self.options.max_moves:\n return Player.Defender\n elif self._attacker_has_ai:\n if self._defender_has_ai:\n return None\n else:\n return Player.Attacker \n elif self._defender_has_ai:\n return Player.Defender\n\n def move_candidates(self) -> Iterable[CoordPair]:\n \"\"\"Generate valid move candidates for the next player.\"\"\"\n move = CoordPair()\n for (src,_) in self.player_units(self.next_player):\n move.src = src\n for dst in src.iter_adjacent():\n move.dst = dst\n if self.is_valid_move(move):\n yield move.clone()\n move.dst = src\n yield move.clone()\n\n def random_move(self) -> Tuple[int, CoordPair | None, float]:\n \"\"\"Returns a random move.\"\"\"\n move_candidates = list(self.move_candidates())\n random.shuffle(move_candidates)\n if len(move_candidates) > 0:\n return (0, move_candidates[0], 1)\n else:\n return (0, None, 0)\n \n\n def minimax_alpha_beta(self, game, depth, alpha, beta, maximizing_player):\n is_root_node = depth == self.options.max_depth # Check if this is the root node\n is_leaf_node = depth == 0 or game.is_finished() # Check if this is a leaf node\n\n # Count non-root nodes\n if not is_root_node:\n self.stats.non_root_nodes += 1\n\n # Count non-leaf nodes\n if not is_leaf_node:\n self.stats.non_leaf_nodes += 1\n\n # Existing code for leaf node evaluation\n if is_leaf_node:\n if self.options.heuristic_type == \"e0\":\n self.stats.evaluations_per_depth[self.options.max_depth - depth] = 
self.stats.evaluations_per_depth.get(self.options.max_depth - depth, 0) + 1\n return game.heuristic_e0()\n if self.options.heuristic_type == \"e1\":\n self.stats.evaluations_per_depth[self.options.max_depth - depth] = self.stats.evaluations_per_depth.get(self.options.max_depth - depth, 0) + 1\n return game.heuristic_e1()\n if self.options.heuristic_type == \"e2\":\n self.stats.evaluations_per_depth[self.options.max_depth - depth] = self.stats.evaluations_per_depth.get(self.options.max_depth - depth, 0) + 1\n return game.heuristic_e2()\n\n if maximizing_player:\n v = float('-inf')\n \n for move in game.move_candidates():\n child_game = game.clone()\n (success, result) = child_game.perform_move(move)\n if not success:\n continue\n child_game.next_turn()\n eval_value = self.minimax_alpha_beta(child_game, depth - 1, alpha, beta, False) \n v = max(v, eval_value)\n if self.options.alpha_beta:\n alpha = max(alpha, v)\n if beta <= alpha:\n break # Beta cut-off\n return v\n else:\n v = float('inf')\n \n for move in game.move_candidates():\n child_game = game.clone()\n (success, result) = child_game.perform_move(move)\n if not success:\n continue\n child_game.next_turn()\n eval_value = self.minimax_alpha_beta(child_game, depth - 1, alpha, beta, True)\n v = min(v, eval_value)\n if self.options.alpha_beta:\n beta = min(beta, v)\n if beta <= alpha:\n break # Alpha cut-off\n return v\n\n def get_best_move(self, depth):\n best_move = None\n max_eval = float('-inf')\n\n stop_search = threading.Event() # Event to signal the thread to stop\n\n def worker():\n nonlocal best_move, max_eval\n \n for move in self.move_candidates():\n # Check if we should stop searching due to time limit\n if stop_search.is_set():\n # Can remove this output in the future, but just to show it does respect time limit\n print(f\"Time limit of {self.options.max_time} seconds reached! Returning current values.\")\n break\n\n child_game = self.clone()\n (success, result) = child_game.perform_move(move)\n if not success:\n continue\n child_game.next_turn()\n v = self.minimax_alpha_beta(child_game, depth - 1, float('-inf'), float('inf'), False)\n \n if v > max_eval:\n max_eval = v\n best_move = move\n \n thread = threading.Thread(target=worker)\n thread.start()\n thread.join(timeout=(self.options.max_time - 1)) # - 1 second for time for the rest of the turn logic to be performed\n\n if thread.is_alive():\n stop_search.set() # Signal the thread to stop searching\n thread.join() # Wait for the thread to actually finish\n\n return max_eval, best_move \n\n def suggest_move(self) -> CoordPair | None:\n \"\"\"Suggest the next move using minimax alpha beta. 
TODO: REPLACE RANDOM_MOVE WITH PROPER GAME LOGIC!!!\"\"\"\n start_time = datetime.now()\n \n try:\n (score, move) = self.get_best_move(self.options.max_depth)\n except TimeLimitExceededException:\n pass\n elapsed_seconds = (datetime.now() - start_time).total_seconds()\n self.stats.total_seconds += elapsed_seconds\n print(f\"Heuristic score: {score}\")\n logfile.write(f\"Heuristic score: {score}\\n\")\n total_evals = sum(self.stats.evaluations_per_depth.values())\n print(f\"Cumulative evaluations: {total_evals}\")\n logfile.write(f\"Cumulative evaluations: {total_evals}\\n\")\n #Average branching factor \n total_depths = len(self.stats.evaluations_per_depth)\n if self.stats.non_leaf_nodes > 0:\n average_branching_factor = self.stats.non_root_nodes/self.stats.non_leaf_nodes if total_depths else 0\n else: \n average_branching_factor = 0\n print(f\"Average branching factor: {average_branching_factor:0.1f}\")\n logfile.write(f\"Average branching factor: {average_branching_factor:0.1f}\\n\")\n self.stats.non_leaf_nodes = 0\n self.stats.non_root_nodes = 0\n print(f\"Evals per depth: \",end='')\n logfile.write(f\"Evals per depth: \")\n for k in sorted(self.stats.evaluations_per_depth.keys()):\n print(f\"{k}:{self.stats.evaluations_per_depth[k]} \",end='')\n logfile.write(f\"{k}:{self.stats.evaluations_per_depth[k]} \")\n print()\n logfile.write(\"\\n\")\n print(f\"Cumulative evals per depth: \",end='')\n logfile.write(f\"Cumulative evals per depth: \")\n for k in sorted(self.stats.evaluations_per_depth.keys()):\n print(f\"{k}:{(self.stats.evaluations_per_depth[k]/total_evals)*100:0.1f}% \",end='')\n logfile.write(f\"{k}:{(self.stats.evaluations_per_depth[k]/total_evals)*100:0.1f}% \")\n print()\n logfile.write(\"\\n\")\n if self.stats.total_seconds > 0:\n print(f\"Eval perf.: {total_evals/self.stats.total_seconds/1000:0.1f}k/s\")\n logfile.write(f\"Eval perf.: {total_evals/self.stats.total_seconds/1000:0.1f}k/s\")\n logfile.write(\"\\n\")\n print(f\"Elapsed time: {elapsed_seconds:0.1f}s\")\n logfile.write(f\"Elapsed time: {elapsed_seconds:0.1f}s\\n\")\n return move\n\n def post_move_to_broker(self, move: CoordPair):\n \"\"\"Send a move to the game broker.\"\"\"\n if self.options.broker is None:\n return\n data = {\n \"from\": {\"row\": move.src.row, \"col\": move.src.col},\n \"to\": {\"row\": move.dst.row, \"col\": move.dst.col},\n \"turn\": self.turns_played\n }\n try:\n r = requests.post(self.options.broker, json=data)\n if r.status_code == 200 and r.json()['success'] and r.json()['data'] == data:\n # print(f\"Sent move to broker: {move}\")\n pass\n else:\n print(f\"Broker error: status code: {r.status_code}, response: {r.json()}\")\n except Exception as error:\n print(f\"Broker error: {error}\")\n\n def get_move_from_broker(self) -> CoordPair | None:\n \"\"\"Get a move from the game broker.\"\"\"\n if self.options.broker is None:\n return None\n headers = {'Accept': 'application/json'}\n try:\n r = requests.get(self.options.broker, headers=headers)\n if r.status_code == 200 and r.json()['success']:\n data = r.json()['data']\n if data is not None:\n if data['turn'] == self.turns_played+1:\n move = CoordPair(\n Coord(data['from']['row'],data['from']['col']),\n Coord(data['to']['row'],data['to']['col'])\n )\n print(f\"Got move from broker: {move}\")\n return move\n else:\n # print(\"Got broker data for wrong turn.\")\n # print(f\"Wanted {self.turns_played+1}, got {data['turn']}\")\n pass\n else:\n # print(\"Got no data from broker\")\n pass\n else:\n print(f\"Broker error: status code: 
{r.status_code}, response: {r.json()}\")\n except Exception as error:\n print(f\"Broker error: {error}\")\n return None\n \n def heuristic_e2(self) -> int:\n player1_score = 0\n player2_score = 0\n \n WIN_SCORE = 999999 # A very high score for winning\n LOSS_SCORE = -999999 # A very high negative score for losing\n MOVE_TOWARDS_AI_WEIGHT = 6000\n HEALTH_FACTOR = 100 # Adjust as needed to increase/decrease the influence of health\n AI_HEALTH_WEIGHT = 9999 # High factor for AI health so that units are motivated to go for AI.\n \n ai_location_opponent = None\n ai_location_self = None\n \n # Check opponent's AI\n for (coord, unit) in self.player_units(Player.Defender if self.h_player == Player.Attacker else Player.Attacker):\n if unit.type.name == \"AI\":\n ai_location_opponent = coord\n player2_score += unit.health * AI_HEALTH_WEIGHT \n break\n \n # Check own AI\n for (coord, unit) in self.player_units(self.h_player):\n if unit.type.name == \"AI\":\n ai_location_self = coord\n player1_score += unit.health * AI_HEALTH_WEIGHT\n break\n\n # If the opponent's AI is not found, it means the current player (AI) has won\n if not ai_location_opponent:\n return WIN_SCORE\n \n # If the AI's own unit is not on the board, it means the AI has lost\n if not ai_location_self:\n return LOSS_SCORE\n\n # For the current player\n for (coord, unit) in self.player_units(self.h_player):\n # Incentive to move closer to the opponent's AI\n distance_to_ai = abs(coord.row - ai_location_opponent.row) + abs(coord.col - ai_location_opponent.col)\n if distance_to_ai == 0:\n player1_score += MOVE_TOWARDS_AI_WEIGHT\n else:\n player1_score += MOVE_TOWARDS_AI_WEIGHT / distance_to_ai\n\n # Health consideration\n if unit.type.name != \"AI\":\n player1_score += unit.health * HEALTH_FACTOR\n\n # For the opposing player (similar considerations but for the opponent)\n for (coord, unit) in self.player_units(Player.Defender if self.h_player == Player.Attacker else Player.Attacker):\n distance_to_ai = abs(coord.row - ai_location_self.row) + abs(coord.col - ai_location_self.col)\n if distance_to_ai == 0:\n player2_score += MOVE_TOWARDS_AI_WEIGHT # Give maximum score if the unit is already on the AI's position\n else:\n player2_score += MOVE_TOWARDS_AI_WEIGHT / distance_to_ai\n\n if unit.type.name != \"AI\":\n player2_score += unit.health * HEALTH_FACTOR\n\n return int(player1_score - player2_score)\n\n\n def heuristic_e1(self) -> int:\n # Initialize the scores for both players\n player1_score = player2_score = 0\n\n # Define weights for each unit type\n weights = {\n 'Virus': 3,\n 'Tech': 3,\n 'Firewall': 3,\n 'Program': 3,\n 'AI': 9999\n }\n\n for (coord, unit) in self.player_units(self.h_player):\n if unit is not None:\n unit_type = unit.type.name\n # Add the unit's health to its corresponding unit type score in HealthScore\n # Add the unit's health to its player's score\n player1_score += weights[unit_type] * unit.health\n\n for (coord, unit) in self.player_units(Player.Defender if self.h_player == Player.Attacker else Player.Attacker):\n if unit is not None:\n unit_type = unit.type.name\n # Add the unit's health to its corresponding unit type score in HealthScore\n # Add the unit's health to its player's score\n player2_score += weights[unit_type] * unit.health\n\n # Calculate the final score\n return player1_score - player2_score\n\n \n def heuristic_e0(self) -> int:\n # Initialize the scores for both players\n player1_score = player2_score = 0\n\n # Determine who \n\n unitCount = {\n 'Virus': 0,\n 'Tech': 0,\n 'Firewall': 0,\n 
'Program': 0,\n 'AI': 0\n }\n\n # Get the number of each unit type for Player 1 (performing move)\n for (coord, unit) in self.player_units(self.h_player):\n # Add the unit's count to its corresponding unit type score in unitCount\n unitCount[unit.type.name] += 1\n \n for unitType in unitCount:\n player1_score += weights[unitType] * unitCount[unitType]\n \n # Reset unitCount\n unitCount = {\n 'Virus': 0,\n 'Tech': 0,\n 'Firewall': 0,\n 'Program': 0,\n 'AI': 0\n }\n\n # Get the number of each unit type for Player 2 (not performing move)\n for (coord, unit) in self.player_units(Player.Defender if self.h_player == Player.Attacker else Player.Attacker):\n # Add the unit's count to its corresponding unit type score in unitCount\n unitCount[unit.type.name] += 1\n \n for unitType in unitCount:\n player2_score += weights[unitType] * unitCount[unitType]\n\n return player1_score - player2_score\n\n##############################################################################################################\n\ndef main():\n\n def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n # parse command line arguments\n parser = argparse.ArgumentParser(\n prog='ai_wargame',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--max_depth', type=int, help='maximum search depth')\n parser.add_argument('--max_time', type=float, help='maximum search time')\n parser.add_argument('--max_moves', type=float, help='maximum moves per game')\n parser.add_argument('--game_type', type=str, default=\"attacker\", help='game type: auto|attacker|defender|manual')\n parser.add_argument('--alpha_beta', type=str2bool, help='if a player is an AI, whether alpha-beta is on or off')\n parser.add_argument('--heuristic_type', type=str, help='heuristic type: e0|e1|e2')\n parser.add_argument('--broker', type=str, help='play via a game broker')\n args = parser.parse_args()\n\n # parse the game type\n if args.game_type == \"attacker\":\n game_type = GameType.AttackerVsComp\n elif args.game_type == \"defender\":\n game_type = GameType.CompVsDefender\n elif args.game_type == \"manual\":\n game_type = GameType.AttackerVsDefender\n else:\n game_type = GameType.CompVsComp\n\n # set up game options\n options = Options(game_type=game_type)\n\n # override class defaults via command line options\n if args.max_depth is not None:\n options.max_depth = args.max_depth\n if args.max_time is not None:\n options.max_time = args.max_time\n if args.broker is not None:\n options.broker = args.broker\n if args.heuristic_type is not None:\n options.heuristic_type = args.heuristic_type\n if args.max_moves is not None:\n options.max_moves = int(args.max_moves)\n if args.alpha_beta is not None:\n options.alpha_beta = args.alpha_beta\n \n logfileName = f\"gameTrace-{options.alpha_beta}-{options.max_time}-{int(options.max_moves)}.txt\"\n counter = 0\n while os.path.exists(logfileName):\n logfileName = f\"gameTrace-{options.alpha_beta}-{options.max_time}-{int(options.max_moves)}-{counter}.txt\"\n counter += 1\n\n logfile.write(f\"1. The game parameters:\\n\\ta. Timeout (in s): {options.max_time}\\n\\tb. Max number of turns: {args.max_moves}\\n\",)\n\n # parse the game type\n if args.game_type == \"attacker\":\n logfile.write(f\"\\tc. Play mode: Attacker (Human) vs Defender (AI)\\n\")\n elif args.game_type == \"defender\":\n logfile.write(f\"\\tc. 
Play mode: Attacker (AI) vs Defender (Human)\\n\")\n elif args.game_type == \"manual\":\n logfile.write(f\"\\tc. Play mode: Attacker (Human) vs Defender (Human)\\n\")\n else:\n logfile.write(f\"\\tc. Play mode: Attacker (AI) vs Defender (AI)\\n\")\n\n if not args.game_type == \"manual\":\n logfile.write(f\"\\td. Alpha-Beta is:\")\n logfile.write(f\" {'ON' if options.alpha_beta else 'OFF'}\\n\")\n logfile.write(f\"\\te. Heuristic: {options.heuristic_type}\\n\",)\n\n # create a new game\n game = Game(options=options)\n\n logfile.write(f\"\\n2. Initialtial game board:\\n{game.get_board()}\\n3. Gameplay trace:\\n\\n\")\n\n # the main game loop\n try:\n while True:\n print()\n print(game)\n winner = game.has_winner()\n if game.stats.total_seconds >= options.max_time:\n print(f\"Player {game.next_player.next().name} took too much time to perform turn with {game.stats.total_seconds:0.1f}s, immediate forfeit.\")\n logfile.write(f\"Player {game.next_player.next().name} took too much time to perform turn with {game.stats.total_seconds:0.1f}s, immediate forfeit.\\n\")\n winner = game.next_player\n if game.turns_played == game.options.max_moves:\n print(f\"Tie, max number of turns played! {game.turns_played}/{game.options.max_moves}\")\n logfile.write(f\"Tie, max number of turns played! {game.turns_played}/{game.options.max_moves}\")\n logfile.close()\n os.rename('templog.txt', logfileName)\n break\n if winner is not None:\n print(f\"{winner.name} wins in {game.turns_played} turns!\")\n logfile.write(f\"{winner.name} wins in {game.turns_played} turns!\\n\")\n logfile.close()\n os.rename('templog.txt', logfileName)\n break\n game.stats.total_seconds = 0\n if game.options.game_type == GameType.AttackerVsDefender:\n game.human_turn()\n elif game.options.game_type == GameType.AttackerVsComp and game.next_player == Player.Attacker:\n game.human_turn()\n elif game.options.game_type == GameType.CompVsDefender and game.next_player == Player.Defender:\n game.human_turn()\n else:\n player = game.next_player\n move = game.computer_turn()\n if move is not None:\n game.post_move_to_broker(move)\n else:\n print(\"Computer doesn't know what to do!!!\")\n logfile.close()\n os.rename('templog.txt', logfileName)\n exit(1)\n except KeyboardInterrupt:\n print(\"Game interrupted by user.\")\n logfile.write(\"Game interrupted by user.\\n\")\n logfile.close()\n os.rename('templog.txt', logfileName)\n##############################################################################################################\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"LegendArtur/10x-ai-game","sub_path":"ai_wargame_skeleton.py","file_name":"ai_wargame_skeleton.py","file_ext":"py","file_size_in_byte":41427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"37410031502","text":"import os\nimport numpy as np\nimport pickle\nimport copy\nimport matplotlib.pyplot as plt\n\nfrom medis.plot_tools import quick2D, view_spectra, grid\nfrom medis.utils import dprint\n\ndef get_ideal_photons(fields, cam, comps=True, plot=False):\n ntime, nwave = fields.shape[0], fields.shape[1]\n\n cam.rebinned_cube = np.zeros((ntime, nwave, cam.array_size[1], cam.array_size[0]))\n for step in range(len(fields)):\n print(step)\n if comps:\n spectralcube = np.sum(fields[step], axis=1)\n else:\n spectralcube = fields[step, :, 0]\n\n cube = cam.get_ideal_cube(spectralcube)\n cam.rebinned_cube[step] = cube\n\n cam.rebinned_cube /= np.sum(cam.rebinned_cube) # /sp.numframes\n cam.rebinned_cube = 
np.transpose(cam.rebinned_cube, (1, 0, 2, 3))\n\n if plot:\n # plt.figure()\n # plt.hist(cam.rebinned_cube[cam.rebinned_cube != 0].flatten(), bins=np.linspace(0, 1e4, 50))\n # plt.yscale('log')\n grid(cam.rebinned_cube, show=True, title='get ideal photons', nstd=6)\n\n if cam.usesave:\n cam.save()\n\n return cam\n\ndef get_form_photons(fields, cam, comps=True, plot=False, collapse_time_first=False, norm=False):\n \"\"\"\n Alternative to cam.__call__ that allows the user to specify whether the spectracube contains the planets\n\n :param fields: ndarray\n :param cam: mkids.Camera()\n :param comps: bool\n\n :return:\n mkids.Camera()\n \"\"\"\n dprint(cam.name)\n if os.path.exists(cam.name):\n print(f'loading cam rebined_cube save at {cam.name}')\n with open(cam.name, 'rb') as handle:\n cam = pickle.load(handle)\n else:\n if comps:\n fourcube = np.sum(fields, axis=2)\n else:\n fourcube = fields[:, :, 0]\n\n fourcube = np.abs(fourcube) ** 2\n dprint(np.sum(fourcube))\n fourcube = cam.rescale_cube(fourcube)\n\n max_steps = cam.max_chunk(fourcube)\n num_chunks = int(np.ceil(len(fourcube) / max_steps))\n dprint(fourcube.shape, max_steps, len(fourcube) / max_steps, num_chunks)\n # cam.photons = np.empty((4,0))\n cam.rebinned_cube = np.zeros_like(fourcube)\n for chunk in range(num_chunks):\n photons = cam.get_photons(fourcube[chunk*max_steps:(chunk+1)*max_steps], chunk_step=chunk*max_steps)\n photons = cam.degrade_photons(photons)\n # cam.photons = np.hstack((cam.photons, photons))\n # dprint(photons.shape, cam.photons.shape)\n cam.rebinned_cube[chunk*max_steps:(chunk+1)*max_steps] = cam.rebin_list(photons, time_inds=[chunk*max_steps,(chunk+1)*max_steps])\n # cam.rebinned_cube = cam.rebin_list(cam.photons)\n\n cam.photons = None\n\n for step in range(len(fields)):\n print(step, cam.max_count)\n if cam.max_count:\n cam.rebinned_cube[step] = cam.cut_max_count(cam.rebinned_cube[step])\n\n if norm:\n cam.rebinned_cube /= np.sum(cam.rebinned_cube) # /sp.numframes\n\n if collapse_time_first:\n grid(cam.rebinned_cube, title='comp sum', show=False, logZ=True)\n cam.rebinned_cube = np.median(cam.rebinned_cube, axis=0)[np.newaxis]\n grid(cam.rebinned_cube, title='comp sum', show=False, logZ=True)\n\n cam.rebinned_cube = np.transpose(cam.rebinned_cube, (1, 0, 2, 3))\n\n if plot:\n grid(cam.rebinned_cube, show=True, title='get form photons')\n\n if cam.usesave:\n cam.save_instance()\n\n return cam\n\ndef save_params(param_list):\n \"\"\"\n\n :param param_list: [ap, sp, tp, iop, atmp, cdip, mp] or less\n :return:\n \"\"\"\n print(f'storing params for {[param.__name__() for param in param_list]}')\n save_state = [copy.deepcopy(param) for param in param_list]\n return save_state\n\ndef restore_params(save_state, params):\n \"\"\"\"\"\"\n for i, param in enumerate(params):\n param.__dict__ = save_state[i].__dict__\n","repo_name":"RupertDodkins/mkid_param_investigation","sub_path":"substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"16254314078","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom agent_world import Agent, Behavior, BaseGame, Sensor, Base2DWorld, Drawable2D\nimport util\nfrom constants import *\n\nclass SimpleHeatGame(BaseGame):\n\n def __init__(self, animate=False, timeout=50):\n super(SimpleHeatGame, self).__init__(animate=animate, timeout=timeout)\n\nclass SimpleHeatSource(Drawable2D):\n '''\n Position must be same length as world dimensionality\n 
Heat is in range [0,1] and is experienced fully within radius\n Outside of radius is a heat gradient that reduces with distance from center\n\n Heat currently remains indefinitely when agent is in region\n '''\n\n def __init__(self, radius, position, heat=1):\n self.radius = radius\n self.position = position\n self.heat = heat\n\n def setupGraphics(self, canvas):\n core_corners = util.toScreen(util.circleCorners(self.position, self.radius), CELL_RESOLUTION)\n outer_corners = util.toScreen(util.circleCorners(self.position, self.maxDistance()), CELL_RESOLUTION)\n canvas.create_oval(*outer_corners, fill=\"red\")\n canvas.create_oval(*core_corners, fill=\"black\")\n\n def maxDistance(self):\n # Outside of which heat not felt\n return self.radius * 3\n\n def getHeat(self, distance=0):\n within = distance < self.radius\n max_distance = self.maxDistance()\n if within:\n return self.heat\n elif distance > max_distance:\n return 0\n else:\n # Within linear gradient\n return self.heat * ((max_distance - distance)/max_distance)\n\nclass SimpleHeatWorld(Base2DWorld):\n '''\n Basic world in 2 or 3 dimensions\n Heat sources can be added with a single position, and pos or neg temperature\n The heat at any location in the world is defined by distance from each heat source\n '''\n\n MAX_VELOCITY = 2\n MAX_ROT_VELOCITY = np.pi/6.\n\n def __init__(self, shape):\n super(SimpleHeatWorld, self).__init__(shape)\n self.hsources = []\n\n def addSource(self, source):\n self.hsources.append(source)\n\n def heatAt(self, position):\n heat_from_sources = []\n for hs in self.hsources:\n dist = util.distance(position, hs.position)\n heat = hs.getHeat(dist)\n heat_from_sources.append(heat)\n return sum(heat_from_sources)\n\n def setupGraphics(self, canvas):\n for hs in self.hsources:\n hs.setupGraphics(canvas)\n\n\nclass HeatAntennaSensor(Sensor):\n '''\n Gets reward at defined offset from agent\n '''\n\n def __init__(self, nickname, world, offset, utility=0):\n super(HeatAntennaSensor, self).__init__(nickname, world, utility=utility)\n self.offset = offset\n\n def observe(self, state):\n observe_loc = state.position + self.offset\n return self.world.heatAt(observe_loc)\n\nclass AgentState():\n\n def __init__(self, world):\n self.position = world.center()\n self.direction = np.array([0, 1]) # Vector of length 1.\n\n def __str__(self):\n dx, dy = self.direction\n bearing_deg = np.degrees(np.arctan(-1*dx/dy))\n return \"At %s, Heading: %s\" % (self.position, bearing_deg)\n\nclass RotateBehavior(Behavior):\n\n def __init__(self, nickname, world, cw=True):\n self.cw = cw\n super(RotateBehavior, self).__init__(nickname, world)\n\n def actuate(self, magnitude, state):\n super(RotateBehavior, self).actuate(magnitude, state)\n direction = 1 if self.cw else -1\n theta = magnitude * direction * (self.world.MAX_ROT_VELOCITY) # Rad, CCW\n z_axis = [0, 0, 1]\n rm = util.rotation_matrix(z_axis, theta)\n dir_3d = np.append(state.direction, [0])\n state.direction = np.dot(rm, dir_3d)[:2] # Back to 2D\n\nclass DriveBehavior(Behavior):\n\n def __init__(self, nickname, world):\n super(DriveBehavior, self).__init__(nickname, world)\n\n def actuate(self, magnitude, state):\n super(DriveBehavior, self).actuate(magnitude, state)\n move = magnitude * state.direction * self.world.MAX_VELOCITY # Does this update the state object?\n new_position = state.position + move\n world_ok = util.inside(new_position, [0,0], self.world.shape)\n if world_ok:\n # Ok to move\n state.position = state.position + move\n\n\n\nclass SimpleHeatAgent(Agent):\n '''\n 
Agent with 6 directional heat antennas and 3 behaviors\n\n '''\n DRAW_SIZE = 1\n DRAW_ARC = 80\n\n def __init__(self, world, brain):\n sensors = [\n HeatAntennaSensor('nw', world, [-1,1]),\n HeatAntennaSensor('nnw', world, [-0.5,1]),\n HeatAntennaSensor('n', world, [0,1]),\n HeatAntennaSensor('nne', world, [0.5,1]),\n HeatAntennaSensor('ne', world, [1,1]),\n HeatAntennaSensor('here', world, [0,0], utility=1) # Redeemed reward\n ]\n behaviors = [\n RotateBehavior('turn right', world, cw=True),\n RotateBehavior('turn left', world, cw=False),\n DriveBehavior('drive', world)\n ]\n state = AgentState(world)\n super(SimpleHeatAgent, self).__init__(state, brain, sensors, behaviors)\n\n # Graphics\n\n def getCoords(self):\n x, y = self.state.position\n x0 = x - self.DRAW_SIZE\n y0 = y - self.DRAW_SIZE\n x1 = x + self.DRAW_SIZE\n y1 = y + self.DRAW_SIZE\n return util.toScreen([x0, y0, x1, y1], CELL_RESOLUTION)\n\n def setupGraphics(self, canvas):\n x0, y0, x1, y1 = self.getCoords()\n self.drawable = canvas.create_arc(x0, y0, x1, y1, fill=\"red\", start=320, extent=self.DRAW_ARC)\n\n def draw(self, canvas):\n dx, dy = self.state.direction\n bearing_deg = np.degrees(np.arctan(-1*dx/dy))\n coords = self.getCoords()\n canvas.coords(self.drawable, *coords)\n canvas.itemconfig(self.drawable, start=bearing_deg - (self.DRAW_ARC / 2))\n","repo_name":"onejgordon/pphtm","sub_path":"games/SimpleHeatGame.py","file_name":"SimpleHeatGame.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"23025560480","text":"import cv2\r\nimport numpy as np\r\n# read image\r\nimg = cv2.imread(\"Resources/lena.png\")\r\n# np.ones= all values are 1. size of matrix is 5x5. then type\r\n# of object uint8 = unsigned integer with 8 bit i.e from 0-255\r\nkernel = np.ones((5,5),np.uint8)\r\n\r\nimgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n# blur image\r\nimgBlur = cv2.GaussianBlur(imgGray,(7,7),0)\r\n# canny edge detector\r\nimgCanny = cv2.Canny(img,150,200)\r\n# increase the thickness of edges\r\nimgDialation = cv2.dilate(imgCanny,kernel,iterations=1)\r\n# opposite of dialation is erosion\r\nimgEroded = cv2.erode(imgDialation,kernel,iterations=1)\r\n\r\ncv2.imshow(\"Gray Image\",imgGray)\r\ncv2.imshow(\"Blur Image\",imgBlur)\r\ncv2.imshow(\"Canny Image\",imgCanny)\r\ncv2.imshow(\"Dialation Image\",imgDialation)\r\ncv2.imshow(\"Eroded Image\",imgEroded)\r\ncv2.waitKey(0)","repo_name":"RIT-MESH/Deep-learning-and-Computer-Vision-projects","sub_path":"1Learn-OpenCV-in-3-hours/chapter2.py","file_name":"chapter2.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"32930412245","text":"import logging\nimport functools\nimport torch\nimport os\n\nfrom ptsemseg.loss.loss import (\n cross_entropy2d,\n bootstrapped_cross_entropy2d,\n multi_scale_cross_entropy2d,\n multi_scale_patch_composition,\n multi_scale_patch_composition_targeted,\n smoothness_loss,\n NPS,\n)\n\n\nlogger = logging.getLogger(\"ptsemseg\")\n\nkey2loss = {\n \"cross_entropy\": cross_entropy2d,\n \"bootstrapped_cross_entropy\": bootstrapped_cross_entropy2d,\n \"multi_scale_cross_entropy\": multi_scale_cross_entropy2d,\n \"multi_scale_patch_composition\": multi_scale_patch_composition,\n \"multi_scale_patch_composition_targeted\": multi_scale_patch_composition_targeted,\n \"smoothness_loss\": smoothness_loss,\n \"NPS\": NPS,\n}\n\n\ndef get_loss_function(cfg_training):\n 
if cfg_training[\"loss\"] is None:\n logger.info(\"Using default cross entropy loss\")\n return cross_entropy2d\n\n else:\n loss_dict = cfg_training[\"loss\"]\n loss_name = loss_dict[\"name\"]\n loss_params = {k: v for k, v in loss_dict.items() if k != \"name\"}\n loss_params['weight'] = torch.Tensor([1, 0.01, 0.01, 1, 1, 1, 1, 1, 1, 0.1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).to('cuda')\n\n if loss_name not in key2loss:\n raise NotImplementedError(\"Loss {} not implemented\".format(loss_name))\n\n logger.info(\"Using {} with {} params\".format(loss_name, loss_params))\n return functools.partial(key2loss[loss_name], **loss_params)\n \ndef get_patch_loss_function(cfg_training):\n adv_loss = cfg_training[\"loss\"][\"adv_loss\"]\n smooth_loss = cfg_training[\"loss\"][\"smoothness\"]\n NPS = cfg_training[\"loss\"][\"NPS\"]\n \n adv_loss_name = adv_loss[\"name\"]\n adv_loss_arg = adv_loss[\"args\"]\n \n smooth_loss_name = smooth_loss[\"name\"]\n smooth_loss_args = smooth_loss[\"args\"]\n \n NPS_name = NPS[\"name\"]\n NPS_args = NPS[\"args\"]\n P = []\n assert os.path.isfile(NPS_args)\n with open(NPS_args, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n# print(line)\n split_str = line.split(',')\n val_r = split_str[0].strip()\n if '(' in val_r:\n val_r = val_r[-1]\n val_g = split_str[1].strip()\n val_b = split_str[2].strip()\n if ')' in val_b:\n val_b = val_b[0]\n P.append([float(val_r), float(val_g), float(val_b)])\n \n \n P = torch.Tensor(P).reshape((-1, 3, 1, 1))\n \n weights = (adv_loss[\"mult_factor\"], smooth_loss[\"mult_factor\"], NPS[\"mult_factor\"])\n \n if adv_loss_name not in key2loss:\n raise NotImplementedError(\"Loss {} not implemented\".format(adv_loss_name))\n if smooth_loss_name not in key2loss:\n raise NotImplementedError(\"Loss {} not implemented\".format(smooth_loss_name))\n if NPS_name not in key2loss:\n raise NotImplementedError(\"Loss {} not implemented\".format(NPS_name))\n \n losses_tuple = (\n _init_adv_loss(adv_loss_name, adv_loss_arg), \n functools.partial(key2loss[smooth_loss_name]), \n functools.partial(key2loss[NPS_name], color_list=P), \n )\n return losses_tuple, weights\n\ndef _init_adv_loss(loss_name, arg):\n if loss_name in (\"cross_entropy\", \"bootstrapped_cross_entropy\", \"multi_scale_cross_entropy\") or arg is None:\n return functools.partial(key2loss[loss_name])\n elif loss_name in (\"multi_scale_patch_composition\",\"multi_scale_patch_composition_targeted\",) and arg is not None:\n return functools.partial(key2loss[loss_name], gamma=arg)\n \n \n \n","repo_name":"retis-ai/SemSegAdvPatch","sub_path":"ptsemseg/loss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"34988862116","text":"#!/usr/bin/env python\n\n# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\n# a² + b² = c²\n# For example, 3² + 4² = 9 + 16 = 25 = 5².\n# There exists exactly one Pythagorean triplet for which a + b + c = 1000.\n# Find the product abc.\n\n\n# shame, shame, shame\n\nfor a in range(1,1000):\n for b in range(a, 1000):\n c = 1000 - a -b\n if a**2 + b**2 == c**2:\n print(f\"a {a}, b {b}, c {c}\")\n print(a*b*c)\n \n","repo_name":"rotsix/algo","sub_path":"ProjectEuler/Problem009.py","file_name":"Problem009.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"25156070143","text":"from telegram_bot_pagination 
import InlineKeyboardPaginator\nfrom telegram import InlineKeyboardButton\n\nfrom math import ceil\n\nITEM_PER_PAGE = 10\n\n\ndef get_keyboard_start():\n start_keyboard = [\n [\n InlineKeyboardButton(\"⏸ Pause All Accounts ⏸\", callback_data=\"pause\"),\n ],\n [\n InlineKeyboardButton(\"▶ Resume All Accounts ▶\", callback_data=\"resume\")\n ],\n [\n InlineKeyboardButton(\"List All Accounts\", callback_data=\"accounts\")\n ],\n ]\n return start_keyboard\n\n\ndef get_account_keyboard(usernames, page):\n paginator = InlineKeyboardPaginator(\n ceil(len(usernames) / ITEM_PER_PAGE),\n data_pattern='user#{page}',\n current_page=page\n )\n\n start_index = (page - 1) * ITEM_PER_PAGE\n end_index = start_index + ITEM_PER_PAGE\n\n for username in usernames[start_index:end_index]:\n row = [\n InlineKeyboardButton(username, callback_data=\" \"),\n InlineKeyboardButton(\"Pause\", callback_data=f\"{username}!pause\"),\n InlineKeyboardButton(\"Resume\", callback_data=f\"{username}!resume\"),\n ]\n\n paginator.add_before(*row)\n\n paginator.add_after(InlineKeyboardButton(\"⬅️ Back\", callback_data=\"back\"))\n return paginator.markup\n","repo_name":"Trikos/f2pool_telegram_bot","sub_path":"keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6810958792","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef func(x,y):\n\t#Function with 2 varibel x,y\n\treturn np.sin(x+y) + (x-y)**2 -1.5*x + 2.5*y + 1\n\nx = np.linspace(-1.5,4,100)\ny = np.linspace(-3,4,100)\n\nX,Y = np.meshgrid(x,y)\n# Z = np.sin(X+Y) + np.power((X-Y),2) - 1.5*X + 2.5*Y + 1\nZ = func(X,Y)\n# levels = [-2, -1, 0, 1, 2, 3, 4]\n\nfig = plt.figure(figsize=(6,5))\nleft, bottom, width, height = 0.1, 0.1, 0.8, 0.8\nax = fig.add_axes([left, bottom, width, height]) \n\nmycmap = plt.get_cmap('gist_earth')\ncp = plt.contourf(X,Y,Z, cmap = 'gist_earth', extent = (-10,10,-20,20))\n#plt.clabel(cp, inline = True, fontsize = 10)\n#plt.clabel(cp, colors = 'k', fmt = '%2.1f', fontsize=12)\nplt.colorbar(cp)\nplt.scatter(0,2,color = 'k')\nax.set_title('Contour Plot')\nax.set_xlabel('x')\nax.set_ylabel('y')\nplt.show()\n","repo_name":"locluong09/Particle-Swarm-Optimization","sub_path":"plot_contour.py","file_name":"plot_contour.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"36039646111","text":"import os\nfrom tqdm import tqdm\nimport json\nimport sys\nimport numpy as np\n\nclass Preprocessor(object):\n\t\"\"\"\n\tThis is the class that will perform all of the necessary preprocessing on\n\tthe data.\n\n\tExample of usage:\n\n\t\tp = Preprocessor(\"leagueoflegends\", 1e7, 75)\n\t\tcomments = p.process()\n\t\tout = open('out.txt', 'w+', encoding='utf-8')\n\t\tfor c in comments:\n\t\t\tout.write(c[0])\n\t\tout.close()\n\t\tgood = p.statistics(comments)\n\n\tThe constructor has arguments:\n\t - target_subreddit: A string that represents the subreddit we wish to\n\t focus. If it is none, then we collect data on every subreddit, which\n\t we do not recommend at the moment.\n\t - break_limit: An int that represents the number of comments we wish to\n\t collect before we stop.\n\t - threshold: The percentile that we wish to extract comments from. 
For\n\t example, if this value is 90, then only extract comments in the upper\n\t 10 percent of rated comments.\n\t\"\"\"\n\tdef __init__(self, target_subreddit, break_limit, threshold, custom_file=\"\",\n\t\tcustom=False\n\t):\n\t\tself.target_subreddit = target_subreddit\n\t\tself.break_limit = break_limit\n\t\tself.threshold = threshold\n\n\t\"\"\"\n\tPerforms the file reading that transforms our intended dataset into the\n\tform: [(comment, score), ...]\n\tUses the tqdm library to create a progress bar for sanity purposes.\n\tInputs:\n\t - custom_file: A string denoting the custom file that we would like to use.\n\t Only considered if custom=True.\n\t - custom: A boolean denoting whether or not we would like to use a custom\n\t file.\n\t\"\"\"\n\tdef process(self, custom_file=\"\", custom=False):\n\t\tprint(\"Processing file...\")\n\n\t\t#Default input file\n\t\tf_name = '../RC_2015'\n\t\tif custom:\n\t\t\tf_name = custom_file\n\n\t\ttry:\n\t\t\tf = open(f_name, 'r')\n\t\texcept:\n\t\t\tprint(\"File not found!\")\n\t\t\tsys.exit()\n\n\t\tline_count = 0\n\t\tsizecounter = 0\n\t\tsizecounter += os.stat(f_name).st_size\n\t\toutput = []\n\t\t#Initialize progress bar\n\t\twith tqdm(total=sizecounter,\n\t\t\t\tunit='B', unit_scale=True, unit_divisor=1024) as pbar:\n\t\t\twith open(f_name, 'r', encoding=\"utf-8\") as fh:\n\t\t\t\t\"\"\"\n\t\t\t\tIf we wish to use a custom file, \n\t\t\t\t\"\"\"\n\t\t\t\tif custom:\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tWe simply add each line of the input file to a custom\n\t\t\t\t\tstructure.\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tfor line in fh:\n\t\t\t\t\t\toutput.append(line)\n\t\t\t\t\t\tif line:\n\t\t\t\t\t\t\tpbar.set_postfix(file=f_name[-10:], refresh=False)\n\t\t\t\t\t\t\tpbar.update(sys.getsizeof(line))\n\t\t\t\telse:\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tFor each file in the input file, we load each comment and \n\t\t\t\t\tsee if the comment that we observe is the same as the one\n\t\t\t\t\twe're looking for. 
If so, we record the body of the comment\n\t\t\t\t\tand its core.\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tfor line in fh:\n\t\t\t\t\t\tcomment = json.loads(line)\n\t\t\t\t\t\tcomment_as_dict = dict(comment)\n\t\t\t\t\t\tsubreddit = comment_as_dict['subreddit']\n\t\t\t\t\t\tif subreddit == self.target_subreddit:\n\t\t\t\t\t\t\tscore = int(comment_as_dict['ups'])\n\t\t\t\t\t\t\toutput.append((comment_as_dict['body'], score))\n\t\t\t\t\t\tline_count += 1\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tWe stop after some number of comments if we are not\n\t\t\t\t\t\tlooking at every single comment.\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tif self.break_limit != None:\n\t\t\t\t\t\t\tif line_count > self.break_limit:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif line:\n\t\t\t\t\t\t\tpbar.set_postfix(file=f_name[-10:], refresh=False)\n\t\t\t\t\t\t\tpbar.update(sys.getsizeof(line))\n\t\tf.close()\n\n\t\tprint(\"Finished processing\")\n\t\treturn output, line_count\n\n\t\"\"\"\n\tTakes in the original comment base and returns a subset of of comments that\n\tpertain to the percentile requirements that we've previously set forth.\n\t\"\"\"\n\tdef statistics(self, comments):\n\t\t\"\"\"\n\t\tWe first extract the scores and compute the lower bound of the\n\t\tpercentile.\n\t\t\"\"\"\n\t\tscores = []\n\t\tfor comment in comments:\n\t\t\tscores.append(comment[1])\n\t\tscores = np.array(scores)\n\t\tlower_bound = np.percentile(scores, self.threshold)\n\t\t\"\"\"\n\t\tWe then make another pass through the comments and only save the comments\n\t\tthat are above the bound.\n\t\t\"\"\"\n\t\tgood_comments = []\n\t\tfor comment in comments:\n\t\t\tif comment[1] > lower_bound:\n\t\t\t\tgood_comments.append(comment)\n\t\treturn good_comments\n\t\t\"\"\"\n\t\tThis is pretty inefficient but in practice, it runs pretty quickly.\n\t\t\"\"\"\n\n","repo_name":"shuyangw/cs585-final-project-submission","sub_path":"src/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8445397327","text":"\"\"\"Melhore o jogo do DESAFIO 28 onde o computador vai “pensar” em um número entre 0 e 10.\nSó que agora o jogador vai tentar adivinhar até acertar,\nmostrando no final quantos palpites foram necessários para vencer.\"\"\"\n\nfrom random import randint\nprint(f'{\"JOGO DA ADIVINHAÇÃO\":=^40}')\ncomputador = randint(0, 10)\nprint('Sou seu computador \\n'\n 'Acabei de pensar em um número entre 0 e 10\\n'\n 'Será que você consegue adivinhar qual foi?')\njogador = int(input('Qual é o seu palpite? '))\npalpite = 1\nwhile jogador != computador:\n if jogador > computador:\n jogador = int(input('Menos...tente outra vez'))\n if jogador < computador:\n jogador = int(input('Mais...tente outra vez'))\n if jogador == computador:\n print('Você acertou!')\n palpite += 1\nprint(f'Você acertou! 
Você levou {palpite} tentativas para acertar')\n","repo_name":"garciashirley38/jogo-da-adivinha-o","sub_path":"jogodaadivinhacao.py","file_name":"jogodaadivinhacao.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"72653845429","text":"import os\nimport datetime\nimport csv\n\nimport requests\n\ncredentials = 'your_zendesk_email', 'your_zendesk_password'\nzendesk = 'https://your_subdomain.zendesk.com'\nlanguage = 'some_locale'\n\ndate = datetime.date.today()\nbackup_path = os.path.join(str(date), language)\nif not os.path.exists(backup_path):\n os.makedirs(backup_path)\n\nlog = []\n\nendpoint = zendesk + '/api/v2/help_center/{locale}/articles.json'.format(locale=language.lower())\nwhile endpoint:\n response = requests.get(endpoint, auth=credentials)\n if response.status_code != 200:\n print('Failed to retrieve articles with error {}'.format(response.status_code))\n exit()\n data = response.json()\n\n for article in data['articles']:\n if article['body'] is None:\n continue\n title = '
<h1>' + article['title'] + '</h1>
'\n filename = '{id}.html'.format(id=article['id'])\n with open(os.path.join(backup_path, filename), mode='w', encoding='utf-8') as f:\n f.write(title + '\\n' + article['body'])\n print('{id} copied!'.format(id=article['id']))\n\n log.append((filename, article['title'], article['author_id']))\n\n endpoint = data['next_page']\n\nwith open(os.path.join(backup_path, '_log.csv'), mode='wt', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow( ('File', 'Title', 'Author ID') )\n for article in log:\n writer.writerow(article)\n","repo_name":"gistable/gistable","sub_path":"dockerized-gists/95a1e9c771e46bb94e9b/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"35629961799","text":"from itertools import combinations\r\nfrom collections import Counter\r\n\r\n\r\ndef solution(orders, course):\r\n answer = []\r\n \r\n for c in course:\r\n candidates = []\r\n for o in orders:\r\n for cb in combinations(o,c):\r\n joined = ''.join(sorted(cb))\r\n candidates.append(joined)\r\n counter_candi = Counter(candidates).most_common()\r\n answer += [menu for menu,cnt in counter_candi if cnt > 1 and cnt == counter_candi[0][1]]\r\n answer.sort()\r\n\r\n return answer\r\n\r\norders = [\"ABCDE\", \"AB\", \"CD\", \"ADE\", \"XYZ\", \"XYZ\", \"ACD\"]\r\ncourse = [2,3,5]\r\nprint(solution(orders,course))","repo_name":"bignamu/algorithmPython","sub_path":"Apply/2021 KAKAO BLIND RECRUITMENT 메뉴 리뉴얼_2.py","file_name":"2021 KAKAO BLIND RECRUITMENT 메뉴 리뉴얼_2.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"28297597462","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Network overview analysis for ContNeXt\"\"\"\n\nimport json\nimport os\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom tqdm import tqdm\n\nimport ora\nfrom network_utils import create_network_from_edge_file, edge_file_path, load_interactome, most_common\n\n# replace here the location of the external data dir if not structured as instructed\ndata_dir = os.path.join(os.path.expanduser(\"~\"), \"contnext_data\", \"data\")\n\n# optional, replace here the desired location of the output figures\nfigures_dir = os.path.join(data_dir, \"figures\")\nos.makedirs(figures_dir, exist_ok=True)\n\n# ### LOAD DATA ####\n# tissue networks\ntis_network_dict = {\n ID: create_network_from_edge_file(\n edge_file_path(os.path.join(data_dir, \"coexpr_networks\", \"tissue\"), ID), ID)\n for ID in tqdm(os.listdir(os.path.join(data_dir, \"coexpr_networks\", \"tissue\")),\n desc=\"Creating/loading network objects\") if ID != \".DS_Store\"\n}\n\n# cell type networks\nct_network_dict = {\n ID: create_network_from_edge_file(edge_file_path(os.path.join(data_dir, \"coexpr_networks\", \"cell_type\"), ID),\n ID)\n for ID in tqdm(os.listdir(os.path.join(data_dir, \"coexpr_networks\", \"cell_type\")),\n desc=\"Creating/loading network objects\") if ID != \".DS_Store\"\n}\n\n# cell line networks\ncl_network_dict = {\n ID: create_network_from_edge_file(edge_file_path(os.path.join(data_dir, \"coexpr_networks\", \"cell.line\"), ID),\n ID)\n for ID in tqdm(os.listdir(os.path.join(data_dir, \"coexpr_networks\", \"cell.line\")),\n desc=\"Creating/loading network objects\") if ID != \".DS_Store\"\n}\n\n# interactome PPI network\ninteractome = load_interactome(os.path.join(data_dir, 
\"interactome\", \"interactome_18_01_2021.tsv\"))\n\n# KEGG pathway assignments\ngene_pathway_file = os.path.join(data_dir, \"pathway\", \"gene_pathway_assignment.json\")\nwith open(gene_pathway_file, 'r') as f:\n gene_pathways_dict = json.load(f)\npathway_genes = os.path.join(data_dir, \"pathway\", \"kegg_hgnc_ids.gmt\")\npathway_genes_dict = ora.gmt_parser(pathway_genes, 3, 10000)\n\n# ontology name mappings\nwith open(os.path.join(data_dir, \"mappings\", \"uberon_name_mappings.json\"), 'r') as f:\n uberon_name_mappings = json.load(f)\nwith open(os.path.join(data_dir, \"mappings\", \"CL_name_mappings.json\"), 'r') as f:\n CL_name_mappings = json.load(f)\nwith open(os.path.join(data_dir, \"mappings\", \"CLO_name_mappings.json\"), 'r') as f:\n CLO_name_mappings = json.load(f)\nwith open(os.path.join(data_dir, \"mappings\", \"hgnc_name_mappings.json\"), 'r') as f:\n hgnc_mappings = json.load(f)\n\n# group context data into one variable¶\ncontexts = {\n \"tissues\": {\n \"nets\": tis_network_dict,\n \"name_mapping\": uberon_name_mappings\n },\n \"cell types\": {\n \"nets\": ct_network_dict,\n \"name_mapping\": CL_name_mappings\n },\n \"cell lines\": {\n \"nets\": cl_network_dict,\n \"name_mapping\": CLO_name_mappings\n }\n}\nIDs_per_context = {\n group: sorted(group_data[\"nets\"].keys(), key=lambda ID: len(group_data[\"nets\"][ID].nodes()), reverse=True)\n for group, group_data in contexts.items()}\n\n# ### LOAD/PROCESS CONTROLLABILITY ANALYSIS RESULTS ####\nsummary_keys = [\n \"Number of nodes(N)\",\n \"Number of edges(E)\",\n \"Average degree(Kmean)\",\n \"Number of driver nodes(Nd)\",\n \"Fraction of driver nodes(Nd/N)\",\n \"Fraction of type-I critical nodes\",\n \"Fraction of type-I redundant nodes\",\n \"Fraction of type-I ordinary nodes\",\n \"Fraction of type-II critical nodes\",\n \"Fraction of type-II redundant nodes\",\n \"Fraction of type-II ordinary nodes\",\n \"Fraction of critical links\",\n \"Fraction of redundant links\",\n \"Fraction of ordinary links\",\n \"Average local clustering coefficient\",\n \"Global clustering coefficient\"\n \"Average directed local clustering coefficient\",\n \"Global directed clustering coefficient\",\n \"Average betweenness centralities\",\n \"Average closeness centralities\",\n \"Average authority centralities\",\n \"Average hub centralities\",\n \"Fraction of source nodes\",\n \"Fraction of external dilations\",\n \"Fraction of internal dilations\",\n]\n\nwith open(os.path.join(data_dir, \"controllability_analysis\", \"interactome.output\"), \"r\") as f:\n summary_raw = f.read().strip().split(\",\")\n\nsummary = dict(zip(summary_keys, summary_raw))\nwith open(os.path.join(data_dir, \"controllability_analysis\", \"interactome_summary.tsv\"), 'w') as f:\n for key in summary.keys():\n f.write(f\"{key}\\t{summary[key]}\\n\")\n\nnode_type = pd.read_table(os.path.join(data_dir, \"controllability_analysis\", \"interactome.nodetype\"), sep=\" \", usecols=[\"#Name\", \"TypeI\"])\nnode_type['#Name'] = node_type['#Name'].astype(str)\nimportance = {0: \"critical\", 1: \"redundant\", 2: \"ordinary\"}\nnode_type = node_type.replace({\"#Name\": hgnc_mappings, \"TypeI\": importance})\nnode_type.columns = [\"protein\", \"classification\"]\nnode_type.to_csv(os.path.join(data_dir, \"controllability_analysis\", \"interactome_node_classifications.tsv\"), sep='\\t', index=False)\n\nindispensable_nodes = list(node_type.loc[node_type.classification == \"critical\"][\n \"protein\"]) # increases driver nodes if removed ex. 
in a directed path\ndispensable_nodes = list(node_type.loc[node_type.classification == \"redundant\"][\n \"protein\"]) # decreases driver nodes if removed ex. leaf in a star\nneutral_nodes = list(node_type.loc[node_type.classification == \"ordinary\"][\n \"protein\"]) # no change in driver nodes if removed ex. central hub of a star\nprint(f\"there are {len(indispensable_nodes)} indispensable nodes, {len(dispensable_nodes)} dispensable nodes, and {len(neutral_nodes)} neutral nodes.\")\n\nwith open(os.path.join(data_dir, \"controllability_analysis\", \"interactome_indispensable_nodes.txt\"), 'w') as f:\n f.write(\"\\n\".join(indispensable_nodes))\n\nedge_type = pd.read_table(os.path.join(data_dir, \"controllability_analysis\", \"interactome.linktype\"), sep=\" \")\nedge_type['#source'] = edge_type['#source'].astype(str)\nedge_type['target'] = edge_type['target'].astype(str)\nedge_type = edge_type.replace({\"#source\": hgnc_mappings, \"target\": hgnc_mappings, \"LinkType\": importance})\nedge_type.columns = [\"source\", \"target\", \"classification\"]\nedge_type.to_csv(os.path.join(data_dir, \"controllability_analysis\", \"interactome_edge_classifications.tsv\"), sep='\\t', index=False)\n\nindispensable_edges = list(zip(edge_type.loc[edge_type.classification == \"critical\"][\"source\"],\n edge_type.loc[edge_type.classification == \"critical\"][\n \"target\"])) # increases driver nodes if removed\ndispensable_edges = list(zip(edge_type.loc[edge_type.classification == \"redundant\"][\"source\"],\n edge_type.loc[edge_type.classification == \"redundant\"][\n \"target\"])) # decreases driver nodes if removed\nneutral_edges = list(zip(edge_type.loc[edge_type.classification == \"ordinary\"][\"source\"],\n edge_type.loc[edge_type.classification == \"ordinary\"][\n \"target\"])) # no change in driver nodes if removed\nprint(f\"there are {len(indispensable_edges)} indispensable edges, {len(dispensable_edges)} dispensable edges, and {len(neutral_edges)} neutral edges.\")\n\npd.DataFrame(indispensable_edges).to_csv(os.path.join(data_dir, \"controllability_analysis\", \"interactome_indispensable_edges.txt\"),\n sep='\\t', index=False, header=False)\n\nmost_common_nodes_in_tis = most_common(tis_network_dict.values(), comparison=\"nodes\")\nmost_common_nodes_in_ct = most_common(ct_network_dict.values(), comparison=\"nodes\")\nmost_common_nodes_in_cl = most_common(cl_network_dict.values(), comparison=\"nodes\")\n\n# ### FIGURES ####\n\n# Network size overview figure\nterm_and_node_num_per_context = {}\n# use a count to preserve ordering\ncount = 0\nfor group, ids in IDs_per_context.items():\n for ID in ids:\n term_and_node_num_per_context[count] = {\n \"Context\": group,\n \"Term\": contexts[group][\"name_mapping\"][ID].capitalize(),\n \"Number of nodes\": len(contexts[group][\"nets\"][ID].nodes()),\n }\n count += 1\nnode_count_df = pd.DataFrame(term_and_node_num_per_context.values(), index=term_and_node_num_per_context.keys())\n\nsns.reset_defaults()\nf, ax = plt.subplots(figsize=(20, 15))\n# the node_count_df has the preserved intended order\nlegend_order = node_count_df[\"Context\"].unique()\nsns.barplot(x=\"Number of nodes\", y=\"Term\", hue=\"Context\", hue_order=legend_order, data=node_count_df, dodge=False)\n# extend chart above and below\nax.set_ylim([len(node_count_df), -1.5])\nplt.title(\"Network size distribution\", fontsize=45, x=0.25)\nax.set_xlabel(ax.get_xlabel(), fontsize=25)\nax.set_ylabel(\"Individual class and corresponding contexts\", fontsize=25)\nfor p in ax.patches:\n # label bars\n 
ax.annotate(int(np.nan_to_num(p.get_width(), 0)), xy=(p.get_width(), p.get_y() + p.get_height() / 2),\n xytext=(5, 0), textcoords='offset points', ha=\"left\", va=\"center\")\n# move legend to outside\nplt.legend(bbox_to_anchor=(1.01, 0.9), loc=2, borderaxespad=0., fontsize='large')\n# save figure\nplt.savefig(os.path.join(figures_dir, \"Network_Size_Distribution.png\"), bbox_inches='tight', dpi=320)\n\n# Term overview figure\nterm_overview = {\n group: {\n \"ids\": list(group_data[\"nets\"].keys()),\n \"terms\": [group_data[\"name_mapping\"][ID] for ID in group_data[\"nets\"].keys()]\n }\n for group, group_data in contexts.items()\n}\nnaming_for_files = {\"tissues\": \"tissue\", \"cell types\": \"celltype\", \"cell lines\": \"cellline\"}\ncontext_info = {}\nfor context in term_overview:\n with open(os.path.join(data_dir, \"misc_data\", f\"FULL_{naming_for_files[context]}_overview_after_download.tsv\"),\n \"r\") as f:\n lines = [f.readline().strip()]\n lines += [line.strip() for line in f.readlines() if\n line.strip().split(\"\\t\")[1] in term_overview[context][\"terms\"]]\n context_info[context] = {\n line.split(\"\\t\")[0].split(\":\")[1]: {\n \"Name\": line.split(\"\\t\")[1],\n \"Number of datasets\": int(line.split(\"\\t\")[2]),\n \"Number of samples\": int(line.split(\"\\t\")[3]),\n \"Number of nodes\": len(contexts[context][\"nets\"][line.split(\"\\t\")[0].split(\":\")[1]].nodes()),\n \"Number of edges\": len(contexts[context][\"nets\"][line.split(\"\\t\")[0].split(\":\")[1]].edges()),\n }\n for line in lines[1:]\n }\n with open(os.path.join(data_dir, \"misc_data\", f\"{naming_for_files[context]}_overview.tsv\"), \"w\") as f:\n f.write(\"\\n\".join(lines))\n\n# Correlation between number of nodes to number of datasets and number of samples for all terms figure\ntissue_df = pd.DataFrame.from_dict(context_info[\"tissues\"], orient=\"index\")\ntissue_df[\"Context\"] = [\"tissue\"] * len(tissue_df)\ncelltype_df = pd.DataFrame.from_dict(context_info[\"cell types\"], orient=\"index\")\ncelltype_df[\"Context\"] = [\"cell type\"] * len(celltype_df)\ncellline_df = pd.DataFrame.from_dict(context_info[\"cell lines\"], orient=\"index\")\ncellline_df[\"Context\"] = [\"cell line\"] * len(cellline_df)\nterm_info_df = pd.concat([tissue_df, celltype_df, cellline_df], axis=0)\nmpl.rcParams['xtick.labelsize'] = 'xx-small'\nmpl.rcParams['ytick.labelsize'] = 'xx-small'\nax = sns.pairplot(term_info_df,\n x_vars=[\"Number of datasets\", \"Number of samples\"], y_vars=[\"Number of nodes\"],\n hue=\"Context\", plot_kws={\"s\": 15, 'alpha': 0.8})\nplt.title(\"Correlation between number of datasets/samples and network size\", x=0)\nax._legend.remove()\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., prop={'size': 8})\nplt.savefig(os.path.join(figures_dir, \"datasets-samples_vs_nodes.png\"), bbox_inches='tight', dpi=320)\n\n# Distribution of the frequency of all proteins across the co-expression networks\nfig, axes = plt.subplots(1, 3, sharey=True, figsize=(20, 5))\ngene_counts_tis = [int(count[0].split('/')[0]) for gene, count in most_common_nodes_in_tis]\ngene_counts_ct = [int(count[0].split('/')[0]) for gene, count in most_common_nodes_in_ct]\ngene_counts_cl = [int(count[0].split('/')[0]) for gene, count in most_common_nodes_in_cl]\n\nsns.histplot(gene_counts_tis, kde=True, bins=26, ax=axes[0])\nsns.histplot(gene_counts_ct, kde=True, bins=26, ax=axes[1])\nsns.histplot(gene_counts_cl, kde=True, bins=26, ax=axes[2])\n\naxes[0].set(xlabel=\"tissues\", ylabel=\"\")\naxes[1].set(xlabel=\"cell 
types\", ylabel=\"\")\naxes[2].set(xlabel=\"cell lines\", ylabel=\"\")\n\nfig.text(.12, .95, \"Protein frequency distribution in co-expression networks\", fontsize=35, weight=\"bold\")\nfig.text(0.5, -0.03, 'Number of networks', ha='center', fontsize=20)\nfig.text(0.08, 0.5, 'Protein count', va='center', rotation='vertical', fontsize=20)\n\nplt.savefig(os.path.join(figures_dir, \"prot-freq-dist.png\"), bbox_inches='tight', dpi=320)\n","repo_name":"ContNeXt/scripts","sub_path":"analyses/1-networks_overview.py","file_name":"1-networks_overview.py","file_ext":"py","file_size_in_byte":13026,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"20498103805","text":"n, m = map(int, input().split())\na = [list(map(int, input().split())) for _ in range(n)]\ninfo = []\nfor _ in range(m):\n d, s = map(int, input().split())\n info.append([d - 1, s])\nclouds = [[n-2, 0], [n-2, 1], [n-1, 0], [n-1, 1]]\ndx = [0, -1, -1, -1, 0, 1, 1, 1]\ndy = [-1, -1, 0, 1, 1, 1, 0, -1]\n\nfor dir in info:\n d, s = dir[0], dir[1]\n next_clouds = []\n for cloud in clouds:\n x = cloud[0]\n y = cloud[1]\n nx = (x + dx[d] * s) % n\n ny = (y + dy[d] * s) % n\n next_clouds.append([nx, ny])\n visited = [[False] * n for _ in range(n)]\n for cloud in next_clouds:\n x = cloud[0]\n y = cloud[1]\n a[x][y] += 1\n visited[x][y] = True\n\n cx = [-1, -1, 1, 1]\n cy = [-1, 1, -1, 1]\n for cloud in next_clouds:\n x = cloud[0]\n y = cloud[1]\n count = 0\n for i in range(4):\n nx = x + cx[i]\n ny = y + cy[i]\n\n if 0 <= nx < n and 0 <= ny < n and a[nx][ny] >= 1:\n count += 1\n\n a[x][y] += count\n\n clouds = []\n\n for i in range(n):\n for j in range(n):\n if a[i][j] >= 2 and visited[i][j] == False:\n a[i][j] -= 2\n clouds.append([i, j])\n\nans = 0\nfor i in range(n):\n ans += sum(a[i])\n\nprint(ans)","repo_name":"SeoYeonBae/algorithmStudy","sub_path":"알고리즘 공부 - Python/BOJ/21610_마법사상어와비바라기.py","file_name":"21610_마법사상어와비바라기.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20594857845","text":"# import\nfrom scipy.sparse.linalg import svds\nfrom CF import loadData\nimport json\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nimport config\nimport pickle\nimport os.path\nfrom CBF.CBF import ContentsBasedFiltering\n\n\nclass CollaborativeFiltering:\n\n def __init__(self, user_id):\n ReadData = loadData.ReadData()\n self.userData = ReadData.userData\n self.appList = ReadData.appList\n self.userId = user_id\n self.searchData = pd.DataFrame(columns=['appid', 'playtime_forever', 'playtime_2weeks', 'steamid'])\n self.searchUser = pd.DataFrame(\n columns=['appid', 'playtime_forever', 'steamid', 'newsteamid', 'playtime_2weeks'])\n self.svd_preds = \"\"\n self.newSteamId = \"\"\n\n def getUserData(self):\n self.userData = self.userData.drop_duplicates(['appid', 'steamid'])\n url = 'https://api.steampowered.com/IPlayerService/GetOwnedGames/v1/?key=' + config.api_key + '&steamid=' + str(\n self.userId)\n response = requests.get(url)\n if response.status_code == 200:\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n jsonData = pd.read_json(soup.text)\n if jsonData.empty:\n return \"스팀 프로필 및 게임 세부정보 설정을 공개로 바꿔주세요\", ''\n elif jsonData['response']['game_count'] == 0:\n return \"스팀 라이브러리에 게임이 없습니다.\", ''\n else:\n if response.status_code == 500:\n return \"입력하신 스팀 id를 확인해주세요\", ''\n else:\n return \"스팀서버 통신 에러입니다. 
잠시 후 다시 시도해주세요.\", ''\n\n self.searchUser = pd.DataFrame(jsonData['response']['games'])\n self.searchUser = self.searchUser.drop(['playtime_windows_forever', 'playtime_mac_forever',\n 'playtime_linux_forever'], axis='columns')\n self.searchUser['steamid'] = self.userId\n self.newSteamId = self.userData['steamid'].unique().size\n self.searchUser['newsteamid'] = self.newSteamId\n\n if 'playtime_2weeks' not in self.searchUser.columns:\n self.searchUser['playtime_2weeks'] = 0\n\n return 'success', self.searchUser\n\n def addData(self, appids, steamid):\n if self.searchUser.empty:\n self.newSteamId = self.userData['steamid'].unique().size\n else:\n list = []\n for appid in appids:\n for alreadyHave in self.searchUser['appid']:\n if appid == alreadyHave:\n list.append(appid)\n break\n for appid in list:\n appids.remove(appid)\n\n for appid in appids:\n self.searchUser = self.searchUser.append(\n {'appid': int(appid), 'playtime_forever': 600.0, 'steamid': steamid, 'newsteamid': self.newSteamId,\n 'playtime_2weeks': 0}, ignore_index=True)\n\n # weight 할당 def\n def refine(self):\n # 가중치 부여\n self.searchUser['weight'] = 0\n self.searchUser['playtime_forever'] = self.searchUser['playtime_forever'] / 60\n self.searchUser['playtime_forever'] = np.log10(self.searchUser['playtime_forever'])\n\n def nonePlaying(playtime_forever, playtime_2weeks, weight):\n # 플레이시간 0\n if playtime_forever == float('-inf'):\n weight = 1\n # 플레이시간 1시간 이내\n elif playtime_forever < 0:\n weight = -1\n # 플레이시간 10시간 이내\n # elif 0 <= playtime_forever < 1:\n # weight = 2\n # 플레이시간 1시간 이상\n else:\n weight = 2\n\n if playtime_2weeks >= 60:\n weight = weight + 1\n\n return weight\n\n self.searchUser['weight'] = self.searchUser.apply(lambda x: nonePlaying(x['playtime_forever'],\n x['playtime_2weeks'], x['weight']),axis=1)\n\n file = 'CF/data/pivot.pickle'\n\n if os.path.isfile(file):\n self.userData = self.userData.append(self.searchUser)\n with open(file, 'rb') as fr:\n pivotUserApp = pickle.load(fr)\n else:\n ## 피봇 테이블 저장\n pivotUserApp = self.userData.pivot(\n index='newsteamid',\n columns='appid',\n values='weight',\n ).fillna(0)\n with open(file, 'wb') as fw:\n pickle.dump(pivotUserApp, fw)\n self.userData = self.userData.append(self.searchUser)\n\n pivotSearchUserApp = self.searchUser.pivot(\n index='newsteamid',\n columns='appid',\n values='weight',\n ).fillna(0)\n pivotUserApp = pivotUserApp.append(pivotSearchUserApp, sort=False).fillna(0)\n\n ## 추천을 위한 svd 수행\n matrix = pivotUserApp.values\n weightMean = np.mean(matrix, axis=1)\n matrixUserMean = matrix - weightMean.reshape(-1, 1)\n U, sigma, Vt = svds(matrixUserMean, k=12)\n sigma = np.diag(sigma)\n svd_user_predicted_weight = np.dot(np.dot(U, sigma), Vt) + weightMean.reshape(-1, 1)\n self.svd_preds = pd.DataFrame(svd_user_predicted_weight, columns=pivotUserApp.columns)\n\n def recommend_games(self, num_recommendations=10, n=3):\n\n user_row_number = self.newSteamId\n # 최종적으로 만든 pred_df에서 사용자 index에 따라 게임 데이터 정렬 -> 게임 weight가 높은 순으로 정렬 됌\n sorted_user_predictions = self.svd_preds.iloc[user_row_number].sort_values(ascending=False)\n\n # 원본 데이터에서 user_id 해당하는 데이터를 뽑아낸다.\n user_data = self.userData[self.userData.newsteamid == user_row_number]\n # 위에서 뽑은 user_data와 게임 데이터를 합친다.\n user_history = user_data.merge(self.appList, on='appid').sort_values(['weight'], ascending=False)\n\n # 원본 게임 데이터에서 사용자 목록에 있는 게임 데이터를 제외해 데이터를 추출\n recommendations = self.appList[~self.appList['appid'].isin(user_history['appid'])]\n # 사용자의 게임 weight가 높은 순으로 정렬된 데이터와 위 recommendations 을 합친다.\n 
recommendations = recommendations.merge(pd.DataFrame(sorted_user_predictions).reset_index(), on='appid')\n # 컬럼 이름 바꾸고 정렬해서 return\n recommendations = recommendations.rename(columns={user_row_number: 'Predictions'}).sort_values('Predictions',\n ascending=False).iloc[\n :num_recommendations, :]\n recommendations = recommendations[['appid', 'name']]\n predictions = pd.DataFrame(columns=['appid', 'name', 'score'])\n scores = []\n for appid in recommendations['appid']:\n data = ContentsBasedFiltering(appid)\n scores.append(0)\n data.makePoint()\n if not data.isAppidValid:\n continue\n data.refine()\n data.simEval()\n predictions = predictions.append(data.result(n))\n recommendations['score'] = scores\n recommendations = recommendations.append(predictions).drop(['score', 'name'], axis='columns').drop_duplicates()\n recommendations = recommendations[~recommendations['appid'].isin(user_history['appid'])]\n return user_history, recommendations\n\n\n","repo_name":"bokiri409/SteamWorld","sub_path":"exec/recommend/Recommend/CF/CF.py","file_name":"CF.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"4661067769","text":"import os\nimport unittest\nimport logging\nfrom copy import copy\nfrom pathlib import Path\nfrom typing import List\nfrom unittest import TestCase\n\nfrom linkml_runtime.dumpers import yaml_dumper\nfrom linkml_runtime.linkml_model.meta import SchemaDefinition, ClassDefinition, SlotDefinitionName, SlotDefinition, \\\n ClassDefinitionName, Prefix\nfrom linkml_runtime.loaders.yaml_loader import YAMLLoader\nfrom linkml_runtime.utils.introspection import package_schemaview\nfrom linkml_runtime.utils.schemaview import SchemaView, SchemaUsage, OrderedBy\nfrom linkml_runtime.utils.schemaops import roll_up, roll_down\nfrom tests.test_utils import INPUT_DIR\n\nSCHEMA_NO_IMPORTS = Path(INPUT_DIR) / 'kitchen_sink_noimports.yaml'\nSCHEMA_WITH_IMPORTS = Path(INPUT_DIR) / 'kitchen_sink.yaml'\nSCHEMA_WITH_STRUCTURED_PATTERNS = Path(INPUT_DIR) / \"pattern-example.yaml\"\n\nyaml_loader = YAMLLoader()\nIS_CURRENT = 'is current'\nEMPLOYED_AT = 'employed at'\nCOMPANY = 'Company'\nAGENT = 'agent'\nACTIVITY = 'activity'\nRELATED_TO = 'related to'\nAGE_IN_YEARS = 'age in years'\n\n\nclass SchemaViewTestCase(unittest.TestCase):\n\n def test_children_method(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n children = view.get_children(\"Person\")\n self.assertEqual(children, ['Adult'])\n\n def test_all_aliases(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n aliases = view.all_aliases()\n self.assertIn(\"identifier\", aliases[\"id\"])\n self.assertIn(\"A\", aliases[\"subset A\"])\n self.assertIn(\"B\", aliases[\"subset B\"])\n self.assertIn(\"dad\", aliases[\"Adult\"])\n self.assertNotIn(\"test\", aliases[\"Adult\"])\n\n def test_schemaview_enums(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n for en, e in view.all_enums().items():\n if e.name == \"Animals\":\n for pv, v in e.permissible_values.items():\n if pv == \"CAT\":\n self.assertEqual(view.permissible_value_parent(pv, e.name), None)\n self.assertEqual(view.permissible_value_ancestors(pv, e.name), ['CAT'])\n if pv == \"ANGRY_LION\":\n self.assertEqual(view.permissible_value_parent(pv, e.name), ['LION'])\n self.assertEqual(view.permissible_value_ancestors(pv, e.name), ['ANGRY_LION', 'LION', 'CAT'])\n for cn, c in view.all_classes().items():\n if c.name == \"Adult\":\n self.assertEqual(view.class_ancestors(c.name), ['Adult', 'Person', 'HasAliases', 
'Thing'])\n\n def test_schemaview(self):\n # no import schema\n view = SchemaView(SCHEMA_NO_IMPORTS)\n logging.debug(view.imports_closure())\n self.assertEqual(len(view.imports_closure()), 1)\n all_cls = view.all_classes()\n logging.debug(f'n_cls = {len(all_cls)}')\n\n self.assertEqual(list(view.annotation_dict(IS_CURRENT).values()), ['bar'])\n logging.debug(view.annotation_dict(EMPLOYED_AT))\n e = view.get_element(EMPLOYED_AT)\n logging.debug(e.annotations)\n e = view.get_element('has employment history')\n logging.debug(e.annotations)\n\n elements = view.get_elements_applicable_by_identifier(\"ORCID:1234\")\n self.assertIn(\"Person\", elements)\n elements = view.get_elements_applicable_by_identifier(\"PMID:1234\")\n self.assertIn(\"Organization\", elements)\n elements = view.get_elements_applicable_by_identifier(\"http://www.ncbi.nlm.nih.gov/pubmed/1234\")\n self.assertIn(\"Organization\", elements)\n elements = view.get_elements_applicable_by_identifier(\"TEST:1234\")\n self.assertNotIn(\"anatomical entity\", elements)\n self.assertEqual(list(view.annotation_dict(SlotDefinitionName(IS_CURRENT)).values()), ['bar'])\n logging.debug(view.annotation_dict(SlotDefinitionName(EMPLOYED_AT)))\n element = view.get_element(SlotDefinitionName(EMPLOYED_AT))\n logging.debug(element.annotations)\n element = view.get_element(SlotDefinitionName('has employment history'))\n logging.debug(element.annotations)\n\n self.assertTrue(view.is_mixin('WithLocation'))\n self.assertFalse(view.is_mixin('BirthEvent'))\n\n self.assertTrue(view.inverse('employment history of'), 'has employment history')\n self.assertTrue(view.inverse('has employment history'), 'employment history of')\n \n mapping = view.get_mapping_index()\n self.assertTrue(mapping is not None)\n\n category_mapping = view.get_element_by_mapping(\"GO:0005198\")\n self.assertTrue(category_mapping, [ACTIVITY])\n\n self.assertTrue(view.is_multivalued('aliases'))\n self.assertFalse(view.is_multivalued('id'))\n self.assertTrue(view.is_multivalued('dog addresses'))\n\n self.assertTrue(view.slot_is_true_for_metadata_property('aliases', 'multivalued'))\n self.assertTrue(view.slot_is_true_for_metadata_property('id', 'identifier'))\n with self.assertRaises(ValueError):\n view.slot_is_true_for_metadata_property('aliases', 'aliases')\n\n for tn, t in view.all_types().items():\n logging.info(f'TN = {tn}')\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', t.from_schema)\n for sn, s in view.all_slots().items():\n logging.info(f'SN = {sn} RANGE={s.range}')\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', s.from_schema)\n # range should always be populated: See https://github.com/linkml/linkml/issues/733\n rng = view.induced_slot(sn).range\n self.assertIsNotNone(rng)\n # this section is mostly for debugging\n for cn in all_cls.keys():\n c = view.get_class(cn)\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', c.from_schema)\n logging.debug(f'{cn} PARENTS = {view.class_parents(cn)}')\n logging.debug(f'{cn} ANCS = {view.class_ancestors(cn)}')\n logging.debug(f'{cn} CHILDREN = {view.class_children(cn)}')\n logging.debug(f'{cn} DESCS = {view.class_descendants(cn)}')\n logging.debug(f'{cn} SCHEMA = {view.in_schema(cn)}')\n logging.debug(f' SLOTS = {view.class_slots(cn)}')\n for sn in view.class_slots(cn):\n slot = view.get_slot(sn)\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', slot.from_schema)\n logging.debug(f' SLOT {sn} R: {slot.range} U: {view.get_uri(sn)} ANCS: {view.slot_ancestors(sn)}')\n induced_slot = 
view.induced_slot(sn, cn)\n logging.debug(f' INDUCED {sn}={induced_slot}')\n # range should always be populated: See https://github.com/linkml/linkml/issues/733\n self.assertIsNotNone(induced_slot.range)\n\n\n logging.debug(f'ALL = {view.all_elements().keys()}')\n\n # -- TEST ANCESTOR/DESCENDANTS FUNCTIONS --\n\n self.assertCountEqual(['Company', 'Organization', 'HasAliases', 'Thing'],\n view.class_ancestors(COMPANY))\n self.assertCountEqual(['Organization', 'HasAliases', 'Thing'],\n view.class_ancestors(COMPANY, reflexive=False))\n self.assertCountEqual(['Thing', 'Person', 'Organization', COMPANY, 'Adult'],\n view.class_descendants('Thing'))\n\n # -- TEST CLASS SLOTS --\n\n self.assertCountEqual(['id', 'name', ## From Thing\n 'has employment history', 'has familial relationships', 'has medical history',\n AGE_IN_YEARS, 'addresses', 'has birth event', ## From Person\n 'aliases' ## From HasAliases\n ],\n view.class_slots('Person'))\n self.assertCountEqual(view.class_slots('Person'), view.class_slots('Adult'))\n self.assertCountEqual(['id', 'name', ## From Thing\n 'ceo', ## From COMPANY\n 'aliases' ## From HasAliases\n ],\n view.class_slots(COMPANY))\n\n self.assertEqual(view.get_class(AGENT).class_uri, 'prov:Agent')\n self.assertEqual(view.get_uri(AGENT), 'prov:Agent')\n logging.debug(view.get_class(COMPANY).class_uri)\n\n self.assertEqual(view.get_uri(COMPANY), 'ks:Company')\n\n # test induced slots\n\n for c in [COMPANY, 'Person', 'Organization',]:\n islot = view.induced_slot('aliases', c)\n assert islot.multivalued is True\n self.assertEqual(islot.owner, c, 'owner does not match')\n self.assertEqual(view.get_uri(islot, expand=True), 'https://w3id.org/linkml/tests/kitchen_sink/aliases')\n\n self.assertEqual(view.get_identifier_slot('Company').name, 'id')\n self.assertEqual(view.get_identifier_slot('Thing').name, 'id')\n self.assertTrue(view.get_identifier_slot('FamilialRelationship') is None)\n for c in [COMPANY, 'Person', 'Organization', 'Thing']:\n self.assertTrue(view.induced_slot('id', c).identifier)\n self.assertFalse(view.induced_slot('name', c).identifier)\n self.assertFalse(view.induced_slot('name', c).required)\n self.assertEqual(view.induced_slot('name', c).range, 'string')\n self.assertEqual(view.induced_slot('id', c).owner, c, 'owner does not match')\n self.assertEqual(view.induced_slot('name', c).owner, c, 'owner does not match')\n for c in ['Event', 'EmploymentEvent', 'MedicalEvent']:\n s = view.induced_slot('started at time', c)\n logging.debug(f's={s.range} // c = {c}')\n self.assertEqual(s.range, 'date')\n self.assertEqual(s.slot_uri, 'prov:startedAtTime')\n self.assertEqual(s.owner, c, 'owner does not match')\n c_induced = view.induced_class(c)\n # an induced class should have no slots\n self.assertEqual(c_induced.slots, [])\n self.assertNotEqual(c_induced.attributes, [])\n s2 = c_induced.attributes['started at time']\n self.assertEqual(s2.range, 'date')\n self.assertEqual(s2.slot_uri, 'prov:startedAtTime')\n # test slot_usage\n self.assertEqual(view.induced_slot(AGE_IN_YEARS, 'Person').minimum_value, 0)\n self.assertEqual(view.induced_slot(AGE_IN_YEARS, 'Adult').minimum_value, 16)\n self.assertTrue(view.induced_slot('name', 'Person').pattern is not None)\n self.assertEqual(view.induced_slot('type', 'FamilialRelationship').range, 'FamilialRelationshipType')\n self.assertEqual(view.induced_slot(RELATED_TO, 'FamilialRelationship').range, 'Person')\n self.assertEqual(view.get_slot(RELATED_TO).range, 'Thing')\n self.assertEqual(view.induced_slot(RELATED_TO, 
'Relationship').range, 'Thing')\n # https://github.com/linkml/linkml/issues/875\n self.assertCountEqual(['Thing', 'Place'], view.induced_slot('name').domain_of)\n\n a = view.get_class(ACTIVITY)\n self.assertCountEqual(a.exact_mappings, ['prov:Activity'])\n logging.debug(view.get_mappings(ACTIVITY, expand=True))\n self.assertCountEqual(view.get_mappings(ACTIVITY)['exact'], ['prov:Activity'])\n self.assertCountEqual(view.get_mappings(ACTIVITY, expand=True)['exact'], ['http://www.w3.org/ns/prov#Activity'])\n\n u = view.usage_index()\n for k, v in u.items():\n logging.debug(f' {k} = {v}')\n self.assertIn(SchemaUsage(used_by='FamilialRelationship', slot=RELATED_TO,\n metaslot='range', used='Person', inferred=False), u['Person'])\n\n # test methods also work for attributes\n leaves = view.class_leaves()\n logging.debug(f'LEAVES={leaves}')\n self.assertIn('MedicalEvent', leaves)\n roots = view.class_roots()\n logging.debug(f'ROOTS={roots}')\n self.assertIn('Dataset', roots)\n ds_slots = view.class_slots('Dataset')\n logging.debug(ds_slots)\n self.assertEqual(len(ds_slots), 3)\n self.assertCountEqual(['persons', 'companies', 'activities'], ds_slots)\n for sn in ds_slots:\n s = view.induced_slot(sn, 'Dataset')\n logging.debug(s)\n\n def test_all_classes_ordered_lexical(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n classes = view.all_classes(ordered_by=OrderedBy.LEXICAL)\n\n ordered_c = []\n for c in classes.values():\n ordered_c.append(c.name)\n self.assertEqual(ordered_c, sorted(ordered_c))\n\n def test_all_classes_ordered_rank(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n classes = view.all_classes(ordered_by=OrderedBy.RANK)\n ordered_c = []\n for c in classes.values():\n ordered_c.append(c.name)\n first_in_line = []\n second_in_line = []\n for name, definition in classes.items():\n if definition.rank == 1:\n first_in_line.append(name)\n elif definition.rank == 2:\n second_in_line.append(name)\n self.assertIn(ordered_c[0], first_in_line)\n self.assertNotIn(ordered_c[10], second_in_line)\n\n def test_all_classes_ordered_no_ordered_by(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n classes = view.all_classes()\n ordered_c = []\n for c in classes.values():\n ordered_c.append(c.name)\n self.assertEqual(\"HasAliases\", ordered_c[0])\n self.assertEqual(\"EmptyClass\", ordered_c[-1])\n self.assertEqual(\"agent\", ordered_c[-2])\n\n def test_all_slots_ordered_lexical(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n slots = view.all_slots(ordered_by=OrderedBy.LEXICAL)\n ordered_s = []\n for s in slots.values():\n ordered_s.append(s.name)\n self.assertEqual(ordered_s, sorted(ordered_s))\n\n def test_all_slots_ordered_rank(self):\n view = SchemaView(SCHEMA_NO_IMPORTS)\n slots = view.all_slots(ordered_by=OrderedBy.RANK)\n ordered_s = []\n for s in slots.values():\n ordered_s.append(s.name)\n first_in_line = []\n second_in_line = []\n for name, definition in slots.items():\n if definition.rank == 1:\n first_in_line.append(name)\n elif definition.rank == 2:\n second_in_line.append(name)\n self.assertIn(ordered_s[0], first_in_line)\n self.assertNotIn(ordered_s[10], second_in_line)\n\n def test_rollup_rolldown(self):\n # no import schema\n view = SchemaView(SCHEMA_NO_IMPORTS)\n element_name = 'Event'\n roll_up(view, element_name)\n for slot in view.class_induced_slots(element_name):\n logging.debug(slot)\n induced_slot_names = [s.name for s in view.class_induced_slots(element_name)]\n logging.debug(induced_slot_names)\n self.assertCountEqual(['started at time', 'ended at time', IS_CURRENT, 'in location', 
EMPLOYED_AT, 'married to'],\n induced_slot_names)\n # check to make sure rolled-up classes are deleted\n self.assertEqual(view.class_descendants(element_name, reflexive=False), [])\n roll_down(view, view.class_leaves())\n\n for element_name in view.all_classes():\n c = view.get_class(element_name)\n logging.debug(f'{element_name}')\n logging.debug(f' {element_name} SLOTS(i) = {view.class_slots(element_name)}')\n logging.debug(f' {element_name} SLOTS(d) = {view.class_slots(element_name, direct=True)}')\n self.assertCountEqual(view.class_slots(element_name), view.class_slots(element_name, direct=True))\n self.assertNotIn('Thing', view.all_classes())\n self.assertNotIn('Person', view.all_classes())\n self.assertIn('Adult', view.all_classes())\n \n def test_caching(self):\n \"\"\"\n Determine if cache is reset after modifications made to schema\n \"\"\"\n schema = SchemaDefinition(id='test', name='test')\n view = SchemaView(schema)\n self.assertCountEqual([], view.all_classes())\n view.add_class(ClassDefinition('X'))\n self.assertCountEqual(['X'], view.all_classes())\n view.add_class(ClassDefinition('Y'))\n self.assertCountEqual(['X', 'Y'], view.all_classes())\n # bypass view method and add directly to schema;\n # in general this is not recommended as the cache will\n # not be updated\n view.schema.classes['Z'] = ClassDefinition('Z')\n # as expected, the view doesn't know about Z\n self.assertCountEqual(['X', 'Y'], view.all_classes())\n # inform the view modifications have been made\n view.set_modified()\n # should be in sync\n self.assertCountEqual(['X', 'Y', 'Z'], view.all_classes())\n # recommended way to make updates\n view.delete_class('X')\n # cache will be up to date\n self.assertCountEqual(['Y', 'Z'], view.all_classes())\n view.add_class(ClassDefinition('W'))\n self.assertCountEqual(['Y', 'Z', 'W'], view.all_classes())\n\n def test_import_map(self):\n \"\"\"\n Path to import file should be configurable\n \"\"\"\n for im in [{\"core\": \"/no/such/file\"}, {\"linkml:\": \"/no/such/file\"}]:\n with self.assertRaises(FileNotFoundError):\n view = SchemaView(SCHEMA_WITH_IMPORTS, importmap=im)\n view.all_classes()\n for im in [None, {}, {\"core\": \"core\"}]:\n view = SchemaView(SCHEMA_WITH_IMPORTS, importmap=im)\n view.all_classes()\n self.assertCountEqual(['kitchen_sink', 'core', 'linkml:types'], view.imports_closure())\n self.assertIn(ACTIVITY, view.all_classes())\n self.assertNotIn(ACTIVITY, view.all_classes(imports=False))\n\n def test_imports(self):\n \"\"\"\n view should by default dynamically include imports chain\n \"\"\"\n view = SchemaView(SCHEMA_WITH_IMPORTS)\n self.assertIsNotNone(view.schema.source_file)\n logging.debug(view.imports_closure())\n self.assertCountEqual(['kitchen_sink', 'core', 'linkml:types'], view.imports_closure())\n for t in view.all_types().keys():\n logging.debug(f'T={t} in={view.in_schema(t)}')\n self.assertEqual(view.in_schema(ClassDefinitionName('Person')), 'kitchen_sink')\n self.assertEqual(view.in_schema(SlotDefinitionName('id')), 'core')\n self.assertEqual(view.in_schema(SlotDefinitionName('name')), 'core')\n self.assertEqual(view.in_schema(SlotDefinitionName(ACTIVITY)), 'core')\n self.assertEqual(view.in_schema(SlotDefinitionName('string')), 'types')\n self.assertIn(ACTIVITY, view.all_classes())\n self.assertNotIn(ACTIVITY, view.all_classes(imports=False))\n self.assertIn('string', view.all_types())\n self.assertNotIn('string', view.all_types(imports=False))\n self.assertCountEqual(['SymbolString', 'string'], view.type_ancestors('SymbolString'))\n\n 
for tn, t in view.all_types().items():\n self.assertEqual(tn, t.name)\n induced_t = view.induced_type(tn)\n self.assertIsNotNone(induced_t.uri)\n #self.assertIsNotNone(induced_t.repr)\n self.assertIsNotNone(induced_t.base)\n if t in view.all_types(imports=False).values():\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', t.from_schema)\n else:\n self.assertIn(t.from_schema, ['https://w3id.org/linkml/tests/core', 'https://w3id.org/linkml/types'])\n for en, e in view.all_enums().items():\n self.assertEqual(en, e.name)\n if e in view.all_enums(imports=False).values():\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', e.from_schema)\n else:\n self.assertEqual('https://w3id.org/linkml/tests/core', e.from_schema)\n for sn, s in view.all_slots().items():\n self.assertEqual(sn, s.name)\n s_induced = view.induced_slot(sn)\n self.assertIsNotNone(s_induced.range)\n if s in view.all_slots(imports=False).values():\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', s.from_schema)\n else:\n self.assertEqual('https://w3id.org/linkml/tests/core', s.from_schema)\n for cn, c in view.all_classes().items():\n self.assertEqual(cn, c.name)\n if c in view.all_classes(imports=False).values():\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', c.from_schema)\n else:\n self.assertEqual('https://w3id.org/linkml/tests/core', c.from_schema)\n for s in view.class_induced_slots(cn):\n if s in view.all_classes(imports=False).values():\n self.assertIsNotNone(s.slot_uri)\n self.assertEqual('https://w3id.org/linkml/tests/kitchen_sink', s.from_schema)\n\n for c in ['Company', 'Person', 'Organization', 'Thing']:\n self.assertTrue(view.induced_slot('id', c).identifier)\n self.assertFalse(view.induced_slot('name', c).identifier)\n self.assertFalse(view.induced_slot('name', c).required)\n self.assertEqual(view.induced_slot('name', c).range, 'string')\n for c in ['Event', 'EmploymentEvent', 'MedicalEvent']:\n s = view.induced_slot('started at time', c)\n self.assertEqual(s.range, 'date')\n self.assertEqual(s.slot_uri, 'prov:startedAtTime')\n self.assertEqual(view.induced_slot(AGE_IN_YEARS, 'Person').minimum_value, 0)\n self.assertEqual(view.induced_slot(AGE_IN_YEARS, 'Adult').minimum_value, 16)\n\n self.assertEqual(view.get_class('agent').class_uri, 'prov:Agent')\n self.assertEqual(view.get_uri(AGENT), 'prov:Agent')\n logging.debug(view.get_class('Company').class_uri)\n\n self.assertEqual(view.get_uri(COMPANY), 'ks:Company')\n self.assertEqual(view.get_uri(COMPANY, expand=True), 'https://w3id.org/linkml/tests/kitchen_sink/Company')\n logging.debug(view.get_uri('TestClass'))\n self.assertEqual(view.get_uri('TestClass'), 'core:TestClass')\n self.assertEqual(view.get_uri('TestClass', expand=True), 'https://w3id.org/linkml/tests/core/TestClass')\n\n self.assertEqual(view.get_uri('string'), 'xsd:string')\n\n # dynamic enums\n e = view.get_enum('HCAExample')\n self.assertCountEqual(['GO:0007049',\n 'GO:0022403'],\n e.include[0].reachable_from.source_nodes)\n\n # units\n height = view.get_slot('height_in_m')\n self.assertEqual(\"m\", height.unit.ucum_code)\n\n def test_imports_from_schemaview(self):\n \"\"\"\n view should by default dynamically include imports chain\n \"\"\"\n view = SchemaView(SCHEMA_WITH_IMPORTS)\n view2 = SchemaView(view.schema)\n self.assertCountEqual(view.all_classes(), view2.all_classes())\n self.assertCountEqual(view.all_classes(imports=False), view2.all_classes(imports=False))\n\n def test_direct_remote_imports(self):\n \"\"\"\n Tests that building a 
SchemaView directly from a remote URL works.\n\n Note: this should be the only test in this suite that fails if there is\n no network connection.\n \"\"\"\n view = SchemaView(\"https://w3id.org/linkml/meta.yaml\")\n main_classes = [\"class_definition\", \"prefix\"]\n imported_classes = [\"annotation\"]\n for c in main_classes:\n self.assertIn(c, view.all_classes(imports=True))\n self.assertIn(c, view.all_classes(imports=False))\n for c in imported_classes:\n self.assertIn(c, view.all_classes(imports=True))\n self.assertNotIn(c, view.all_classes(imports=False))\n\n @unittest.skip(\"Skipped as fragile: will break if the remote schema changes\")\n def test_direct_remote_imports_additional(self):\n \"\"\"\n Alternative test to: https://github.com/linkml/linkml/pull/1379\n \"\"\"\n url = \"https://raw.githubusercontent.com/GenomicsStandardsConsortium/mixs/main/model/schema/mixs.yaml\"\n view = SchemaView(url)\n self.assertEqual(view.schema.name, \"MIxS\")\n class_count = len(view.all_classes())\n self.assertGreater(class_count, 0)\n\n\n def test_merge_imports(self):\n \"\"\"\n ensure merging and merging imports closure works\n \"\"\"\n view = SchemaView(SCHEMA_WITH_IMPORTS)\n all_c = copy(view.all_classes())\n all_c_noi = copy(view.all_classes(imports=False))\n self.assertLess(len(all_c_noi), len(all_c))\n view.merge_imports()\n all_c2 = copy(view.all_classes())\n self.assertCountEqual(all_c, all_c2)\n all_c2_noi = copy(view.all_classes(imports=False))\n self.assertEqual(len(all_c2_noi), len(all_c2))\n\n def test_metamodel_imports(self):\n \"\"\"\n Tests imports of the metamodel.\n\n Note: this test and others should be able to run without network connectivity.\n SchemaView should make use of the version of the metamodel distributed with the package\n over the network available version.\n\n TODO: use mock testing framework to emulate no access to network.\n\n - ``_\n :return:\n \"\"\"\n schema = SchemaDefinition(id='test', name='metamodel-imports-test',\n imports=[\"linkml:meta\"])\n sv = SchemaView(schema)\n all_classes = sv.all_classes()\n self.assertGreater(len(all_classes), 20)\n schema_str = yaml_dumper.dumps(schema)\n sv = SchemaView(schema_str)\n self.assertGreater(len(sv.all_classes()), 20)\n self.assertCountEqual(all_classes, sv.all_classes())\n\n def test_non_linkml_remote_import(self):\n \"\"\"\n Test that a remote import _not_ using the linkml prefix works\n\n See: https://github.com/linkml/linkml/issues/1627\n \"\"\"\n schema = SchemaDefinition(\n id='test_non_linkml_remote_import',\n name='test_non_linkml_remote_import',\n prefixes=[\n Prefix(\n prefix_prefix=\"foo\",\n prefix_reference=\"https://w3id.org/linkml/\"\n )\n ],\n imports=[\n \"foo:types\"\n ],\n slots=[\n SlotDefinition(\n name=\"an_int\",\n range=\"integer\"\n )\n ],\n classes=[\n ClassDefinition(\n name=\"AClass\",\n slots=[\"an_int\"]\n )\n ]\n )\n sv = SchemaView(schema)\n slots = sv.class_induced_slots(\"AClass\", imports=True)\n self.assertEqual(len(slots), 1)\n\n\n def test_traversal(self):\n schema = SchemaDefinition(id='test', name='traversal-test')\n view = SchemaView(schema)\n view.add_class(ClassDefinition('Root', mixins=['RootMixin']))\n view.add_class(ClassDefinition('A', is_a='Root', mixins=['Am1', 'Am2', 'AZ']))\n view.add_class(ClassDefinition('B', is_a='A', mixins=['Bm1', 'Bm2', 'BY']))\n view.add_class(ClassDefinition('C', is_a='B', mixins=['Cm1', 'Cm2', 'CX']))\n view.add_class(ClassDefinition('RootMixin', mixin=True))\n view.add_class(ClassDefinition('Am1', is_a='RootMixin', mixin=True))\n 
view.add_class(ClassDefinition('Am2', is_a='RootMixin', mixin=True))\n view.add_class(ClassDefinition('Bm1', is_a='Am1', mixin=True))\n view.add_class(ClassDefinition('Bm2', is_a='Am2', mixin=True))\n view.add_class(ClassDefinition('Cm1', is_a='Bm1', mixin=True))\n view.add_class(ClassDefinition('Cm2', is_a='Bm2', mixin=True))\n view.add_class(ClassDefinition('AZ', is_a='RootMixin', mixin=True))\n view.add_class(ClassDefinition('BY', is_a='RootMixin', mixin=True))\n view.add_class(ClassDefinition('CX', is_a='RootMixin', mixin=True))\n\n def check(ancs: List, expected: List):\n self.assertEqual(ancs, expected)\n check(view.class_ancestors('C', depth_first=True),\n ['C', 'Cm1', 'Cm2', 'CX', 'B', 'Bm1', 'Bm2', 'BY', 'A', 'Am1', 'Am2', 'AZ', 'Root', 'RootMixin'])\n check(view.class_ancestors('C', depth_first=False),\n ['C', 'Cm1', 'Cm2', 'CX', 'B', 'Bm1', 'Bm2', 'RootMixin', 'BY', 'A', 'Am1', 'Am2', 'AZ', 'Root'])\n check(view.class_ancestors('C', mixins=False),\n ['C', 'B', 'A', 'Root'])\n check(view.class_ancestors('C', is_a=False),\n ['C', 'Cm1', 'Cm2', 'CX'])\n\n def test_slot_inheritance(self):\n schema = SchemaDefinition(id='test', name='test')\n view = SchemaView(schema)\n view.add_class(ClassDefinition('C', slots=['s1', 's2']))\n view.add_class(ClassDefinition('D'))\n view.add_class(ClassDefinition('Z'))\n view.add_class(ClassDefinition('W'))\n #view.add_class(ClassDefinition('C2',\n # is_a='C')\n # # slot_usage=[SlotDefinition(s1, range='C2')])\n view.add_slot(SlotDefinition('s1', multivalued=True, range='D'))\n view.add_slot(SlotDefinition('s2', is_a='s1'))\n view.add_slot(SlotDefinition('s3', is_a='s2', mixins=['m1']))\n view.add_slot(SlotDefinition('s4', is_a='s2', mixins=['m1'], range='W'))\n view.add_slot(SlotDefinition('m1', mixin=True, multivalued=False, range='Z'))\n slot1 = view.induced_slot('s1', 'C')\n self.assertEqual(slot1.is_a, None)\n self.assertEqual('D', slot1.range)\n self.assertIsNotNone(slot1.multivalued)\n slot2 = view.induced_slot('s2', 'C')\n self.assertEqual(slot2.is_a, 's1')\n self.assertEqual('D', slot2.range)\n self.assertIsNotNone(slot2.multivalued)\n slot3 = view.induced_slot('s3', 'C')\n self.assertIsNotNone(slot3.multivalued)\n self.assertEqual('Z', slot3.range)\n slot4 = view.induced_slot('s4', 'C')\n self.assertIsNotNone(slot4.multivalued)\n self.assertEqual('W', slot4.range)\n # test dangling\n view.add_slot(SlotDefinition('s5', is_a='does-not-exist'))\n with self.assertRaises(ValueError):\n view.slot_ancestors('s5')\n\n def test_attribute_inheritance(self):\n \"\"\"\n Tests attribute inheritance edge cases\n :return:\n \"\"\"\n view = SchemaView(os.path.join(INPUT_DIR, 'attribute_edge_cases.yaml'))\n expected = [\n ('Root', 'a1', None, \"a1\"),\n ('Root', 'a2', None, \"a2\"),\n ('Root', 'a3', None, \"a3\"),\n ('C1', 'a1', True, \"a1m1\"),\n ('C1', 'a2', True, \"a2c1\"),\n ('C1', 'a3', None, \"a3\"),\n ('C1', 'a4', None, \"a4\"),\n ('C2', 'a1', False, \"a1m2\"),\n ('C2', 'a2', True, \"a2c2\"),\n ('C2', 'a3', None, \"a3\"),\n ('C2', 'a4', True, \"a4m2\"),\n ('C1x', 'a1', True, \"a1m1\"),\n ('C1x', 'a2', True, \"a2c1x\"),\n ('C1x', 'a3', None, \"a3\"),\n ('C1x', 'a4', None, \"a4\"),\n ]\n for cn, sn, req, desc in expected:\n slot = view.induced_slot(sn, cn)\n self.assertEqual(req, slot.required, f\"in: {cn}.{sn}\")\n self.assertEqual(desc, slot.description, f\"in: {cn}.{sn}\")\n self.assertEqual('string', slot.range, f\"in: {cn}.{sn}\")\n\n def test_ambiguous_attributes(self):\n \"\"\"\n Tests behavior where multiple attributes share the same name\n 
\"\"\"\n schema = SchemaDefinition(id='test', name='test')\n view = SchemaView(schema)\n a1 = SlotDefinition('a1', range='string')\n a2 = SlotDefinition('a2', range='FooEnum')\n a3 = SlotDefinition('a3', range='C3')\n view.add_class(ClassDefinition('C1', attributes={a1.name: a1, a2.name: a2, a3.name: a3}))\n a1x = SlotDefinition('a1', range='integer')\n a2x = SlotDefinition('a2', range='BarEnum')\n view.add_class(ClassDefinition('C2', attributes={a1x.name: a1x, a2x.name: a2x}))\n # a1 and a2 are ambiguous: only stub information available\n # without class context\n self.assertIsNone(view.get_slot(a1.name).range)\n self.assertIsNone(view.get_slot(a2.name).range)\n self.assertIsNotNone(view.get_slot(a3.name).range)\n self.assertEqual(3, len(view.all_slots(attributes=True)))\n self.assertEqual(0, len(view.all_slots(attributes=False)))\n # default is to include attributes\n self.assertEqual(3, len(view.all_slots()))\n self.assertEqual(a3.range, view.induced_slot(a3.name).range)\n self.assertEqual(a1.range, view.induced_slot(a1.name, 'C1').range)\n self.assertEqual(a2.range, view.induced_slot(a2.name, 'C1').range)\n self.assertEqual(a1x.range, view.induced_slot(a1x.name, 'C2').range)\n self.assertEqual(a2x.range, view.induced_slot(a2x.name, 'C2').range)\n\n def test_metamodel_in_schemaview(self):\n view = package_schemaview('linkml_runtime.linkml_model.meta')\n self.assertIn('meta', view.imports_closure())\n self.assertIn('linkml:types', view.imports_closure())\n self.assertIn('meta', view.imports_closure(imports=False))\n self.assertNotIn('linkml:types', view.imports_closure(imports=False))\n self.assertEqual(1, len(view.imports_closure(imports=False)))\n all_classes = list(view.all_classes().keys())\n all_classes_no_imports = list(view.all_classes(imports=False).keys())\n for cn in ['class_definition', 'type_definition', 'slot_definition']:\n self.assertIn(cn, all_classes)\n self.assertIn(cn, all_classes_no_imports)\n self.assertEqual(view.get_identifier_slot(cn).name, 'name')\n for cn in ['annotation', 'extension']:\n self.assertIn(cn, all_classes, \"imports should be included by default\")\n self.assertNotIn(cn, all_classes_no_imports, \"imported class unexpectedly included\")\n for sn in ['id', 'name', 'description']:\n self.assertIn(sn, view.all_slots())\n for tn in ['uriorcurie', 'string', 'float']:\n self.assertIn(tn, view.all_types())\n for tn in ['uriorcurie', 'string', 'float']:\n self.assertNotIn(tn, view.all_types(imports=False))\n for cn, c in view.all_classes().items():\n uri = view.get_uri(cn, expand=True)\n self.assertIsNotNone(uri)\n if cn != 'structured_alias' and cn != 'UnitOfMeasure' and cn != 'ValidationReport' and \\\n cn != 'ValidationResult':\n self.assertIn('https://w3id.org/linkml/', uri)\n induced_slots = view.class_induced_slots(cn)\n for s in induced_slots:\n exp_slot_uri = view.get_uri(s, expand=True)\n self.assertIsNotNone(exp_slot_uri)\n\n def test_get_classes_by_slot(self):\n sv = SchemaView(SCHEMA_WITH_IMPORTS)\n\n slot = sv.get_slot(AGE_IN_YEARS)\n\n actual_result = sv.get_classes_by_slot(slot)\n expected_result = [\"Person\"]\n\n self.assertListEqual(expected_result, actual_result)\n\n actual_result = sv.get_classes_by_slot(slot, include_induced=True)\n expected_result = [\"Person\", \"Adult\"]\n\n self.assertListEqual(actual_result, expected_result)\n\n def test_materialize_patterns(self):\n sv = SchemaView(SCHEMA_WITH_STRUCTURED_PATTERNS)\n\n sv.materialize_patterns()\n\n height_slot = sv.get_slot(\"height\")\n weight_slot = sv.get_slot(\"weight\")\n\n 
self.assertEqual(height_slot.pattern, \"\\d+[\\.\\d+] (centimeter|meter|inch)\")\n self.assertEqual(weight_slot.pattern, \"\\d+[\\.\\d+] (kg|g|lbs|stone)\")\n\n def test_materialize_patterns_slot_usage(self):\n sv = SchemaView(SCHEMA_WITH_STRUCTURED_PATTERNS)\n\n sv.materialize_patterns()\n\n name_slot_usage = sv.get_class(\"FancyPersonInfo\").slot_usage['name']\n\n self.assertEqual(name_slot_usage.pattern, \"\\\\S+ \\\\S+-\\\\S+\")\n\n def test_materialize_patterns_attribute(self):\n sv = SchemaView(SCHEMA_WITH_STRUCTURED_PATTERNS)\n\n sv.materialize_patterns()\n\n weight_attribute = sv.get_class('ClassWithAttributes').attributes['weight']\n\n self.assertEqual(weight_attribute.pattern, \"\\d+[\\.\\d+] (kg|g|lbs|stone)\")\n\n def test_mergeimports(self):\n sv = SchemaView(SCHEMA_WITH_IMPORTS, merge_imports=False)\n # activity class is in core, but not in kitchen_sink\n classes_list = list(sv.schema.classes.keys())\n self.assertNotIn(\"activity\", classes_list)\n\n # was generated by slot is in core, but not in kitchen_sink\n slots_list = list(sv.schema.slots.keys())\n self.assertNotIn(\"was generated by\", slots_list)\n\n # list of prefixes only in kitchen_sink\n prefixes_list = list(sv.schema.prefixes.keys())\n self.assertListEqual(\n [\"pav\", \"dce\", \"lego\", \"linkml\", \"biolink\", \"ks\", \"RO\", \"BFO\", \"tax\"], \n prefixes_list\n )\n\n # merge_imports=True, so activity class should be present\n sv = SchemaView(SCHEMA_WITH_IMPORTS, merge_imports=True)\n classes_list = list(sv.schema.classes.keys())\n self.assertIn(\"activity\", classes_list)\n\n slots_list = list(sv.schema.slots.keys())\n self.assertIn(\"was generated by\", slots_list)\n\n prefixes_list = list(sv.schema.prefixes.keys())\n if 'schema' not in prefixes_list:\n prefixes_list.append('schema')\n self.assertCountEqual(\n [\"pav\", \n \"dce\", \n \"lego\", \n \"linkml\", \n \"biolink\", \n \"ks\", \n \"RO\", \n \"BFO\", \n \"tax\", \n \"core\", \n \"prov\", \n \"xsd\",\n \"schema\",\n \"shex\",\n ],\n prefixes_list\n )\n\n def test_is_inlined(self):\n schema_path = os.path.join(INPUT_DIR, \"schemaview_is_inlined.yaml\")\n sv = SchemaView(schema_path)\n cases = [\n # slot name, expected is_inline\n (\"a_thing_with_id\", False),\n (\"inlined_thing_with_id\", True),\n (\"inlined_as_list_thing_with_id\", True),\n (\"a_thing_without_id\", True),\n (\"inlined_thing_without_id\", True),\n (\"inlined_as_list_thing_without_id\", True),\n (\"an_integer\", False),\n (\"inlined_integer\", False),\n (\"inlined_as_list_integer\", False)\n ]\n for slot_name, expected_result in cases:\n with self.subTest(slot_name=slot_name):\n slot = sv.get_slot(slot_name)\n actual_result = sv.is_inlined(slot)\n self.assertEqual(actual_result, expected_result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"linkml/linkml-runtime","sub_path":"tests/test_utils/test_schemaview.py","file_name":"test_schemaview.py","file_ext":"py","file_size_in_byte":39005,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"96"} +{"seq_id":"10244558192","text":"\"\"\"change report parameters to varbinary\nRevision ID: 6f1b895840a\nRevises: 1d86908ac3bc\nCreate Date: 2015-06-16 09:58:57.874087\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6f1b895840a'\ndown_revision = '1d86908ac3bc'\n\n\nfrom alembic import op\nfrom sqlalchemy.dialects.mysql import VARBINARY\nfrom sqlalchemy import String\n\n\ndef upgrade():\n op.alter_column('report', 'parameters', type_=VARBINARY(4000),\n 
existing_type=String(4000, collation='utf8_general_ci'),\n existing_nullable=True)\n\n\ndef downgrade():\n op.alter_column('report', 'parameters',\n type_=String(4000, collation='utf8_general_ci'),\n existing_type=VARBINARY(4000), existing_nullable=True)\n","repo_name":"wikimedia/analytics-wikimetrics","sub_path":"database_migrations/versions/6f1b895840a_change_report_parameters_to_varbinary.py","file_name":"6f1b895840a_change_report_parameters_to_varbinary.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"96"} +{"seq_id":"23598374550","text":"\r\n\r\nA =[2,1,3,4,5]\r\nn = len(A)\r\nmax_sum =0\r\n\r\ndef prefix_sum(A):\r\n \r\n p_sum = [None] * len(A)\r\n p_sum[0]= A[0]\r\n for i in range(1,n):\r\n p_sum[i] = p_sum[i-1] + A[i]\r\n \r\n return p_sum\r\np_sum = prefix_sum(A)\r\n\r\nfor i in range(0,n):\r\n for j in range(i,n):\r\n if (i == 0):\r\n sub_sum = p_sum[j]\r\n else:\r\n sub_sum = p_sum[j]-p_sum[i-1]\r\n if (sub_sum > max_sum) and (sub_sum <= 12):\r\n max_sum = sub_sum\r\n\r\n\r\n\r\nprint(f\"max_sum ={max_sum}\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sarannetworkprogammer/DS_ALGO","sub_path":"Practise/subarrays/prefix_sum.py","file_name":"prefix_sum.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16618779561","text":"from __future__ import print_function\nimport re\nimport json\nimport pickle\nimport sys\nimport urllib\n#import word2vec\nsys.path.insert(0, './head')\nfrom geotext import GeoText\nimport nltk\nfrom nltk.tag.hunpos import HunposTagger\nfrom nltk.tokenize import word_tokenize\n#from crf_location import crf_exec\n\ndef conver_pos(full_pos):\n a = []\n for each_pos in full_pos:\n if each_pos[1] == 'VERB':\n a.append([each_pos[0], 'VB'])\n elif each_pos[1] == 'NUM':\n a.append([each_pos[0], 'CD'])\n elif each_pos[1] == 'NOUN':\n a.append([each_pos[0], 'NN'])\n elif each_pos[1] == 'ADJ':\n a.append([each_pos[0], 'JJ'])\n else:\n a.append(each_pos)\n return a\n\n\n#################################################################################\ndef ner(event, full_pos, full_ents):\n '''Provide an event that contains the following keys:\n - operation: one of the operations in the operations dict below\n - tableName: required for operations that interact with DynamoDB\n - payload: a parameter to pass to the operation being performed\n '''\n love = str(event)\n #print (type(love))\n lust = getWords_special_location(love)\n #print (lust)\n #################################################################################\n d1 = ['i', 'live', 'in', 'please', 'hi', 'give', 'find', 'who', 'what', 'my', 'hungry', 'near', 'me', 'thank', 'you', \\\n 'want', 'to', 'eat', 'like','liked', 'I', 'can', 'you', 'suggest', 'of', 'is', 'are', 'near', 'there', 'some', \\\n 'little', 'now', 'wanna', 'want', 'at', 'on', 'in', 'near', 'area', 'next', 'and', 'how', 'about', 'or', \\\n 'the', 'a', 'an', 'about', 'for', 'with', 'should', 'could', 'would', 'out','time','person','year','way','day',\\\n 'thing','man','world','life','hand','part','child','eye','woman','place','work','week', 'doing',\\\n 'case','point','government','company','number','group','problem','fact','be','have','do','say',\\\n 'get','make','go','know','take','see','come','think','look','want','give','use','find','tell', 'telling',\\\n 
'ask','work','seem','feel','try','leave','call','good','new','first','last','long','great','little','own','other',\\\n 'old','right','big','high','different','small','large','next','early','young','important','few',\\\n 'public','bad','same','able','to','of','in','for','on','with','at','by','from','up','about','into',\\\n 'over','after','beneath','under','above','the','and','a','that','I','it','not','he','as','you', \\\n 'this','but','his','they','her','she','or','an','will','my','one','all','would','there','their', 'talk', \\\n 'talking', 'love', 'loved', 'hello', 'help', 'helping', 'helped', 'pleasure', 'bye', 'goodbye', 'care', 'later', \\\n 'no','nothing', 'thanks', 'welcome', 'something', 'hey', 'am', 'month','year','week','day','hour','minute','min','second', \\\n 'months','years','weeks','days','hours','minutes','mins','seconds','time', 'today', 'tomorrow', 'am', 'pm',\\\n 'january', 'febuary', 'marth', 'april', 'may', 'june', 'july','august', 'september', 'october', 'november', 'december', \\\n 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','km', 'kilometer', 'kilometers', 'meter', 'm',\\\n 'cm', 'kms', 'miles', 'yards', 'feet', 'feets','evening', 'morning', 'afternoon', 'noon', 'night']\n #d1 = []\n kiss = ''\n bang = ''\n bump_last = ['.', ',', ';', ':', '(', ')', '?', '!']\n for c_cmall in lust:\n if c_cmall[-1] not in bump_last:\n if c_cmall not in d1:\n kiss = kiss + c_cmall.title() + ' '\n bang = bang + c_cmall.title() + ' '\n else:\n kiss = kiss + c_cmall + ' '\n bang = bang + c_cmall + ' '\n else:\n if c_cmall not in d1:\n kiss = kiss + c_cmall[:-1].title() + ' '\n bang = bang + c_cmall[:-1].title() + ' ' + c_cmall[-1] + ' '\n else:\n kiss = kiss + c_cmall[:-1] + ' '\n bang = bang + c_cmall[:-1] + ' ' + c_cmall[-1] + ' '\n #################################################################################\n #a = crf_exec(bang, 0)\n b = conver_pos(full_pos)\n data_ayrton = full_ents\n ####################################################################\n q=[]\n demo = stamps(\"yesminister\",data_ayrton,love,b,q)\n demo.fw_dispatch()\n demo.fill_init()\n demo.fill_rest()\n demo.just_location_plus()\n demo.uniquefy()\n #demo.baggage()\n ####################################################################\n return demo\n\n\nclass stamps:\n def __init__(self, name, data_ayrton, aliner, message_text, output_dep):\n self.protagonist = name\n # a proper noun\n\n self.number = [] #\n # 2.4, 3.5\n\n self.datetime = []\n # datetime, like, tomorrow at 3 PM, June 3 2016\n\n self.day = [] #\n # tuesday, today, tomorrow, sunday\n\n self.time = [] #\n # 6 PM, 12 Noon\n\n self.amount_of_money = []\n # $40, 35 pounds\n\n self.duration = [] #\n # 30 mins, 2 hours, 4 days\n\n self.distance = []\n\n self.email = []\n\n self.url = []\n\n self.phone_number = []\n\n self.quantity = []\n # any units other than time, like grams, kmph\n\n self.reminder = [] #\n # reminder on or off, for snooze\n\n self.reminder_text = []\n # what would be reminded about, this text should be generated while making\n # above \"True\"\n\n self.location = data_ayrton\n # SFO, Los Alamos, 22661 Merrick, Long Island\n\n self.message_text = message_text\n # full message to be stored for future evaluation?, basically POS Tagging\n\n self.foreign_word = []\n # words that cannot be tagger by a traditional tagger\n\n self.output_dep = output_dep\n # Full dependency text\n\n self.aliner = aliner\n # just full text\n\n self.bagged = []\n # words that may be important and could be used by KGB but don't 
seem to fit anywhere\n\n self.checked = None\n\n self.d1 = ['i', 'live', 'in', 'please', 'hi', 'give', 'find', 'who', 'what', 'my', 'hungry', 'near', 'me', 'thank', 'you', \\\n 'want', 'to', 'eat', 'like','liked', 'I', 'can', 'you', 'suggest', 'of', 'is', 'are', 'near', 'there', 'some', \\\n 'little', 'now', 'wanna', 'want', 'at', 'on', 'in', 'near', 'area', 'next', 'and', 'how', 'about', 'or', \\\n 'the', 'a', 'an', 'about', 'for', 'with', 'should', 'could', 'would', 'out','time','person','year','way','day',\\\n 'thing','man','world','life','hand','part','child','eye','woman','place','work','week', 'doing',\\\n 'case','point','government','company','number','group','problem','fact','be','have','do','say',\\\n 'get','make','go','know','take','see','come','think','look','want','give','use','find','tell', 'telling',\\\n 'ask','work','seem','feel','try','leave','call','good','new','first','last','long','great','little','own','other',\\\n 'old','right','big','high','different','small','large','next','early','young','important','few',\\\n 'public','bad','same','able','to','of','in','for','on','with','at','by','from','up','about','into',\\\n 'over','after','beneath','under','above','the','and','a','that','I','it','not','he','as','you', \\\n 'this','but','his','they','her','she','or','an','will','my','one','all','would','there','their', 'talk', \\\n 'talking', 'love', 'loved', 'hello', 'help', 'helping', 'helped', 'pleasure', 'bye', 'goodbye', 'care', 'later', \\\n 'no','nothing', 'thanks', 'welcome', 'something', 'hey', 'am', 'month','year','week','day','hour','minute','min','second', \\\n 'months','years','weeks','days','hours','minutes','mins','seconds','time', 'today', 'tomorrow', 'am', 'pm',\\\n 'january', 'febuary', 'marth', 'april', 'may', 'june', 'july','august', 'september', 'october', 'november', 'december', \\\n 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','km', 'kilometer', 'kilometers', 'meter', 'm',\\\n 'cm', 'kms', 'miles', 'yards', 'feet', 'feets','evening', 'morning', 'afternoon', 'noon', 'night']\n\n def view(self):\n print (\"self.protagonist```````\", self.protagonist + \"]\")\n print (\"self.number````````````\", self.number)\n #print (\"self.datetime``````````\", self.datetime)\n print (\"self.day``````````\", self.day)\n print (\"self.time`````````\", self.time)\n print (\"self.amount_of_money```\", self.amount_of_money)\n print (\"self.duration `````````\", self.duration)\n print (\"self.distance `````````\", self.distance)\n print (\"self.email`````````````\", self.email)\n print (\"self.url```````````````\", self.url)\n print (\"self.phone_number``````\", self.phone_number)\n print (\"self.quantity``````````\", self.quantity)\n print (\"self.reminder``````````\", self.reminder)\n print (\"self.reminder_text`````\", self.reminder_text)\n print (\"self.location``````````\", self.location)\n print (\"self.aliner````````````\", self.aliner + \"]\")\n print (\"self.message_text``````\", self.message_text)\n print (\"self.foreign_word``````\", self.foreign_word)\n print (\"self.checked```````````\", self.checked)\n\n def view_return(self):\n return str({\n \"self.protagonist```````\": self.protagonist,\n \"self.number````````````\": self.number,\n \"self.datetime``````````\": self.datetime,\n \"self.day``````````\": self.day,\n \"self.time`````````\": self.time,\n \"self.amount_of_money```\": self.amount_of_money,\n \"self.duration `````````\": self.duration,\n \"self.distance `````````\": self.distance,\n \"self.email`````````````\": 
self.email,\n \"self.url```````````````\": self.url,\n \"self.phone_number``````\": self.phone_number,\n \"self.quantity``````````\": self.quantity,\n \"self.reminder``````````\": self.reminder,\n \"self.reminder_text`````\": self.reminder_text,\n \"self.message_text``````\": self.message_text,\n \"self.location``````````\": self.location,\n \"self.foreign_word``````\": self.foreign_word,\n \"self.checked```````````\": self.checked\n })\n\n def fw_dispatch(self):\n i = 0\n a = []\n while i < len(self.message_text):\n #print self.message_text[i]\n if self.message_text[i][1] == 'FW':\n # I have my doubts with this NNS. for string 7th July, ot July 7th; the \"7th\" is \"NNS\"\n # so i removed it, it is a small decrepancy but can't take the risk. because all the email\n # will be sparced\n k1 = ''\n k2 = ''\n j = 0\n for j in range(len(self.message_text[i][0])):\n if self.message_text[i][0][j].isdigit():\n k1=k1+self.message_text[i][0][j]\n else:\n k2=k2+self.message_text[i][0][j:-1]\n break\n if k1 != '':\n a.append([k1, 'CD'])\n a.append([k2, 'NNS'])\n else:\n a.append(self.message_text[i])\n else:\n a.append(self.message_text[i])\n i=i+1\n self.message_text = a\n\n\n\n def fill_init(self):\n\n duration_text = ['month','year','week','day','hour','minute','min','second', \\\n 'months','years','weeks','days','hours','minutes','mins','seconds']\n datetime_text = ['time', 'today', 'tomorrow', 'am', 'pm',\\\n 'january', 'febuary', 'marth', 'april', 'may', 'june', 'july', \\\n 'august', 'september', 'october', 'november', 'december', \\\n 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n day_text = ['january', 'febuary', 'marth', 'april', 'may', 'june', 'july', \\\n 'august', 'september', 'october', 'november', 'december']\n time_text = ['am', 'pm', 'noon']\n distance_text = ['km', 'kilometer', 'kilometers', 'meter', 'm', 'cm', 'kms', 'miles', 'yards', 'feet', 'feets']\n amount_of_money = ['usd', 'dollar', 'dollars', 'euro', 'pound', 'pounds', 'euros', 'rupee', 'rupees', 'rs']\n\n b = self.message_text\n\n self.checked = [False] * len(b)\n\n #print (b)\n\n i=0\n for i in range(len(b)):\n b[i][0] = b[i][0].lower()\n if b[i][0] in duration_text:\n if i>0 and b[i-1][1]=='CD' and self.checked[i] == False and self.checked[i-1] == False:\n #print \"found a duration text here \" + b[i-1][0] + \" \" + b[i][0]\n self.checked[i] = True\n self.checked[i-1] = True\n self.duration.append(b[i-1][0] + \" \" + b[i][0])\n if b[i][0] in day_text:\n if i>0 and b[i-1][1]=='CD' and self.checked[i] == False and self.checked[i-1] == False:\n #print \"found a day text here \" + b[i-1][0] + \" \" + b[i][0]\n self.checked[i] = True\n self.checked[i-1] = True\n self.day.append(b[i-1][0] + \" \" + b[i][0])\n if i0 and b[i-1][1]=='CD' and self.checked[i] == False and self.checked[i-1] == False:\n #print \"found a time text here \" + b[i-1][0] + \" \" + b[i][0]\n self.checked[i] = True\n self.checked[i-1] = True\n self.time.append(b[i-1][0] + \" \" + b[i][0])\n if b[i][0] in amount_of_money:\n if i>0 and b[i-1][1]=='CD' and self.checked[i] == False and self.checked[i-1] == False:\n #print \"found a amount of money here \" + b[i-1][0] + \" \" + b[i][0]\n self.checked[i] = True\n self.checked[i-1] = True\n self.amount_of_money.append(b[i-1][0] + \" \" + b[i][0])\n if b[i][0] in distance_text:\n if i>0 and b[i-1][1]=='CD' and self.checked[i] == False and self.checked[i-1] == False:\n #print \"found a distance here \" + b[i-1][0] + \" \" + b[i][0]\n self.checked[i] = True\n self.checked[i-1] = True\n 
self.distance.append(b[i-1][0] + \" \" + b[i][0])\n if b[i][0] == \"remind\" or b[i][0] == \"reminded\" or b[i][0] == \"reminder\":\n #print \"reminder at time - day found \"\n self.reminder = True\n match = None\n match = re.search(r'[\\w\\.-]+@[\\w\\.-]+', b[i][0])\n if match != None:\n #print \"found email here \" + b[i][0]\n self.email.append(b[i][0])\n self.checked[i] = True\n match = None\n match = re.findall(r'(https?://[^\\s]+)', self.aliner)\n if match != []:\n self.url.append(match)\n self.checked[i] = True\n\n def fill_rest(self):\n to_fill_quant = ['NNS', 'NNPS', 'NNP', 'NN']\n day_text = ['today', 'tomorrow', 'january', 'febuary', 'marth', 'april', 'may', 'june', 'july', \\\n 'august', 'september', 'october', 'november', 'december', \\\n 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n time_text = ['evening', 'morning', 'afternoon', 'noon', 'night']\n for i in range(len(self.checked)):\n if self.message_text[i][1] in to_fill_quant and i > 0 and self.checked[i] == False and \\\n self.checked[i-1] == False and self.message_text[i-1][1]=='CD':\n self.quantity.append(self.message_text[i-1][0] + ' ' + self.message_text[i][0])\n self.checked[i] = True\n self.checked[i-1] = True\n if self.message_text[i][0] in day_text and self.checked[i] == False:\n self.day.append(self.message_text[i][0])\n self.checked[i] = True\n if self.message_text[i][0] in time_text and self.checked[i] == False:\n self.time.append(self.message_text[i][0])\n self.checked[i] = True\n for i in range(len(self.checked)):\n if self.message_text[i][1] == 'CD' and self.checked[i] == False:\n self.number.append(self.message_text[i][0])\n self.checked[i] = True\n\n\n def completia(self):\n if self.time is not [] and self.day is [] and self.reminder is True:\n # if we know when the timer is to be set, but don't know on which day\n # then we need to find out.\n i=0\n for i in range(len(self.message_text)):\n if self.message_text[i][0] in day_text and self.checked[i] is False:\n self.day.append(message_text[i][0])\n #```````````self.setreminder()`````````````\n break\n if self.reminder is True and self.duration is []:\n # Ask for reminder\n # Set reminder\n self.reminder = True\n\n def just_location_plus(self):\n c = getWords_special_location(self.aliner)\n\n a = ''\n for c_cmall in c:\n if c_cmall not in self.d1:\n a = a + c_cmall.title() + ' '\n else:\n a = a + c_cmall + ' '\n #print a\n potentiav = GeoText(a)\n b1 = potentiav.cities\n b2 = potentiav.countries\n #print ('list of potential countries are',b2)\n c = self.location\n self.location = []\n for ea in c:\n if ea[1] == 'GPE':\n self.location.append(ea[2])\n if ea[1] == 'TIME':\n self.time.append(ea[2])\n if ea[1] == 'DATE':\n self.day.append(ea[2])\n #print (\"len(self.message_text)\", len(self.message_text))\n self.location = self.location + b1\n self.location = self.location + b2\n def uniquefy(self):\n self.location = uniquefy_p(self.location)\n self.day = uniquefy_p(self.day)\n self.time = uniquefy_p(self.time)\n\n def baggage(self):\n b = self.message_text\n\n for i in range(len(self.checked)):\n if self.checked[i] is False:\n if b[i][0] not in self.location and b[i][0] not in self.d1:\n self.baggage.append(b[i][0])\n self.checked[i] = True\n\n\n\n####################################################################\ndef getWords_x(data):\n return re.compile(r\"[\\w'/.@-]+\").findall(data)\ndef getWords(data):\n return re.compile(r\"[\\w']+\").findall(data)\ndef getWords_special_location(data):\n return 
re.compile(r\"[\\w'/.,-@]+\").findall(data)\ndef uniquefy(a):\n b=[]\n for i in a:\n if i not in b:\n b.append(i)\n return b\n####################################################################\ndef uniquefy_p(a):\n b=[]\n for i in a:\n if i.lower() not in b:\n b.append(i.lower())\n return b\n####################################################################\n####################################################################\n####################################################################\n","repo_name":"ruchir594/NatOS","sub_path":"ActionsA/nle/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":19869,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"20321432795","text":"import numpy as np\nimport os \nimport matplotlib.pyplot as plt\nfrom mpi4py import MPI\n\n############\n# 1. INTRO #\n############\n\nclass XYZfile:\n \"\"\"This class loads an atomic structure from a XYZ file.\"\"\"\n \n def __init__(self, filename):\n \"\"\"Reads a XYZ file\n \n filename (str) : The name of the file\n \"\"\"\n \n self.filename = filename\n with open(self.filename,'r') as file : \n # number of atoms \n self.np = int(file.readline()) \n # comment \n self.comment = file.readline() \n # data \n self.data = np.zeros(shape=(self.np,),dtype=[(\"element\",\"U9\"),(\"x\",\"f8\"),(\"y\",\"f8\"),(\"z\",\"f8\")])\n for i in range(self.np):\n parts = file.readline().strip().split(' ')\n self.data[i] = (parts[0],parts[1],parts[2],parts[3])\n\nclass CubicCell:\n \n def __init__(self, comment):\n \"\"\"Reads a cubic cell from a comment contained in the xyz file\n \n comment (str) : The comment\n \"\"\"\n self.L = np.zeros(shape=(3,),dtype=\"f8\")\n parts = comment.strip().split(' ')\n self.L[0] = parts[1] # period along x\n self.L[1] = parts[5] # period along y \n self.L[2] = parts[9] # period along z\n \n def volume(self):\n \"\"\"Computes the volume of the cell\"\"\"\n return self.L[0]*self.L[1]*self.L[2] # volume of a cube\n \n def wrap(self,v):\n \"\"\"Wraps the vector v into the unit cell\n \n v (3-dim ndarray) : vector\n \"\"\"\n return np.remainder(v, self.L) # wraps a vector into the cell \n \n def pbc_distance(self,v1,v2):\n \"\"\"Computes the distance between two vectors with PBC\n \n v1 (3-dim ndarray) : vector\n v2 (3-dim ndarray) : vector\n \"\"\"\n d = np.array(v1,dtype=\"f8\") - np.array(v2,dtype=\"f8\") # general difference \n d = np.remainder(d + self.L/2.0, self.L) - self.L/2.0 # minimum image difference \n return np.linalg.norm(d) # norm of the minimum image difference\n\ndef sphere(r):\n return(4/3*np.pi*r**3)\n\ndef volume2(r,L):\n x = r/L\n return (-np.pi/12*(3-36*x**2+32*x**3))*L**3\n \ndef volume3(r,L):\n x = r/L\n return (-np.pi/4. 
+ 3*np.pi*x**2 + np.sqrt(4*x**2-2) + (1-12*x**2)*np.arctan(np.sqrt(4*x**2-2)) + 2/3*x**2*8*x*np.arctan((2*x*(4*x**2-3))/(np.sqrt(4*x**2-2)*(4*x**2+1))) )*L**3 \n\ndef gauss1D(x,mu=0,sigma=1):\n return np.exp(-0.5*(x-mu)**2/sigma/sigma)/np.sqrt(2.*np.pi)/sigma\n\ndef rdf(f,Rmax,NRmax,element_center,element_distant) : \n\n # get the cell \n cell = CubicCell(f.comment)\n \n is_center = np.char.startswith(f.data[:][\"element\"],element_center)\n is_distant = np.char.startswith(f.data[:][\"element\"],element_distant)\n \n # count the number of center particles\n Np_center = np.sum(is_center)\n \n # count the number of distant particles\n Np_distant = np.sum(is_distant)\n \n bins = np.linspace(0,Rmax,NRmax+1,endpoint=True)\n radii = np.zeros(shape=(NRmax,),dtype=\"f8\")\n hist = np.zeros(shape=(NRmax,),dtype=\"f8\")\n volumes = np.zeros(shape=(NRmax,),dtype=\"f8\")\n \n for i in range(NRmax):\n radii[i] = (bins[i]+bins[i+1])/2. # middle point between two bins\n if radii[i]<=cell.L[0]/2 : \n volumes[i]=sphere(bins[i+1])-sphere(bins[i])\n elif radii[i]<=cell.L[0]*np.sqrt(2)/2 :\n volumes[i]=volume2(bins[i+1],cell.L[0])-volume2(bins[i],cell.L[0])\n elif radii[i]<=cell.L[0]*np.sqrt(3)/2 :\n volumes[i]=(volume3(bins[i+1],cell.L[0])-volume3(bins[i],cell.L[0]))\n else :\n volumes[i]=0\n \n # accumulate gaussians in hist \n for i in range(f.np):\n if is_center[i]: #filter\n Ri = [f.data[i][\"x\"], f.data[i][\"y\"], f.data[i][\"z\"] ]\n for j in range(f.np):\n if is_distant[j] and (i!=j): #filter\n Rj = [f.data[j][\"x\"], f.data[j][\"y\"], f.data[j][\"z\"] ]\n d = cell.pbc_distance(Ri,Rj)\n hist += gauss1D(radii,d,0.2)*Rmax/NRmax\n \n rho_avg = Np_distant / cell.volume()\n return np.divide(hist, volumes, out=np.zeros_like(hist), where=volumes!=0)/rho_avg/Np_center, radii\n\n#############################\n# 2. GET LIST OF FILE NAMES #\n#############################\n\nfnames = []\n\n# We loop over all files that end with \".xyz\"\nfor dirpath, dirnames, filenames in os.walk(\"pbe400_128\", topdown=False):\n xyz_files = [f for f in filenames if f.endswith('.xyz')]\n for name in xyz_files:\n fnames.append(os.path.join(dirpath, name))\n\n##################\n# 3. 
COMPUTE RDF #\n##################\n\nRmax = 9\nNRmax = 100\nno_files = len(fnames)\n\ng_OO_avg = np.zeros(shape=(NRmax,),dtype=\"f8\")\ng_OH_avg = np.zeros(shape=(NRmax,),dtype=\"f8\")\ng_HH_avg = np.zeros(shape=(NRmax,),dtype=\"f8\")\ng_OO_max = np.zeros(shape=(NRmax,),dtype=\"f8\")\ng_OH_max = np.zeros(shape=(NRmax,),dtype=\"f8\")\ng_HH_max = np.zeros(shape=(NRmax,),dtype=\"f8\")\ng_OO_min = np.ones(shape=(NRmax,),dtype=\"f8\")*100\ng_OH_min = np.ones(shape=(NRmax,),dtype=\"f8\")*100\ng_HH_min = np.ones(shape=(NRmax,),dtype=\"f8\")*100\n\n#MPI IMPLEMENTATION\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nmy_tasks = []\n\n# distributes all files to n processors\nfor i, fname in enumerate(fnames):\n if rank == i % size:\n my_tasks.append(fname)\n\n# LOOP that needs to be parallelized \nfor i, fname in enumerate(my_tasks):\n print(f\"processor {rank}, {i}/{len(my_tasks)-1}, Processing file: {fname}...\")\n f = XYZfile(fname)\n g_OO, r_OO = rdf(f,Rmax,NRmax,\"O\",\"O\")\n g_OH, r_OH = rdf(f,Rmax,NRmax,\"O\",\"H\")\n g_HH, r_HH = rdf(f,Rmax,NRmax,\"H\",\"H\")\n g_OO_avg += g_OO / no_files\n g_OH_avg += g_OH / no_files\n g_HH_avg += g_HH / no_files\n g_OO_max = np.maximum(g_OO,g_OO_max)\n g_OH_max = np.maximum(g_OH,g_OH_max)\n g_HH_max = np.maximum(g_HH,g_HH_max)\n g_OO_min = np.minimum(g_OO,g_OO_min)\n g_OH_min = np.minimum(g_OH,g_OH_min)\n g_HH_min = np.minimum(g_HH,g_HH_min)\n\n\n#gathering for avg data at processor 0\nsendbuff_OO_avg = np.array([g_OO_avg],dtype=\"f8\")\nsendbuff_OH_avg = np.array([g_OH_avg],dtype=\"f8\")\nsendbuff_HH_avg = np.array([g_HH_avg],dtype=\"f8\")\nrecvbuff_OO_avg = np.zeros_like(sendbuff_OO_avg)\nrecvbuff_OH_avg = np.zeros_like(sendbuff_OH_avg)\nrecvbuff_HH_avg = np.zeros_like(sendbuff_HH_avg)\ncomm.Reduce(sendbuff_OO_avg,recvbuff_OO_avg,op=MPI.SUM,root=0)\ncomm.Reduce(sendbuff_OH_avg,recvbuff_OH_avg,op=MPI.SUM,root=0)\ncomm.Reduce(sendbuff_HH_avg,recvbuff_HH_avg,op=MPI.SUM,root=0)\ng_OO_avg = recvbuff_OO_avg[0]\ng_OH_avg = recvbuff_OH_avg[0]\ng_HH_avg = recvbuff_HH_avg[0]\n\n\n#gathering for max data at processor 0\nsendbuff_OO_max = np.array([g_OO_max],dtype=\"f8\")\nsendbuff_OH_max = np.array([g_OH_max],dtype=\"f8\")\nsendbuff_HH_max = np.array([g_HH_max],dtype=\"f8\")\nrecvbuff_OO_max = np.zeros_like(sendbuff_OO_max)\nrecvbuff_OH_max = np.zeros_like(sendbuff_OH_max)\nrecvbuff_HH_max = np.zeros_like(sendbuff_HH_max)\ncomm.Reduce(sendbuff_OO_max,recvbuff_OO_max,op=MPI.MAX,root=0)\ncomm.Reduce(sendbuff_OH_max,recvbuff_OH_max,op=MPI.MAX,root=0)\ncomm.Reduce(sendbuff_HH_max,recvbuff_HH_max,op=MPI.MAX,root=0)\ng_OO_max = recvbuff_OO_max[0]\ng_OH_max = recvbuff_OH_max[0]\ng_HH_max = recvbuff_HH_max[0]\n\n#gathering for min data at processor 0\nsendbuff_OO_min = np.array([g_OO_min],dtype=\"f8\")\nsendbuff_OH_min = np.array([g_OH_min],dtype=\"f8\")\nsendbuff_HH_min = np.array([g_HH_min],dtype=\"f8\")\nrecvbuff_OO_min = np.zeros_like(sendbuff_OO_min)\nrecvbuff_OH_min = np.zeros_like(sendbuff_OH_min)\nrecvbuff_HH_min = np.zeros_like(sendbuff_HH_min)\ncomm.Reduce(sendbuff_OO_min,recvbuff_OO_min,op=MPI.MIN,root=0)\ncomm.Reduce(sendbuff_OH_min,recvbuff_OH_min,op=MPI.MIN,root=0)\ncomm.Reduce(sendbuff_HH_min,recvbuff_HH_min,op=MPI.MIN,root=0)\ng_OO_min = recvbuff_OO_min[0]\ng_OH_min = recvbuff_OH_min[0]\ng_HH_min = recvbuff_HH_min[0]\n\n###########\n# 4. 
PLOT #\n###########\n\nif rank == 0:\n file_name = \"rdf.png\"\n plt.plot(r_OO,g_OO_avg,marker=\"o\",color=\"r\",label=\"g_OO\")\n plt.plot(r_OO,g_OO_min,color=\"r\")\n plt.plot(r_OO,g_OO_max,color=\"r\")\n plt.plot(r_OH,g_OH_avg,marker=\"o\",color=\"b\",label=\"g_OH\")\n plt.plot(r_OH,g_OH_min,color=\"b\")\n plt.plot(r_OH,g_OH_max,color=\"b\")\n plt.plot(r_HH,g_HH_avg,marker=\"o\",color=\"g\",label=\"g_HH\")\n plt.plot(r_HH,g_HH_min,color=\"g\")\n plt.plot(r_HH,g_HH_max,color=\"g\")\n plt.xlabel(r\"r ($\\AA$)\")\n plt.xlim(0,Rmax)\n plt.ylim(0,4)\n plt.ylabel(r\"g(r)\")\n plt.legend()\n plt.savefig(file_name)\n print(f\"File saved: {file_name}\")","repo_name":"ayhu414/general_programming_projects","sub_path":"RDF Parallelization/Problem001.py","file_name":"Problem001.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20087747067","text":"from collections import defaultdict, deque, Counter\n# d = deque()\n# d.append(5)\n# x = d.popleft()\nimport re\n# m = re.match(r\"(\\w+) (\\w+)\", \"Isaac Newton, physicist\") \n# # or re.search\n# >>> m.group(0) # The entire match\n# 'Isaac Newton'\n# >>> m.group(1) # The first parenthesized subgroup.\n# 'Isaac'\n# >>> m.group(2) # The second parenthesized subgroup.\n# 'Newton'\n# >>> m.group(1, 2) # Multiple arguments give us a tuple.\n# ('Isaac', 'Newton')\nfrom heapq import heappush, heappop\n# >>> heap = []\n# >>> data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]\n# >>> for item in data:\n# ... heappush(heap, item)\n# heap[0] is the smallest item\nimport string\n# string.ascii_lowercase == 'abcde...'\n# string.ascii_uppercase == 'ABCDE...'\n\nimport sys\n\nsys.setrecursionlimit(100000)\n\ndef get_ints(s):\n return list(map(int, re.findall(r\"-?\\d+\", s))) # copied from mcpower from mserrano on betaveros' recommendation\ndirs = [(0,1), (1,0), (0,-1), (-1,0)]\nocts = [(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1)]\ndef is_grid_valid(n,m, r,c,):\n return (0<=r self.max_len - 1:\n input_ids = input_ids[: self.max_len - 1]\n input_labels = input_labels[: self.max_len - 1]\n\n # add end token id to the input_ids\n input_ids = input_ids + [self.tokenizer.sep_token_id]\n input_labels = input_labels + [other_label_id]\n\n attention_mask = [1] * len(input_ids)\n \n padding_length = self.max_len - len(input_ids)\n if padding_length > 0:\n #if self.tokenizer.padding_side == \"right\":\n input_ids = input_ids + [self.tokenizer.pad_token_id] * padding_length\n input_labels = input_labels + [padding_label_id] * padding_length\n attention_mask = attention_mask + [0] * padding_length\n #else:\n # input_ids = [self.tokenizer.pad_token_id] * padding_length + input_ids\n # input_labels = [padding_label_id] * padding_length + input_labels\n # attention_mask = [0] * padding_length + attention_mask\n \n targets = torch.tensor(input_labels, dtype=torch.long)\n if self.use4span:\n length = len(input_labels)\n start_labels, end_labels = [0] * length, [0] * length\n skip_tokens = [\"O\", \"PAD\"]\n idx = 0\n while idx < length:\n label = id_target_map[input_labels[idx]]\n if label in skip_tokens:\n idx += 1\n continue\n \n label = label[2:]\n start_labels[idx] = span_target_id_map[label]\n next_idx = idx + 1\n while next_idx < length:\n next_label = id_target_map[input_labels[next_idx]]\n if len(next_label) > 3:\n if next_label[2:] == label:\n next_idx += 1\n if next_idx == length:\n print(f\"add end {span_target_id_map[label]}\")\n end_labels[next_idx - 1] = 
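# The preprocessing above truncates to max_len - 1, appends the SEP token id,
# then pads ids, labels and attention mask out to a fixed length. The same
# recipe in miniature (the token ids, pad id and label ids below are invented
# for illustration, not taken from the tokenizer used here):
max_len, sep_id, pad_id, pad_label = 8, 102, 0, -100
input_ids = [2023, 2003, 2019, 2742]        # pretend tokenizer output
labels = [1, 2, 2, 2]

input_ids = input_ids[: max_len - 1] + [sep_id]
labels = labels[: max_len - 1] + [0]        # "other" label on the SEP token
mask = [1] * len(input_ids)

pad = max_len - len(input_ids)
input_ids += [pad_id] * pad
labels += [pad_label] * pad
mask += [0] * pad
print(input_ids)   # [2023, 2003, 2019, 2742, 102, 0, 0, 0]
print(mask)        # [1, 1, 1, 1, 1, 0, 0, 0]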
span_target_id_map[label]\n continue\n \n end_labels[next_idx - 1] = span_target_id_map[label]\n break\n idx = next_idx\n \n targets = [torch.tensor(input_labels, dtype=torch.long), torch.tensor(start_labels, dtype=torch.long).contiguous(), torch.tensor(end_labels, dtype=torch.long)]\n \n \n sample = {\n \"ids\": torch.tensor(input_ids, dtype=torch.long),\n \"mask\": torch.tensor(attention_mask, dtype=torch.long),\n \"targets\": targets,\n }\n \n new_samples.append(sample)\n self.samples = new_samples\n \n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n return self.samples[idx]\n\nclass Biaffine(nn.Module):\n def __init__(self, in_size, out_size, bias_x=True, bias_y=True):\n super().__init__()\n self.bias_x = bias_x\n self.bias_y = bias_y\n self.out_size = out_size\n self.U = torch.nn.Parameter(torch.randn(in_size + int(bias_x), out_size ,in_size + int(bias_y)))\n # self.U1 = self.U.view(size=(in_size + int(bias_x),-1))\n #U.shape = [in_size,out_size,in_size] \n def forward(self, x, y):\n if self.bias_x:\n x = torch.cat((x, torch.ones_like(x[..., :1])), dim=-1)\n if self.bias_y:\n y = torch.cat((y, torch.ones_like(y[..., :1])), dim=-1)\n \n bilinar_mapping = torch.einsum('bxi,ioj,byj->bxyo', x, self.U, y)\n return bilinar_mapping\n\n\nclass FeedbackModel(nn.Module):\n def __init__(self,\n model_name,\n max_len=4096,\n num_labels=15,\n loss=\"ce\",\n decoder=\"softmax\",\n dynamic_merge_layers=False,\n span_num_labels=8,\n sce_alpha=4,\n sce_beta=1,\n label_smooth=0.01,\n ):\n self.num_labels = num_labels\n self.max_len = max_len\n self.merge_layers_num = merge_layers_num\n self.dynamic_merge_layers = dynamic_merge_layers\n self.sce_alpha=sce_alpha,\n self.sce_beta=sce_beta,\n self.label_smooth=label_smooth,\n\n\n hidden_dropout_prob: float = 0.1\n layer_norm_eps: float = 1e-7\n\n config = AutoConfig.from_pretrained(model_name)\n\n config.update(\n {\n \"output_hidden_states\": True,\n \"hidden_dropout_prob\": hidden_dropout_prob,\n \"layer_norm_eps\": layer_norm_eps,\n \"add_pooling_layer\": False,\n \"num_labels\": self.num_labels,\n }\n )\n \n if self.model_name == \"microsoft/deberta-v3-large\":\n config.update({\"max_position_embeddings\": 1536})\n \n self.transformer = AutoModel.from_pretrained(model_name, config=config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.dropout1 = nn.Dropout(0.1)\n self.dropout2 = nn.Dropout(0.2)\n self.dropout3 = nn.Dropout(0.3)\n self.dropout4 = nn.Dropout(0.4)\n self.dropout5 = nn.Dropout(0.5)\n \n \n if self.dynamic_merge_layers:\n self.layer_logits = nn.Linear(config.hidden_size, 1)\n \n if self.decoder == \"span\":\n self.start_fc = nn.Linear(config.hidden_size, span_num_labels)\n self.end_fc = nn.Linear(config.hidden_size, span_num_labels)\n else:\n self.output = nn.Linear(config.hidden_size, self.num_labels)\n if self.decoder == \"crf\":\n self.crf = CRF(num_tags=num_labels, batch_first=True)\n \n \n if loss == \"ce\":\n self.loss_layer = nn.CrossEntropyLoss(label_smoothing=label_smooth)\n elif loss == \"sce\":\n self.loss_layer = SCELoss(sce_alpha, sce_beta, num_classes=num_labels if self.decoder != \"span\" else span_num_labels, label_smooth=label_smooth)\n else:\n raise ValueError(\"loss set error, must in [ce, sce]\")\n\n\n\n def forward(self, ids, mask, token_type_ids=None, targets=None):\n if token_type_ids:\n transformer_out = self.transformer(ids, mask, token_type_ids, output_hidden_states=self.dynamic_merge_layers)\n else:\n transformer_out = self.transformer(ids, mask, 
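# Biaffine.forward above scores every ordered token pair with a learned
# tensor U through einsum('bxi,ioj,byj->bxyo', ...). A quick shape check with
# dummy tensors (all dimensions here are illustrative):
import torch

b, n, d, o = 2, 7, 16, 3                    # batch, seq len, hidden, classes
x = torch.randn(b, n, d + 1)                # +1 column plays the bias role
y = torch.randn(b, n, d + 1)
U = torch.randn(d + 1, o, d + 1)
scores = torch.einsum('bxi,ioj,byj->bxyo', x, U, y)
print(scores.shape)                         # torch.Size([2, 7, 7, 3])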
output_hidden_states=self.dynamic_merge_layers)\n \n if self.decoder == \"crf\" and transformer_out.last_hidden_state.shape[1] != ids.shape[1]:\n mask_add = torch.zeros((mask.shape[0], transformer_out.hidden_states[-1].shape[1] - ids.shape[1])).to(mask.device)\n mask = torch.cat((mask, mask_add), dim=-1)\n \n if self.dynamic_merge_layers:\n layers_output = torch.cat([torch.unsqueeze(layer, 2) for layer in transformer_out.hidden_states[self.merge_layers_num:]], dim=2)\n layers_logits = self.layer_logits(layers_output)\n layers_weights = torch.transpose(torch.softmax(layers_logits, dim=-1), 2, 3)\n sequence_output = torch.squeeze(torch.matmul(layers_weights, layers_output), 2)\n else:\n sequence_output = transformer_out.last_hidden_state\n \n if self.log_loss:\n sequence_output = self.layer_norm(sequence_output)\n \n sequence_output = self.dropout(sequence_output)\n \n if self.decoder == \"softmax\":\n logits1 = self.output(self.dropout1(sequence_output))\n logits2 = self.output(self.dropout2(sequence_output))\n logits3 = self.output(self.dropout3(sequence_output))\n logits4 = self.output(self.dropout4(sequence_output))\n logits5 = self.output(self.dropout5(sequence_output))\n logits = (logits1 + logits2 + logits3 + logits4 + logits5) / 5\n elif self.decoder == \"crf\":\n logits = self.output(self.dropout2(sequence_output))\n elif self.decoder == \"span\":\n sequence_output1 = self.dropout1(sequence_output)\n sequence_output2 = self.dropout2(sequence_output)\n sequence_output3 = self.dropout3(sequence_output)\n sequence_output4 = self.dropout4(sequence_output)\n sequence_output5 = self.dropout5(sequence_output)\n \n start_logits1 = self.start_fc(sequence_output1)\n start_logits2 = self.start_fc(sequence_output2)\n start_logits3 = self.start_fc(sequence_output3)\n start_logits4 = self.start_fc(sequence_output4)\n start_logits5 = self.start_fc(sequence_output5)\n start_logits = (start_logits1 + start_logits2 + start_logits3 + start_logits4 + start_logits5) / 5\n \n end_logits1 = self.end_fc(sequence_output1)\n end_logits2 = self.end_fc(sequence_output2)\n end_logits3 = self.end_fc(sequence_output3)\n end_logits4 = self.end_fc(sequence_output4)\n end_logits5 = self.end_fc(sequence_output5)\n end_logits = (end_logits1 + end_logits2 + end_logits3 + end_logits4 + end_logits5) / 5\n \n logits = (start_logits, end_logits)\n \n probs = None\n if self.decoder == \"softmax\":\n probs = torch.softmax(logits, dim=-1)\n elif self.decoder == \"crf\":\n probs = self.crf.decode(emissions=logits, mask=mask.byte())\n elif self.decoder == \"span\":\n probs = span_decode(start_logits, end_logits)\n else:\n raise ValueException(\"except decoder in [softmax, crf]\")\n loss = 0\n metric = {}\n\n if targets is not None:\n if self.decoder == \"softmax\":\n loss1 = self.loss(logits1, targets, attention_mask=mask)\n loss2 = self.loss(logits2, targets, attention_mask=mask)\n loss3 = self.loss(logits3, targets, attention_mask=mask)\n loss4 = self.loss(logits4, targets, attention_mask=mask)\n loss5 = self.loss(logits5, targets, attention_mask=mask)\n loss = (loss1 + loss2 + loss3 + loss4 + loss5) / 5\n elif self.decoder == \"crf\":\n targets = targets * mask\n loss = -1. 
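# The softmax branch above is multi-sample dropout: the same linear head is
# applied under five different dropout rates and the logits are averaged. The
# trick in its smallest form (sizes are illustrative):
import torch
import torch.nn as nn

class MultiSampleHead(nn.Module):
    def __init__(self, hidden, n_labels, rates=(0.1, 0.2, 0.3, 0.4, 0.5)):
        super().__init__()
        self.drops = nn.ModuleList(nn.Dropout(p) for p in rates)
        self.fc = nn.Linear(hidden, n_labels)

    def forward(self, x):
        return torch.stack([self.fc(d(x)) for d in self.drops]).mean(0)

print(MultiSampleHead(8, 3)(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 5, 3])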
* self.crf(emissions=logits, tags=targets, mask=mask.byte(), reduction='mean')\n elif self.decoder == \"span\":\n targets, start_targets, end_targets = targets\n \n start_loss1 = self.loss(start_logits1, start_targets, attention_mask=mask)\n start_loss2 = self.loss(start_logits2, start_targets, attention_mask=mask)\n start_loss3 = self.loss(start_logits3, start_targets, attention_mask=mask)\n start_loss4 = self.loss(start_logits4, start_targets, attention_mask=mask)\n start_loss5 = self.loss(start_logits5, start_targets, attention_mask=mask)\n start_loss = (start_loss1 + start_loss2 + start_loss3 + start_loss4 + start_loss5) / 5\n \n end_loss1 = self.loss(end_logits1, end_targets, attention_mask=mask)\n end_loss2 = self.loss(end_logits2, end_targets, attention_mask=mask)\n end_loss3 = self.loss(end_logits3, end_targets, attention_mask=mask)\n end_loss4 = self.loss(end_logits4, end_targets, attention_mask=mask)\n end_loss5 = self.loss(end_logits5, end_targets, attention_mask=mask)\n end_loss = (end_loss1 + end_loss2 + end_loss3 + end_loss4 + end_loss5) / 5\n \n loss = start_loss + end_loss\n else:\n raise ValueException(\"except decoder in [softmax, crf]\")\n \n f1 = self.monitor_metrics(probs, targets, attention_mask=mask)[\"f1\"]\n metric[\"f1\"] = f1\n \n if self.log_loss:\n loss = torch.log(loss)\n \n return {\n \"preds\": probs,\n \"logits\": logits,\n \"loss\": loss,\n \"metric\": metric\n }\n\n\n def loss(self, outputs, targets, attention_mask):\n outputs = outputs.contiguous()\n targets = targets.contiguous()\n active_loss = attention_mask.view(-1) == 1\n active_logits = outputs.view(-1, self.num_labels if self.decoder not in [\"span\", \"biaffine\"] else span_num_labels)\n true_labels = targets.view(-1)\n outputs = active_logits.argmax(dim=-1)\n idxs = np.where(active_loss.cpu().numpy() == 1)[0]\n active_logits = active_logits[idxs]\n true_labels = true_labels[idxs].to(torch.long)\n\n loss = self.loss_layer(active_logits, true_labels)\n return loss\n\n def monitor_metrics(self, outputs, targets, attention_mask):\n active_loss = (attention_mask.view(-1) == 1).cpu().numpy()\n\n idxs = np.where(active_loss == 1)[0]\n \n true_labels = targets.view(-1).cpu().numpy()\n \n \n if self.decoder in [\"softmax\", \"span\", \"biaffine\"]:\n active_logits = outputs.view(-1, self.num_labels)\n outputs = active_logits.argmax(dim=-1).cpu().numpy()[idxs]\n elif self.decoder == \"crf\":\n outputs = torch.Tensor([output + [0] * (self.max_len - len(output)) for output in outputs])\n outputs = outputs.view(-1).cpu().numpy()[idxs]\n else:\n raise ValueException(\"except decoder in [softmax, crf]\")\n \n f1_score = metrics.f1_score(true_labels[idxs], outputs, average=\"macro\")\n return {\"f1\": f1_score}\n\n\ndef set_diff_lr(\n model,\n transformer_learning_rate=1e-5,\n other_learning_rate=1e-3\n ):\n param_optimizer = list(model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\"]\n\n transformer_param_optimizer = []\n crf_param_optimizer = []\n other_param_optimizer = []\n\n for name, para in param_optimizer:\n space = name.split('.')\n if space[0] == 'transformer':\n transformer_param_optimizer.append((name, para))\n elif space[0] == 'crf':\n crf_param_optimizer.append((name, para))\n else:\n other_param_optimizer.append((name, para))\n\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in transformer_param_optimizer if not any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.01, 'lr': transformer_learning_rate},\n {\"params\": [p for n, p in transformer_param_optimizer if 
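# The loss() method above keeps only positions where the attention mask is 1
# before handing logits and labels to the loss layer. The same masking
# pattern, stand-alone (shapes are illustrative):
import torch
import torch.nn as nn

logits = torch.randn(2, 4, 3)               # (batch, seq, num_labels)
targets = torch.randint(0, 3, (2, 4))
mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])

active = mask.view(-1).bool()
loss = nn.CrossEntropyLoss()(logits.view(-1, 3)[active],
                             targets.view(-1)[active])
print(float(loss))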
any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0, 'lr': transformer_learning_rate},\n # crf模块,差分学习率\n {\"params\": [p for n, p in crf_param_optimizer if not any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.01, 'lr': 0.01},\n {\"params\": [p for n, p in crf_param_optimizer if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0, 'lr': 0.01},\n\n # 其他模块,差分学习率\n {\"params\": [p for n, p in other_param_optimizer if not any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.01, 'lr': other_learning_rate},\n {\"params\": [p for n, p in other_param_optimizer if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0, 'lr': other_learning_rate},\n ]\n\n return optimizer_grouped_parameters\n\ndef process_output(output):\n for key, val in output.items():\n if isinstance(val, torch.Tensor):\n output[key] = val.cpu().detach().numpy()\n return output\n\n\ndef set_log(log_file):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n \n logfile = log_file\n fh = logging.FileHandler(logfile, mode=\"a\")\n fh.setLevel(logging.INFO)\n \n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n \n logger.addHandler(fh)\n logger.addHandler(ch)\n\nif __name__ == \"__main__\":\n NUM_JOBS = 14\n args = parse_args()\n seed_everything(43)\n set_log(args.log)\n tb_writer = SummaryWriter(log_dir=args.tb_log, flush_secs=10)\n os.makedirs(args.output, exist_ok=True)\n\n df = pd.read_csv(os.path.join(args.input, \"train_folds.csv\"))\n\n train_df = df[df[\"kfold\"] != args.fold].reset_index(drop=True)\n valid_df = df[df[\"kfold\"] == args.fold].reset_index(drop=True)\n if args.model == \"microsoft/deberta-v3-large\":\n tokenizer = DebertaV2TokenizerFast.from_pretrained(args.model)\n else:\n tokenizer = AutoTokenizer.from_pretrained(args.model)\n training_samples = prepare_training_data(train_df, tokenizer, args, num_jobs=NUM_JOBS)\n valid_samples = prepare_training_data(valid_df, tokenizer, args, num_jobs=NUM_JOBS)\n\n train_dataset = FeedbackDataset(training_samples, args.max_len, tokenizer, use4span=args.decoder == \"span\", use4biaf=args.decoder == \"biaffine\")\n valid_dataset = FeedbackDatasetValid(valid_samples, 4096, tokenizer)\n collate = Collate(tokenizer)\n valid_dataloader = DataLoader(valid_dataset, batch_size=8, num_workers=14, collate_fn=collate)\n \n \n num_labels = len(target_id_map) - 1\n span_num_labels = int((len(target_id_map) - 2) // 2) + 1\n model = FeedbackModel(\n max_len=args.max_len,\n model_name=args.model,\n dynamic_merge_layers=args.dynamic_merge_layers,\n merge_layers_num=args.merge_layers_num,\n num_labels=num_labels,\n span_num_labels=span_num_labels,\n loss=args.loss,\n sce_alpha=args.sce_alpha,\n sce_beta=args.sce_beta,\n label_smooth=args.label_smooth,\n decoder=args.decoder,\n )\n\n best_score = None\n counter = 0\n\n params = set_diff_lr(model, transformer_learning_rate=args.trans_lr, other_learning_rate=args.other_lr,)\n \n model_engine, _, training_dataloader, _ = deepspeed.initialize(args=args,\n model=model,\n model_parameters=params,\n training_data=train_dataset\n )\n\n start_epoch = 0\n if args.ckpt:\n _, client_sd = model_engine.load_checkpoint(args.ckpt, args.ckpt_id)\n start_epoch = client_sd[\"epoch\"]\n\n steps_per_epoch = len(training_dataloader)\n for ep in range(args.epochs - start_epoch):\n ep += start_epoch\n tk0 = tqdm(training_dataloader, total=steps_per_epoch)\n \n losses = AverageMeter()\n monitor = AverageMeter()\n model_engine.train()\n for step, batch in tk0:\n #forward() method\n\n output = model_engine(batch)\n\n loss = 
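# set_diff_lr() above builds per-module parameter groups so the transformer
# body, the CRF and the task head can be trained with different learning
# rates and weight decay. The same idea reduced to two groups (module names
# and rates are illustrative):
import torch
import torch.nn as nn

model = nn.ModuleDict({"backbone": nn.Linear(8, 8), "head": nn.Linear(8, 2)})
optimizer = torch.optim.AdamW(
    [
        {"params": model["backbone"].parameters(), "lr": 1e-5},
        {"params": model["head"].parameters(), "lr": 1e-3},
    ],
    weight_decay=0.01,
)
print([group["lr"] for group in optimizer.param_groups])  # [1e-05, 0.001]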
output[\"loss\"]\n metric = output[\"metric\"]\n losses.update(loss.item(), training_dataloader.batch_size)\n monitor.update(metric, training_dataloader.batch_size)\n tb_writer.add_scalar(\"cur_f1\", metric, step + ep * steps_per_epoch)\n tb_writer.add_scalar(\"avg_f1\", monitor.avg, step + ep * steps_per_epoch)\n #runs backpropagation\n model_engine.backward(loss)\n #weight update\n model_engine.step()\n \n tk0.set_postfix(loss=losses.avg, stage=\"train\", f1=monitor.avg)\n tk0.close()\n\n client_sd['epoch'] = ep\n ckpt_id = losses.avg\n model_engine.save_checkpoint(args.output, ckpt_id, client_sd = client_sd)\n\n model_engine.eval()\n\n tk1 = tqdm(valid_dataloader, total=len(valid_dataloader))\n preds_iter = []\n for _, batch in tk1:\n output = model_engine(batch)\n preds_iter.append(process_output(output))\n tk0.set_postfix(stage=\"test\")\n tk1.close()\n\n final_preds = []\n final_scores = []\n for output in preds_iter:\n if args.direct_output:\n pred_class = output[\"preds\"]\n pred_scrs = [[1] * len(_) for _ in pred_class]\n else:\n pred_class = np.argmax(output[\"preds\"], axis=2)\n pred_scrs = np.max(output[\"preds\"], axis=2)\n \n for pred, pred_scr in zip(pred_class, pred_scrs):\n final_preds.append(pred if isinstance(pred, list) else pred.tolist())\n final_scores.append(pred_scr if isinstance(pred_scr, list) else pred_scr.tolist())\n\n for j in range(len(valid_samples)):\n tt = [id_target_map[p] for p in final_preds[j][1:]]\n tt_score = final_scores[j][1:]\n valid_samples[j][\"preds\"] = tt\n valid_samples[j][\"pred_scores\"] = tt_score\n\n submission = []\n min_thresh = {\n \"Lead\": 9,\n \"Position\": 5,\n \"Evidence\": 14,\n \"Claim\": 3,\n \"Concluding Statement\": 11,\n \"Counterclaim\": 6,\n \"Rebuttal\": 4,\n }\n proba_thresh = {\n \"Lead\": 0.7,\n \"Position\": 0.55,\n \"Evidence\": 0.65,\n \"Claim\": 0.55,\n \"Concluding Statement\": 0.7,\n \"Counterclaim\": 0.5,\n \"Rebuttal\": 0.55,\n }\n\n for _, sample in enumerate(valid_samples):\n preds = sample[\"preds\"]\n offset_mapping = sample[\"offset_mapping\"]\n sample_id = sample[\"id\"]\n sample_text = sample[\"text\"]\n sample_pred_scores = sample[\"pred_scores\"]\n\n # pad preds to same length as offset_mapping\n if len(preds) < len(offset_mapping):\n preds = preds + [\"O\"] * (len(offset_mapping) - len(preds))\n sample_pred_scores = sample_pred_scores + [0] * (len(offset_mapping) - len(sample_pred_scores))\n\n idx = 0\n phrase_preds = []\n while idx < len(offset_mapping):\n start, _ = offset_mapping[idx]\n if preds[idx] != \"O\":\n label = preds[idx][2:]\n else:\n label = \"O\"\n phrase_scores = []\n phrase_scores.append(sample_pred_scores[idx])\n idx += 1\n while idx < len(offset_mapping):\n if label == \"O\":\n matching_label = \"O\"\n else:\n matching_label = f\"I-{label}\"\n if preds[idx] == matching_label:\n _, end = offset_mapping[idx]\n phrase_scores.append(sample_pred_scores[idx])\n idx += 1\n else:\n break\n if \"end\" in locals():\n phrase = sample_text[start:end]\n phrase_preds.append((phrase, start, end, label, phrase_scores))\n\n temp_df = []\n for phrase_idx, (phrase, start, end, label, phrase_scores) in enumerate(phrase_preds):\n word_start = len(sample_text[:start].split())\n word_end = word_start + len(sample_text[start:end].split())\n word_end = min(word_end, len(sample_text.split()))\n ps = \" \".join([str(x) for x in range(word_start, word_end)])\n if label != \"O\":\n if sum(phrase_scores) / len(phrase_scores) >= proba_thresh[label]:\n temp_df.append((sample_id, label, ps))\n\n temp_df = 
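# The post-processing above converts a predicted phrase's character span back
# into whitespace-token indices to build the predictionstring column. The
# core of that mapping, stand-alone (text and offsets are made up):
text = "This essay argues that testing matters a lot"
start, end = 5, 31                          # character span of one phrase
word_start = len(text[:start].split())
word_end = min(word_start + len(text[start:end].split()), len(text.split()))
print(" ".join(str(i) for i in range(word_start, word_end)))  # "1 2 3 4"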
pd.DataFrame(temp_df, columns=[\"id\", \"class\", \"predictionstring\"])\n\n submission.append(temp_df)\n\n submission = pd.concat(submission).reset_index(drop=True)\n submission[\"len\"] = submission.predictionstring.apply(lambda x: len(x.split()))\n\n def threshold(df):\n df = df.copy()\n for key, value in min_thresh.items():\n index = df.loc[df[\"class\"] == key].query(f\"len<{value}\").index\n df.drop(index, inplace=True)\n return df\n\n submission = threshold(submission)\n\n # drop len\n submission = submission.drop(columns=[\"len\"])\n\n scr = score_feedback_comp(submission, valid_df, return_class_scores=True)\n logging.info(f\"epoch {ep} total:{scr}\")\n\n epoch_score = scr[0]\n \n score = np.copy(epoch_score)\n\n if best_score is None:\n best_score = score\n best_epoch = ep\n elif score < best_score + 0.0005:\n counter += 1\n logging.info(f\"epoch {ep} EarlyStopping counter: {counter} out of {5}\")\n if counter >= 5:\n break\n else:\n best_score = score\n best_epoch = epoch\n counter = 0","repo_name":"Jeamee/model_train","sub_path":"src/train_dp.py","file_name":"train_dp.py","file_ext":"py","file_size_in_byte":30065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6264671009","text":"import argparse\nimport sys\nimport os\nimport logging as log\n\nimport cv2\nimport numpy as np\n\nfrom openvino.inference_engine import IENetwork, IECore\n\n# -----------------------------------------------------------------------------\n# ----------------- Configure logging -----------------------------------------\n# -----------------------------------------------------------------------------\nlog.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)\n\n# -----------------------------------------------------------------------------\n# ----------------- Parse arguments -------------------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Creating the argument parser...')\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('-i', '--ifile', type=str, required=True,\n help='Required. Filename of the image to load and classify')\nparser.add_argument('-m', '--model', type=str, required=True,\n help='Required. Path to the model to use for classification. Should end in .xml')\nparser.add_argument('-o', '--ofile', type=str, required=False,\n help='Optional. Filename to write the annotated image to', default=None)\nparser.add_argument('-l', '--labels', type=str, required=False,\n help='Optional. Filename of the class id to label mappings', default=None)\nparser.add_argument('-d', '--device', type=str, required=False,\n help='Optional. Specify the target device to infer on: CPU, GPU, MYRIAD or HETERO.', default='CPU')\nparser.add_argument('-x', '--extension', type=str, required=False,\n help='Optional. Extension for custom layers.', default=None)\n\nargs = parser.parse_args()\nargs = vars(args)\n\n# -----------------------------------------------------------------------------\n# ----------------- Load the model and create the inference engine ------------\n# -----------------------------------------------------------------------------\nlog.info(f'Loading model')\n\nmodel_xml = args['model']\nmodel_bin = os.path.splitext(model_xml)[0] + '.bin'\n\nlog.info(f'... model file {model_xml}')\nlog.info(f'... 
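# threshold() above drops predicted spans whose predictionstring contains
# fewer words than a per-class minimum. The same filter written directly
# against a toy DataFrame (the thresholds here are a shortened illustrative
# copy of min_thresh, not the full table):
import pandas as pd

min_len = {"Lead": 9, "Claim": 3}
df = pd.DataFrame({"class": ["Lead", "Claim", "Claim"],
                   "predictionstring": ["1 2 3", "4 5 6 7", "8"]})
df["len"] = df.predictionstring.str.split().str.len()
kept = df[df.apply(lambda r: r["len"] >= min_len[r["class"]], axis=1)]
print(kept.drop(columns="len").to_dict("records"))
# [{'class': 'Claim', 'predictionstring': '4 5 6 7'}]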
weights file {model_bin}')\n\n# -----------------------------------------------------------------------------\n# ----------------- Create inference engine -----------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Creating inference engine')\nie = IECore()\nnet = IENetwork(model=model_xml, weights=model_bin)\n\nif args['extension'] and 'CPU' in args['device']:\n ie.add_extension(args['extension'], 'CPU')\n\nlog.info('...Checking that the network can be run on the selected device')\nsupported_layers = ie.query_network(net, args['device'])\nnot_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]\n\nif len(not_supported_layers) != 0:\n log.error('...The following layers are not supported by the device.\\n {}'.format(', '.join(not_supported_layers)))\n\n\nlog.info('...Checking that the network has a single input and output')\nassert len(net.inputs.keys()) == 1, 'The application supports single input topologies.'\nassert len(net.outputs) == 1, 'The application supports single output topologies'\n\nlog.info('...Loading the model')\nexec_net = ie.load_network(network=net, device_name=args['device'])\n\n# -----------------------------------------------------------------------------\n# ----------------- Input layer preparation -----------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Getting input information')\ninput_blob = next(iter(net.inputs))\nnet.batch_size = 1\ninput_name = ''\ninput_info_name = ''\n\nfor input_key in net.inputs:\n if len(net.inputs[input_key].layout) == 4:\n input_name = input_key\n net.inputs[input_key].precision = 'U8'\n elif len(net.inputs[input_key].layout) == 2:\n input_info_name = input_key\n net.inputs[input_key].precision = 'FP32'\n if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or net.inputs[input_key].shape[\n 0] != 1:\n log.error('Invalid input info. 
Should be 3 or 6 values length.')\n\nn, c, h, w = net.inputs[input_blob].shape\n\n# -----------------------------------------------------------------------------\n# ----------------- Output layer preparation ------------------------------------\n# -----------------------------------------------------------------------------\nout_blob = next(iter(net.outputs))\noutput_name = ''\noutput_info = net.outputs[next(iter(net.outputs.keys()))]\n\nfor output_key in net.outputs:\n if net.layers[output_key].type == 'DetectionOutput':\n output_name, output_info = output_key, net.outputs[output_key]\n\nif output_name == '':\n log.error('Can not find a DetectionOutput layer in the topology')\n\noutput_dims = output_info.shape\nif len(output_dims) != 4:\n log.error('Incorrect output dimensions for SSD model')\nmax_proposal_count, object_size = output_dims[2], output_dims[3]\n\nif object_size != 7:\n log.error('Output item should have 7 as a last dimension')\n\noutput_info.precision = 'FP32'\n\n# -----------------------------------------------------------------------------\n# ----------------- Load image ------------------------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Loading image')\nifile = args['ifile']\nimages = np.ndarray(shape=(n, c, h, w))\nimages_hw = []\n\nimage = cv2.imread(ifile)\nih, iw = image.shape[:-1]\nimages_hw.append((ih, iw))\n\nif image.shape[:-1] != (h, w):\n log.info(f'Image {ifile} has been resized from {image.shape[:-1]} to {(h, w)}')\n image = cv2.resize(image, (w, h))\n\nimage = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\nimages[0] = image\n\n# -----------------------------------------------------------------------------\n# ----------------- Run inference --------------------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Starting inference in synchronous mode')\nres = exec_net.infer(inputs={input_blob: images})\n\n# -----------------------------------------------------------------------------\n# ----------------- Get results ----------------------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Processing the output blob')\nres = res[out_blob]\n\n# -----------------------------------------------------------------------------\n# ----------------- Process results ------------------------------------------\n# -----------------------------------------------------------------------------\nlog.info('Processing detected objects')\nboxes = {}\nclasses = {}\ndata = res[0][0]\n\nfor number, proposal in enumerate(data):\n if proposal[2] > 0:\n imid = np.int(proposal[0])\n ih, iw = images_hw[imid]\n label = np.int(proposal[1])\n confidence = proposal[2]\n xmin = np.int(iw * proposal[3])\n ymin = np.int(ih * proposal[4])\n xmax = np.int(iw * proposal[5])\n ymax = np.int(ih * proposal[6])\n print(f'[{number}, {label}] element, prob = {confidence:.6} ({xmin}, {ymin})-({xmax}, {ymax}) batch id : {imid}', end=\"\\n\")\n if confidence > 0.5:\n if not imid in boxes.keys():\n boxes[imid] = []\n boxes[imid].append([xmin, ymin, xmax, ymax])\n if not imid in classes.keys():\n classes[imid] = []\n classes[imid].append(label)\n\nif args['labels']:\n with open(args['labels'], 'r') as f:\n labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]\nelse:\n labels_map = None\n\nimage = cv2.imread(ifile)\nfor imid in classes.keys():\n for idx, box in 
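# Each SSD proposal above is [image_id, label, confidence, xmin, ymin, xmax,
# ymax] with coordinates normalised to [0, 1], so they are rescaled by the
# original image size before boxes are drawn. That rescaling in isolation
# (values are illustrative; plain int is used instead of the deprecated
# np.int):
import numpy as np

proposal = np.array([0, 1, 0.87, 0.10, 0.20, 0.55, 0.90])
ih, iw = 480, 640
xmin, ymin = int(iw * proposal[3]), int(ih * proposal[4])
xmax, ymax = int(iw * proposal[5]), int(ih * proposal[6])
print((xmin, ymin, xmax, ymax))             # (64, 96, 352, 432)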
enumerate(boxes[imid]):\n class_id = classes[imid][idx] - 1\n label = labels_map[class_id] if labels_map else class_id\n image = cv2.putText(image, f'{label}', (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)\n image = cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (232, 35, 244), 2)\n cv2.imwrite('out.jpeg', image)\n\ncv2.imshow('OV Detection', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n# -----------------------------------------------------------------------------\n# ----------------- All done --------------------------------------------------\n# -----------------------------------------------------------------------------\nsys.exit(0)","repo_name":"tobymcclean/edge-inference-intro","sub_path":"openvino/ov-detection.py","file_name":"ov-detection.py","file_ext":"py","file_size_in_byte":8583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"34030571206","text":"import json\nfrom difflib import get_close_matches\n\ndata = json.load(open(\"data.json\"))\n\ndef definition(word):\n word = word.lower()\n if word in data:\n return data[word]\n elif word.title() in data:\n return data[word.title()]\n elif word.upper() in data: #in case user enters words like USA or NATO\n return data[word.upper()]\n elif len(get_close_matches(word, data.keys())) > 0:\n closer = input(\"Did you mean %s instead? Write Y if yes, else N: \" % get_close_matches(word, data.keys())[0])\n if closer == \"Y\" or closer == \"y\":\n return data[get_close_matches(word, data.keys())[0]]\n elif closer ==\"N\" or closer == \"n\":\n return \"The word does not exist. Please double check it.\"\n else: \n return \"-_- WE didn't understand your query.\"\n else:\n return \"The word does not exist. Please double check it.\"\n\nwhile True:\n word = input(\"Enter a word: \")\n if word == \"q\":\n break\n output = definition(word)\n\n k = 0\n if type(output) == list:\n for item in output:\n k += 1\n print(\"\\n\" + f\"{k}. 
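# definition() above falls back to difflib.get_close_matches when the exact
# key is missing, then asks the user to confirm the closest suggestion. The
# fallback on its own (tiny made-up vocabulary, default cutoff of 0.6):
from difflib import get_close_matches

vocab = {"rain": "water falling from clouds", "reign": "rule as a monarch"}
print(get_close_matches("rainn", vocab.keys(), n=1))   # ['rain']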
\" + item)\n \n print(\"\\n To quite please enter 'q' \")\n else: \n print(output)\n ","repo_name":"Fakhrillo/Python_dictionary","sub_path":"definition.py","file_name":"definition.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"38601838850","text":"from sensor_msgs.msg import LaserScan\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import Pose, Point\n\nfrom tf.transformations import euler_from_quaternion\nfrom math import sqrt, atan2\n\n\nclass ScanPreProcessing():\n def __init__(self, sample_size = 400, max_range = 20, padding_size=50):\n self.sample_size = sample_size\n self.max_range = max_range\n self.padding_size = padding_size\n\n def downsample(self, scan:LaserScan):\n \"\"\"\n downsample\n \"\"\"\n\n if self.sample_size == -1:\n return scan.ranges\n \n increment = len(scan.ranges)/self.sample_size\n idx = increment/2\n samples = []\n while len(samples) int:\n ans = 0\n while head:\n ans = ans*10 + head.val\n head = head.next\n return int(str(ans), 2)\n\nif __name__ == \"__main__\":\n so = Solution()\n print(so.getDecimalValue( head = [1,0,1]))","repo_name":"JackWang0107/leetcode","sub_path":"python/easy/1290_Convert_Binary_Number_in_a_Linked_List_to_Integer.py","file_name":"1290_Convert_Binary_Number_in_a_Linked_List_to_Integer.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"35455127954","text":"import cv2\nimport numpy as np\n\nimgPath = '2im.jpg'\n\ndef localStd(imgPath):\n img = cv2.imread(imgPath, True)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img = img / 255.0\n\n # c = imfilter(I,h,'symmetric');\n h = np.ones((3,3))\n n = h.sum()\n n1 = n - 1\n c1 = cv2.filter2D(img**2, -1, h/n1, borderType=cv2.BORDER_REFLECT)\n c2 = cv2.filter2D(img, -1, h, borderType=cv2.BORDER_REFLECT)**2 / (n*n1)\n J = np.sqrt( np.maximum(c1-c2,0) )\n\n cv2.imshow('stdfilt', J)\n cv2.waitKey(0)\n cv2.destroyWindow('stdfilt') \n return J\n\nlocalStd(imgPath)","repo_name":"FlechitUp/Top_Graphic","sub_path":"TrabFim/XOtsu/std.py","file_name":"std.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23506710114","text":"import time\r\nimport requests\r\nimport re\r\n\r\n#UA\r\nhead = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.3282.204 Safari/537.36'}\r\n#正则表达式\r\npat1 = re.compile('_content \"> '+'(.*?)'+'
',re.S)\r\n#There are three pages, so loop three times\r\nfor page_number in range(1,4):\r\n    page_url = 'https://tieba.baidu.com/p/5329292142?see_lz=1&pn='\r\n    #Append the page number\r\n    page_url += str(page_number)\r\n    #Fetch the page source\r\n    page_data = requests.get(page_url,headers=head)\r\n    time.sleep(2)\r\n    page_data = page_data.text\r\n    #Apply the regex to extract the content\r\n    texts = pat1.findall(page_data)\r\n    #Iterate over the returned list\r\n    for text in texts:\r\n        #Replace the tags with newline characters\r\n        text_new = text.replace('
','\\n')\r\n #以追加模式写入文件\r\n with open('novel.txt','a',encoding='utf-8')as f:\r\n f.write(text_new)","repo_name":"Ligeest/my_python","sub_path":"tieba.py","file_name":"tieba.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"16218564089","text":"#!/usr/bin/python -u\n\nfrom functools import partial\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\nimport gobject\nimport urllib2\n\nip_address = \"192.168.1.9\"\n\ndef track(conn, value):\n if value[\"Value\"] < 24.5:\n response = urllib2.urlopen('http://%s/cm?cmnd=Power2%%20ON' % ip_address)\n\ndef main():\n DBusGMainLoop(set_as_default=True)\n conn = dbus.SystemBus()\n\n conn.add_signal_receiver(partial(track, conn),\n dbus_interface='com.victronenergy.BusItem',\n signal_name='PropertiesChanged',\n path=\"/Dc/Battery/Voltage\",\n bus_name=\"com.victronenergy.system\")\n\n gobject.MainLoop().run()\n\nif __name__ == \"__main__\":\n main()","repo_name":"osaether/code-snippets","sub_path":"victronenergy/sonoff/sonoff1.py","file_name":"sonoff1.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"13960968143","text":"print('\\nПередача списка\\n')\n\n# Часто при вызове функции удобно передать список — имен, чисел или более\n# сложных объектов (например, словарей). При передаче списка функция получает\n# прямой доступ ко всему его содержимому.\n\n#пример на программе, приветствующая всех людей из списка:\n\ndef greet_users_1(names):\n \"\"\"Вывод простого сообщения для каждого пользователя в списке\"\"\"\n for name in names:\n msg = 'Hello, ' + name.title() + '!'\n print(msg)\n\nusernames = ['sanya','daniil','artem','roma']\ngreet_users_1(usernames)","repo_name":"daniileontev/Python_education","sub_path":"peredacha_spiska_v_funkciu.py","file_name":"peredacha_spiska_v_funkciu.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"72341850556","text":"import healpy as hp\nimport numpy as np\nimport astropy.io.fits as fits\nfrom astropy.io.fits import getdata\nimport glob\n\ndef make_ftp_ipix(infile, nside_out, nest=True):\n '''\n '''\n data = getdata(infile)\n ra = data['ra']\n dec = data['dec']\n hpx = hp.ang2pix(nside_out, ra, dec, nest=nest, lonlat=True)\n HPX_UN = np.unique(hpx)\n SIGNAL = np.ones(len(HPX_UN))\n \n col0 = fits.Column(name=\"HP_PIXEL_NEST_4096\", format=\"J\", array=HPX_UN)\n col1 = fits.Column(name=\"SIGNAL\", format=\"E\", array=SIGNAL)\n\n print(infile.split('/')[-1])\n cols = fits.ColDefs([col0, col1])\n tbhdu = fits.BinTableHDU.from_columns(cols)\n tbhdu.writeto('./' + infile.split('/')[-1], overwrite=False)\n\nDP0_files = glob.glob('/lustre/t1/cl/lsst/dp0_skinny/DP0/DP0_FULL/healpix/32/*.fits')\n\nfor i in DP0_files:\n make_ftp_ipix(i, 4096)\n","repo_name":"linea-it/ga_sim","sub_path":"surveys/lsst/DP0_ftp/make_ftp_DP0.py","file_name":"make_ftp_DP0.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39457452475","text":"import copy\nimport torch as th\nfrom torch.optim import Adam\nimport numpy as np\nimport random as rd\n\nimport os\n\nfrom Controller.RL.NN import NN\n\nclass QLearner:\n \"\"\"\n 1. DQN- RNNAgent\n 2. 
train\n \"\"\"\n def __init__(self,param_set, writer, name):\n self.obs_shape = param_set['obs_shape']\n self.gamma = param_set['gamma']\n self.learning_rate = param_set['learning_rate']\n self.grad_norm_clip = param_set['grad_norm_clip']\n\n self.ob_style = param_set['ob_style']\n self.Q = NN['MF-RNN'](param_set)\n\n self.params = self.Q.parameters()\n self.target_Q = copy.deepcopy(self.Q)\n\n if param_set['cuda']:\n self.Q.cuda()\n self.target_Q.cuda()\n\n self.optimiser = Adam(params=self.params, lr=self.learning_rate)\n self.train_step = 0\n self.last_update_step = 0\n self.update_frequncy = param_set['target_update_interval']\n self.writer = writer\n self.name = name\n\n def update(self):\n self.target_Q.load_state_dict(self.Q.state_dict())\n\n def approximate_Q(self, batch):\n hidden_states = self.Q.init_hidden()\n for t in range(len(batch['obs'])):\n q, hidden_states = self.Q(obs=batch['obs'][t:t+1], action_prob=batch['lma'][t:t+1], hidden_state=hidden_states)\n return q\n\n\n\n def train(self, batch, episode):\n\n self.train_step += 1\n\n hidden_states = self.Q.init_hidden().unsqueeze(0).expand(batch['bs'], -1)\n q_batch = []\n for t, done in enumerate(batch['done'][0]):\n q, hidden_states = self.Q(obs=batch['obs'][:, t], action_prob=batch['lma'][:, t], hidden_state=hidden_states)\n q_batch.append(q)\n if done:\n hidden_states = self.Q.init_hidden().unsqueeze(0).expand(batch['bs'], -1)\n q_batch = th.stack(q_batch, dim=1)\n\n next_hidden_states = self.target_Q.init_hidden().unsqueeze(0).expand(batch['bs'], -1)\n next_q_batch = []\n _, next_hidden_states = self.target_Q(obs=batch['obs'][:,0], action_prob=batch['lma'][:,0], hidden_state=next_hidden_states)\n for t, done in enumerate(batch['done'][0]):\n q, next_hidden_states = self.target_Q(obs=batch['obs'][:, t], action_prob=batch['lma'][:, t], hidden_state=next_hidden_states)\n next_q_batch.append(q)\n if done:\n if t == len(batch['done'][0]) -1:\n break\n next_hidden_states = self.target_Q.init_hidden().unsqueeze(0).expand(batch['bs'], -1)\n _, next_hidden_states = self.target_Q(obs=batch['obs'][:, t+1], action_prob=batch['lma'][:,t+1],\n hidden_state=next_hidden_states)\n next_q_batch = th.stack(next_q_batch, dim=1)\n\n chosen_action_qvals = th.gather(q, dim=2, index=batch['action'].unsqueeze(-1)).squeeze(-1)\n\n\n next_q_batch[batch['next_avail_action'] == 0] = -9999\n next_max_q, _ = next_q_batch.max(dim=1)\n\n targets = (batch['reward'] + self.gamma * (1 - batch['done']) * next_max_q).detach()\n loss = ((chosen_action_qvals - targets) ** 2).sum()\n\n self.writer.add_scalar('Loss/TD_loss_'+self.name, loss.item(), episode)\n\n # Optimise\n self.optimiser.zero_grad()\n loss.backward()\n grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.grad_norm_clip)\n self.optimiser.step()\n\n # if (loss) < 0.25 and (self.train_step - self.last_update_step)/self.update_frequncy >= 1.0:\n if (self.train_step - self.last_update_step)/self.update_frequncy >= 1.0:\n self.update()\n self.last_update_step = self.train_step\n\n def save_model(self, path):\n if not os.path.exists(path):\n os.makedirs(path)\n th.save(self.Q.state_dict(), path + 'Q' +'.pth')\n\n\n def load_model(self, path):\n file = path + 'Q.pth'\n if not os.path.isfile(file):\n print(\"here have not such model\")\n return\n self.Q.load_state_dict(th.load(file, map_location=th.device('cpu')))\n self.target_Q.load_state_dict(self.Q.state_dict())\n print('sucess load the model in ', file)\n 
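# QLearner.train() above bootstraps with a frozen target network:
# target = r + gamma * (1 - done) * max_a Q_target(s'), and regresses the
# chosen-action Q value onto it. The target computation for one transition
# (numbers are illustrative):
import torch

gamma = 0.99
reward = torch.tensor([1.0])
done = torch.tensor([0.0])
next_q = torch.tensor([[0.2, 0.8, 0.5]])    # target network's Q(s', .)
td_target = reward + gamma * (1 - done) * next_q.max(dim=1).values
print(td_target)                            # tensor([1.7920])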
return\n\n","repo_name":"ThousandOfWind/langtong","sub_path":"DEMO/Controller/RL/MF_learner_RNN.py","file_name":"MF_learner_RNN.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"17054212491","text":"from django import forms\nfrom django.contrib.auth.forms import AuthenticationForm, UsernameField\nfrom django.contrib.auth.models import User\n\nfrom .models import Application, Company, Specialty, Vacancy\n\n\nclass UserAuthenticationForm(AuthenticationForm):\n username = UsernameField(\n label='Логин',\n widget=forms.TextInput(\n attrs={\n 'autofocus': True,\n 'class': 'form-control',\n 'id': 'inputLogin'}\n )\n )\n password = forms.CharField(\n label=\"Пароль\",\n widget=forms.PasswordInput(\n attrs={\n 'autofocus': True,\n 'class': 'form-control',\n 'id': 'inputPassword'}\n )\n )\n\n\nclass UserCompanyEditForm(forms.ModelForm):\n name = forms.CharField(\n max_length=64,\n label='Название компании',\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'id': 'companyName',\n 'type': 'text',\n }\n )\n )\n location = forms.CharField(\n max_length=64,\n label='География',\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'id': 'companyLocation',\n 'type': 'text'\n }\n )\n )\n logo = forms.FileField(\n label='Логотип',\n widget=forms.FileInput(\n attrs={\n 'class': 'custom-file-input',\n 'id': 'inputGroupFile01',\n\n }\n )\n )\n description = forms.CharField(\n max_length=200,\n label='Информация о компании',\n widget=forms.Textarea(\n attrs={\n 'class': 'form-control',\n 'id': 'companyInfo',\n 'rows': '4',\n 'style': 'color:#000;'\n }\n )\n )\n employee_count = forms.IntegerField(\n label='Количество человек в компании',\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'id': 'companyTeam',\n }\n )\n )\n\n class Meta:\n model = Company\n fields = ('name', 'location', 'logo', 'description', 'employee_count',)\n\n\nclass UserCompanyVacancyEditForm(forms.ModelForm):\n\n title = forms.CharField(\n max_length=64,\n label='Название вакансии',\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'id': 'vacancyTitle',\n 'type': 'text',\n }\n )\n )\n specialty = forms.ChoiceField(\n label='Специализация',\n widget=forms.Select(\n attrs={\n 'class': 'custom-select mr-sm-2',\n 'id': 'userSpecialization'\n }\n ),\n choices=((str(Specialty), str(Specialty)) for Specialty in Specialty.objects.all())\n )\n\n salary_min = forms.IntegerField(\n label='Зарплата от',\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'id': 'vacancySalaryMin',\n }\n )\n )\n\n salary_max = forms.IntegerField(\n label='Зарплата до',\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'id': 'vacancySalaryMax',\n }\n )\n )\n\n skills = forms.CharField(\n max_length=200,\n label='Требуемые навыки',\n widget=forms.Textarea(\n attrs={\n 'rows': '3',\n 'class': 'form-control',\n 'id': 'vacancySkills',\n 'style': \"color:#000;\"\n }\n )\n )\n\n description = forms.CharField(\n max_length=200,\n label='Описание',\n widget=forms.Textarea(\n attrs={\n 'rows': '13',\n 'class': 'form-control',\n 'id': 'vacancyDescription',\n 'style': \"color:#000;\"\n }\n )\n )\n\n class Meta:\n model = Vacancy\n fields = ('title', 'salary_min', 'salary_max', 'skills', 'description', )\n\n\nclass UserRegisterForm(forms.ModelForm):\n username = forms.CharField(\n label='Логин',\n widget=forms.TextInput(\n attrs={\n 'autofocus': True,\n 'class': 'form-control',\n 'id': 
'inputLogin'}\n )\n )\n first_name = forms.CharField(\n label='Имя',\n widget=forms.TextInput(\n attrs={\n 'autofocus': True,\n 'class': 'form-control',\n 'id': 'inputName'}\n )\n )\n last_name = forms.CharField(\n label='Фамилия',\n widget=forms.TextInput(\n attrs={\n 'autofocus': True,\n 'class': 'form-control',\n 'id': 'inputSurname'}\n )\n )\n password = forms.CharField(\n label=\"Пароль\",\n widget=forms.PasswordInput(\n attrs={\n 'autofocus': True,\n 'class': 'form-control',\n 'id': 'inputPassword'}\n )\n )\n\n class Meta:\n model = User\n fields = ('username', 'password', 'first_name', 'last_name',)\n\n\nclass UserApplicationForm(forms.ModelForm):\n written_username = forms.CharField(\n max_length=64,\n label='Вас зовут',\n widget=forms.TextInput(\n attrs={\n 'type': 'text',\n 'class': 'form-control',\n 'id': 'userName',\n 'placeholder': ''}\n )\n )\n written_phone = forms.CharField(\n max_length=64,\n label='Ваш телефон',\n widget=forms.TextInput(\n attrs={\n 'type': 'tel',\n 'class': 'form-control',\n 'id': 'userPhone',\n 'placeholder': ''}\n )\n )\n written_cover_letter = forms.CharField(\n max_length=500,\n label='Сопроводительное письмо',\n widget=forms.Textarea(\n attrs={\n 'rows': '8',\n 'class': 'form-control',\n 'id': 'userMsg',\n }\n )\n )\n\n class Meta:\n model = Application\n fields = ('written_username', 'written_phone', 'written_cover_letter',)\n","repo_name":"EnikeevAI/Stepik_Django_vacancies_project","sub_path":"vacancies/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73262708477","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom scipy.io import loadmat\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport pandas as pd\nimport prince\nimport triedacp\nimport triedtools\n\n\ndef centre_and_reduce(X):\n '''\n Centres an array by subtracting mean of column from each value\n :param X: X\n :type X: nparray\n :return: centre\n :rtype: nparray\n '''\n mean = np.mean(X, 0)\n centered = X - mean\n\n stddev = np.std(centered, 0)\n # centered_reduced = centered[:, None] / stddev\n centered_reduced = centered[:, :] / stddev\n return centered_reduced\n\n\n# ************************\n# load the data\n\nmat_dict = loadmat('/Users/carl/Dropbox/Docs/Python/PyCharm/TRIED_RNRF_GIT/TRIED_TP4/clim_t2C_J1982D2010.mat')\nclim_t2 = mat_dict['clim_t2']\n\n# find out what type is in the dict\nif isinstance(clim_t2, list):\n print('list')\nelif isinstance(clim_t2, np.ndarray):\n print('ndarray')\nelse:\n print('something else')\n\nprint(np.shape(clim_t2))\n\n# ************************\n# Plot graph of temperature by months\n\n# set up values for graphs\nville = ['Reykjavik', 'Oslo', 'Paris', 'New York', 'Tunis', 'Alger', 'Beyrouth', 'Atlan27N40W', 'Dakar']\nvalues = range(9)\nprint(values)\n\njet = plt.get_cmap('jet')\ncNorm = colors.Normalize(vmin=0, vmax=values[-1])\nscalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\n\n# create new figure\nfig = plt.figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Temperatures dans 9 regions, de janvier 1982 à décembre 2010')\nplt.ylabel('Temperature')\nplt.xticks(clim_t2[::12, 0], rotation=45)\nplt.xlabel('Années')\n\nfor i in values:\n colorVal = scalarMap.to_rgba(values[i])\n plt.plot(clim_t2[:, 0] + (clim_t2[:, 1] / 12), clim_t2[:, i+2], color=colorVal, 
label=ville[i])\n\nplt.legend()\nplt.show()\n\nfig.savefig('temp_by_month.png')\n\n# ************************\n# show graph of mean temperatures and std devs for each location (calculated over the entire time range)\n\nmean_temps = np.mean(clim_t2[:, 2::], 0)\nstd_temps = np.std(clim_t2[:, 2::], 0)\n\n# set up values for graphs\nville = ['Reykjavik', 'Oslo', 'Paris', 'New York', 'Tunis', 'Alger', 'Beyrouth', 'Atlan27N40W', 'Dakar']\nvalues = range(9)\n\n# create new figure\nfig = plt.figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Temperatures moyennes dans 9 regions, de janvier 1982 à décembre 2010')\nplt.ylabel('Temperature')\nplt.xticks(values, ville[:], rotation=45)\nplt.xlabel('Années')\n\nplt.scatter(values, mean_temps, color='b', label='Mean')\nplt.scatter(values, mean_temps - std_temps, color='g', label='-1 Std dev', marker='v')\nplt.scatter(values, mean_temps + std_temps, color='g', label='+1 Std dev', marker='^')\n\nplt.legend()\nplt.show()\n\nfig.savefig('mean_temp_entire_range.png')\n\n# ************************\n# show graph of mean temperatures and std devs for each location (calculated over the entire time range)\n\nclim_t2_centered = centre_and_reduce(clim_t2[:, 2:])\n\n# ************************\n# plot correlation circle with labels\n\ndf = pd.DataFrame(data=clim_t2_centered)\ndf.columns = ville\n# variables are the 9 locations (use -1 to mean all)\npca = prince.PCA(df, n_components=-1)\nfig1, ax1 = pca.plot_correlation_circle(axes=(0, 1), show_labels=True)\nfig1.show()\nfig1.savefig('correlation_circle.png')\n\nfig1, ax1 = pca.plot_cumulative_inertia()\nfig1.show()\nfig1.savefig('inertia_cumulative.png')\n\nfig1, ax1 = pca.plot_inertia()\nfig1.show()\nfig1.savefig('inertia.png')\n\n# get eigenvalues & eigenvectors, and project onto principal axes\neigval, eigvec, XU = triedacp.acp(clim_t2_centered)\n\n# ************************\n# plot eigenvalues as bar chart, and cumulative inertia as line plot\n\ninertia, cumulative_inertia = triedacp.phinertie(eigval)\nfig.savefig('bar_eigen_line_cumul_inertia.png')\n\n# ************************\n# qual : Les qualités de réprésentation des individus par les axes\n# contrib: Les contributions des individus à la formation des axes\n\nqual, contrib = triedacp.qltctr2(XU, eigval)\n\n# verify that the sum of representations of each individual equals one (over each row)\nqual_sum = np.sum(qual, 1)\n# output is:\n# [ 1. 1. 1. 1. 1. 1. 1. 1. 1.]\n\n# verify that the sum of contributions to each axis equals one (over each col)\ncontrib_sum = np.sum(contrib, 0)\n# output is:\n# [ 1. 1. 1. 1. 
1.]\n\n# calc mean temps for each month across all locations\nmean_temps = np.mean(clim_t2[:, 2:], 1)\n\n# create new figure\nfig = plt.figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Scatter-plot of instances in variable space')\nplt.ylabel('PC2')\nplt.xlabel('PC1')\nplt.axhline(0, color='k')\nplt.axvline(0, color='k')\n\njet = plt.get_cmap('jet')\ncNorm = colors.Normalize(vmin=mean_temps.min(), vmax=mean_temps.max())\nscalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\ncolorVals = scalarMap.to_rgba(mean_temps)\n\nsc = plt.scatter(XU[:, 0], XU[:, 1], marker='None', c=mean_temps, cmap=jet)\n\nplt.scatter(XU[1::12, 0], XU[1::12, 1], marker='o', c=colorVals[1::12, :], label='Jan')\nplt.scatter(XU[2::12, 0], XU[2::12, 1], marker=',', c=colorVals[2::12, :], label='Feb')\nplt.scatter(XU[3::12, 0], XU[3::12, 1], marker='v', c=colorVals[3::12, :], label='Mar')\nplt.scatter(XU[4::12, 0], XU[4::12, 1], marker='8', c=colorVals[4::12, :], label='Apr')\nplt.scatter(XU[5::12, 0], XU[5::12, 1], marker='+', c=colorVals[5::12, :], label='May')\nplt.scatter(XU[6::12, 0], XU[6::12, 1], marker='D', c=colorVals[6::12, :], label='Jun')\nplt.scatter(XU[7::12, 0], XU[7::12, 1], marker='*', c=colorVals[7::12, :], label='Jul')\nplt.scatter(XU[8::12, 0], XU[8::12, 1], marker='_', c=colorVals[8::12, :], label='Aug')\nplt.scatter(XU[9::12, 0], XU[9::12, 1], marker='^', c=colorVals[9::12, :], label='Sep')\nplt.scatter(XU[10::12, 0], XU[10::12, 1], marker='x', c=colorVals[10::12, :], label='Oct')\nplt.scatter(XU[11::12, 0], XU[11::12, 1], marker='|', c=colorVals[11::12, :], label='Nov')\nplt.scatter(XU[12::12, 0], XU[12::12, 1], marker='p', c=colorVals[12::12, :], label='Dec')\n\nplt.colorbar(sc)\n\nmonths = ['Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin',\n 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre']\n# N = np.size(XU, 0)\n# for i in range(N):\n# plt.text(XU[:, 0], XU[:, 1], months[i % 12])\n\nplt.legend()\nplt.show()\n\nfig.savefig('pca-nuage-1-2.png')\n\n\n# ************************\n# calculate mean temps for each location across each month\n\nmean_temps_monthly = []\nfor i in np.arange(12):\n mean_temps_monthly.append(np.mean(clim_t2[i::12, 2:], 0))\nmean_temps_monthly = np.array(mean_temps_monthly)\n\n\n# create new figure\nfig = plt.figure(num=None, figsize=(20, 8), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Plot of average monthly temperatures for 9 locations')\nplt.xlabel('Month')\nplt.ylabel('Average Temperature (degrees C)')\nmonths = ['Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin',\n 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre']\nplt.xticks(np.arange(len(months)), months, rotation=25)\n\njet = plt.get_cmap('jet')\ncNorm = colors.Normalize(vmin=0, vmax=values[-1])\nscalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\n\nville = ['Reykjavik', 'Oslo', 'Paris', 'New York', 'Tunis', 'Alger', 'Beyrouth', 'Atlan27N40W', 'Dakar']\nvalues = range(len(ville))\n\nfor i in values:\n colorVal = scalarMap.to_rgba(values[i])\n plt.plot(np.arange(len(months)), mean_temps_monthly[:, i], color=colorVal, label=ville[i])\n\nplt.legend()\nplt.show()\n\nfig.savefig('average_monthly_temps.png')\n\n\n# ************************\n# Load co2 values\n\nmat_dict = loadmat('/Users/carl/Dropbox/Docs/Python/PyCharm/TRIED_RNRF_GIT/TRIED_TP4/clim_co2_J1982D2010.mat')\nclim_co2 = mat_dict['clim_co2']\n\n# find out what type is in the dict\nif isinstance(clim_co2, list):\n print('list')\nelif isinstance(clim_co2, np.ndarray):\n 
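# triedacp.acp above performs a classical PCA on the centred-reduced data:
# diagonalise the correlation matrix and project onto its eigenvectors. A
# minimal NumPy equivalent for checking shapes (random data; triedacp itself
# is not reproduced here):
import numpy as np

X = np.random.rand(100, 9)
Z = (X - X.mean(0)) / X.std(0)              # centre and reduce
corr = Z.T @ Z / len(Z)                     # correlation matrix
eigval, eigvec = np.linalg.eigh(corr)
order = np.argsort(eigval)[::-1]            # largest variance first
eigval, eigvec = eigval[order], eigvec[:, order]
XU = Z @ eigvec                             # coordinates on principal axes
print(XU.shape, eigval.sum().round(2))      # (100, 9) 9.0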
print('ndarray')\nelse:\n print('something else')\n\nprint(np.shape(clim_co2))\n\n# ************************\n# Calculate linear regression of co2\n\nb0, b1, s, R2, sigb0, sigb1 = triedtools.linreg(clim_co2[:, 0] + (clim_co2[:, 1] / 12), clim_co2[:, 2])\n\n# ************************\n# Subtract from each co2 value it's corresponding point on the linreg line\n\n# create list of just co2 values\nclim_co2_cor = np.array(clim_co2[:, 2])\n\n# create list of indices to use in calculation\nindices = np.arange(np.size(clim_co2_cor, 0))\n\n# increment all indices by 1, so the list starts from 1\nindices = indices + 1\n\n# divide all indices by 12, so the values represent steps in years\nindices = indices / 12\n\n# multiply all indices by b1, so the values represent the values of the linreg line\nindices = indices * b1\n\n# calculate diffs between co2 and linreg line\nclim_co2_cor = clim_co2_cor - indices\n\n# ************************\n# Plot XU scatter as before, using co2 differences as colour values\n\n# create new figure\nfig = plt.figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Scatter-plot of instances in variable space')\nplt.ylabel('PC2')\nplt.xlabel('PC1')\nplt.axhline(0, color='k')\nplt.axvline(0, color='k')\n\njet = plt.get_cmap('jet')\ncNorm = colors.Normalize(vmin=clim_co2_cor.min(), vmax=clim_co2_cor.max())\nscalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\ncolorVals = scalarMap.to_rgba(clim_co2_cor)\n\nsc = plt.scatter(XU[:, 0], XU[:, 1], marker='None', c=clim_co2_cor, cmap=jet)\n\nplt.scatter(XU[1::12, 0], XU[1::12, 1], marker='o', c=colorVals[1::12, :], label='Jan')\nplt.scatter(XU[2::12, 0], XU[2::12, 1], marker=',', c=colorVals[2::12, :], label='Feb')\nplt.scatter(XU[3::12, 0], XU[3::12, 1], marker='v', c=colorVals[3::12, :], label='Mar')\nplt.scatter(XU[4::12, 0], XU[4::12, 1], marker='8', c=colorVals[4::12, :], label='Apr')\nplt.scatter(XU[5::12, 0], XU[5::12, 1], marker='+', c=colorVals[5::12, :], label='May')\nplt.scatter(XU[6::12, 0], XU[6::12, 1], marker='D', c=colorVals[6::12, :], label='Jun')\nplt.scatter(XU[7::12, 0], XU[7::12, 1], marker='*', c=colorVals[7::12, :], label='Jul')\nplt.scatter(XU[8::12, 0], XU[8::12, 1], marker='_', c=colorVals[8::12, :], label='Aug')\nplt.scatter(XU[9::12, 0], XU[9::12, 1], marker='^', c=colorVals[9::12, :], label='Sep')\nplt.scatter(XU[10::12, 0], XU[10::12, 1], marker='x', c=colorVals[10::12, :], label='Oct')\nplt.scatter(XU[11::12, 0], XU[11::12, 1], marker='|', c=colorVals[11::12, :], label='Nov')\nplt.scatter(XU[12::12, 0], XU[12::12, 1], marker='p', c=colorVals[12::12, :], label='Dec')\n\nplt.colorbar(sc)\n\nplt.legend()\nplt.show()\n\nfig.savefig('pca-nuage-1-2-co2.png')\n","repo_name":"carl-robinson/tried-rnrf","sub_path":"TRIED_TP4/tp4.py","file_name":"tp4.py","file_ext":"py","file_size_in_byte":10502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16181197204","text":"import gzip\nimport json\nfrom tqdm import tqdm\nimport pickle\n\ndef parse(path):\n g = gzip.open(path, 'r')\n for l in tqdm(g):\n yield json.loads(l)\n\nasins_set = set()\nList_repeated = set()\nwith open('data_asin.json', 'r') as f:\n asins = json.load(f)\n for x in asins:\n if x in asins_set:\n List_repeated.add(x)\n else:\n asins_set.add(x)\n\n# print(len(asins_set))\n# print(List_repeated)\n# pickle.dump( List_repeated, open( \"list_repeated.p\", \"wb\" ) )\n# print(len(List_repeated))\n\ndata = list(parse(\"AMAZON_FASHION.json.gz\"))\nfinal_review_data = 
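# Above, the CO2 series is detrended by subtracting the fitted straight line
# so the scatter colours reflect the seasonal anomaly rather than the secular
# rise. The same detrending with NumPy alone (a synthetic trend-plus-cycle
# series stands in for the real record):
import numpy as np

t = np.arange(120) / 12.0                       # ten years, monthly steps
co2 = 340.0 + 1.8 * t + 3.0 * np.sin(2 * np.pi * t)
slope, intercept = np.polyfit(t, co2, 1)        # degree-1 least squares fit
anomaly = co2 - (intercept + slope * t)         # roughly the seasonal part
print(round(float(slope), 2), round(float(anomaly.std()), 2))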
[]\nfor obj in tqdm(data):\n if obj['asin'] in asins_set:\n final_review_data.append(obj)\n\nwith open('review_data.json', 'w', encoding='utf-8') as f:\n json.dump(final_review_data, f, ensure_ascii=False, indent=4)\n","repo_name":"Surya97/Surya97.github.io","sub_path":"data_analysis/asin_reviews.py","file_name":"asin_reviews.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"14572655265","text":"'''\nFind the kth smallest numbers in an unsorted integer array.\n\nHave you met this question in a real interview? Yes\nExample\nGiven [3, 4, 1, 2, 5], k = 3, the 3rd smallest numbers are [1, 2, 3].\n\nChallenge \nAn O(nlogn) algorithm is acceptable, if you can do it in O(n), that would be great.\n'''\n\nclass Solution:\n # @param {int} k an integer\n # @param {int[]} nums an integer array\n # return {int} kth smallest element\n def kthSmallest(self, k, nums):\n # Write your code here\n if k > len(nums) or len(nums) == 0 or k == 0:\n return []\n return self.quickSelect(nums, 0, len(nums)-1, k-1)\n \n def quickSelect(self, nums, start, end, k):\n if start == end:\n return nums[start]\n i = start\n j = end\n pivot = nums[(i+j)/2]\n while i <= j:\n while i <= j and nums[i] < pivot:\n i += 1\n while i <= j and nums[j] > pivot:\n j -= 1\n if i <= j:\n temp = nums[i]\n nums[i] = nums[j]\n nums[j] = temp\n i += 1\n j -= 1\n if j >= k and start <= j:\n return self.quickSelect(nums, start, j, k)\n elif i <= k and end >= i:\n return self.quickSelect(nums, i, end, k)\n else:\n return nums[k]","repo_name":"EvianTan/Lintcode-Leetcode","sub_path":"Kth Smallest Numbers in Unsorted Array.py","file_name":"Kth Smallest Numbers in Unsorted Array.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30119865345","text":"from Tkinter import *\nfrom tkMessageBox import *\nimport tkFileDialog\nimport tkSimpleDialog\nimport ttk\n\n# Import for WebAutomation.\nimport automation\n\n# Imports for manipulating spreadsheet.\nimport spreadsheet\nimport htmails_ss\n\n# Import for directory manipulations.\nimport os.path\n\n# Import for reading latin-1 file.\nimport io\n\n# Import for waiting.\nimport time\n\n# Import json, for config.json.\nimport json\n\n# Webdrivers.\nimport webdriver\n\n### Classes.\nclass GUI (Tk):\n \"\"\"HTMails Automation GUI.\"\"\"\n\n def __init__ (self):\n \"\"\"Create all GUI Widgets: MenuBar, Labels, TextBoxs, Buttons etc.\"\"\"\n \n # Overhead code, to initialise Tkinter Frame.\n self.root = Tk.__init__ (self)\n self.frame = Frame (self.root)\n self.title (\"Automail-HT\")\n self.geometry (\"650x275\")\n\n # TODO: Add icon.\n # self.add_icon()\n \n self.frame.grid (sticky = W+E+N+S)\n\n # Create MenuBar with items.\n self.create_menubar ()\n \n # Create widgets.\n self.create_widgets ()\n \n # Configure grid.\n self.rowconfigure (4, weight = 1)\n self.columnconfigure (3, weight = 1)\n\n # Find webdrivers that are installed, to fill combobox.\n self.fill_combobox (self.combo_webdriver,\n get_installed_webdrivers ())\n \n # Set default values.\n self.set_default_values ()\n \n # Events.\n self.txt_password.bind (\"\", self._sendHTMails)\n self.btn_sendHTMails.bind (\"\", self.rollover_enter)\n self.btn_sendHTMails.bind (\"\", self.rollover_leave)\n self.btn_exit.bind (\"\", self.rollover_enter)\n self.btn_exit.bind (\"\", self.rollover_leave)\n\n # TODO\n # def add_icon(self):\n # \"\"\"Adds an 
icon. The use depends on the window manager\"\"\"\n \n # # Create the bitmap\n # icon = Image('photo',\n # file = get_thisfile_directory() + os.pardir + os.sep + \\\n # 'img/the-icon.gif')\n # self.tk.call('wm', 'iconphoto', self.master._w, icon)\n \n def create_menubar (self):\n \"\"\"Create the MenuBar for the application.\n Menu is:\n # Archivo -> Enviar Mails | Salir\n # Editar -> Usuario ||\n # -> Valores por Defecto\n # Ayuda -> Manual ||\n # -> Info ||\n # -> Licencia\"\"\"\n\n menubar = Menu (self)\n\n filemenu = Menu (menubar, tearoff = 0)\n filemenu.add_command (label = \"Enviar Mails\",\n command = self.sendHTMails)\n filemenu.add_command (label = \"Salir\", command = self.destroy)\n menubar.add_cascade (label = \"Archivo\", menu = filemenu)\n\n editmenu = Menu (menubar, tearoff = 0)\n editmenu.add_command (label = \"Valores por defecto\",\n command = self.change_config_file)\n menubar.add_cascade (label = \"Editar\", menu = editmenu)\n\n helpmenu = Menu (menubar, tearoff = 0)\n helpmenu.add_command (label = \"Manual\", command = self.show_manual)\n helpmenu.add_command (label = \"Informacion\",\n command = self.show_information)\n helpmenu.add_command (label = \"Licencia\", command = self.show_license)\n menubar.add_cascade (label = \"Ayuda\", menu = helpmenu)\n\n self.config (menu = menubar)\n\n def create_widgets (self):\n \"\"\"Create all the widgets for the GUI.\"\"\"\n # Static labels:\n self.lbl_user = Label (self, text = \"Usuario: \")\n self.lbl_password = Label (self, text = \"Clave: \")\n self.lbl_webdriver = Label (self, text = \"Navegador: \")\n self.lbl_spreadsheet = Label (self, text = \"Planilla: \")\n\n # Textboxs:\n self.user = StringVar ()\n self.user.set (\"\")\n self.txt_user = Entry (self, name = \"txt_user\",\n textvariable = self.user)\n # Make txtPassword display text as \"*\".\n self.password = StringVar ()\n self.password.set (\"\")\n self.txt_password = Entry (self, name = \"txt_password\", show = \"*\",\n textvariable = self.password)\n\n self.spreadsheet_path = StringVar ()\n self.spreadsheet_path.set (\"\")\n self.txt_spreadsheet_path = Entry (self, name = \"txt_spreadsheet_path\",\n textvariable = self.spreadsheet_path)\n self.txt_spreadsheet_path.config (state = \"readonly\")\n \n # Buttons.\n self.btn_select_spreadsheet = Button (self, text = \"...\",\n command = self.get_spreadsheet,\n height = 1) \n self.btn_sendHTMails = Button (self, text = \"Enviar HTMails\",\n command = self.sendHTMails,\n height = 2,\n width = 10)\n self.btn_exit = Button (self, text = \"Salir\", command = self.destroy,\n height = 2, width = 10)\n\n # Webdrivers combo box.\n self.combo_webdriver = ttk.Combobox (self)\n self.combo_webdriver.config (state = \"readonly\")\n\n # Browser extension checkbutton.\n self.use_htmails_extension = IntVar ()\n self.chk_use_htmails_extension = \\\n Checkbutton (self, text = \"Usar extension de HTMails\",\n variable = self.use_htmails_extension)\n\n # Automail-HT won't ask for login info.\n self.no_login_info = IntVar ()\n self.chk_no_login_info = \\\n Checkbutton (self, text = \"Poner información de logueo en HT\",\n variable = self.no_login_info)\n\n # FIXME: Disable the checkbutton for now, until the web extension\n # is supported.\n self.chk_use_htmails_extension.config (state = DISABLED)\n \n # Put widgets on frame.\n self.lbl_user.grid (row = 1, column = 0, padx = 10, pady = 0,\n sticky = E)\n self.lbl_password.grid (row = 2, column = 0, padx = 10, pady = 0,\n sticky = E)\n self.lbl_webdriver.grid (row = 1, column = 2, padx = 10, 
pady = 10,\n sticky = E)\n self.lbl_spreadsheet.grid (row = 3, column = 0, padx = 10, pady = 10,\n sticky = E)\n self.txt_user.grid (row = 1, column = 1, padx = 0, pady = 10)\n self.txt_password.grid (row = 2, column = 1, padx = 0, pady = 10)\n self.txt_spreadsheet_path.grid (row = 3, column = 1,\n padx = 0, pady = 10,\n sticky = E)\n self.btn_sendHTMails.grid (row = 4, column = 1, padx = 10, pady = 10,\n sticky = W)\n self.btn_exit.grid (row = 4, column = 2, padx = 10, pady = 10,\n sticky = W)\n self.btn_select_spreadsheet.grid (row = 3, column = 2, sticky = W)\n self.combo_webdriver.grid (row = 1, column = 3, sticky = W)\n self.chk_use_htmails_extension.grid (row = 2, column = 3, sticky = W)\n self.chk_no_login_info.grid (row = 3, column = 3, sticky = W)\n\n def fill_combobox (self, cbo, values):\n \"\"\"Fills the combobox CBO, with VALUES.\"\"\"\n \n cbo[\"values\"] = values\n \n def set_default_values (self):\n \"\"\"Sets default values of fields.\"\"\"\n\n try:\n config_file = open (get_thisfile_directory () + os.pardir + \\\n os.sep + \"config.json\")\n default_values = json.load (config_file)\n config_file.close ()\n except IOError:\n # In case of failing to read config_file, do this to avoid\n # exception when calling askopenfilename. See Issue #1.\n self.user.set (\"\")\n self.combo_webdriver.set (self.combo_webdriver[\"values\"][0])\n self.default_directory = os.path.expanduser (\"~\")\n showinfo (\"Error\", \"No pudo abrirse el archivo config.json\")\n else:\n self.user.set (default_values[\"Usuario\"])\n\n # Set the default driver. Default to the first if the default\n # browser has no webdriver installed (or supported).\n if default_values[\"Navegador Default\"] in list (self.combo_webdriver[\"values\"]):\n self.combo_webdriver.set (default_values[\"Navegador Default\"])\n else:\n self.combo_webdriver.set (self.combo_webdriver[\"values\"][0])\n\n # Maybe expand the default directory, based on '~' or 'src'.\n if (default_values[\"Directorio Default\"] == \"~\"):\n self.default_directory = os.path.expanduser (\"~\")\n elif (default_values[\"Directorio Default\"] == \"src\"):\n self.default_directory = get_thisfile_directory () + \\\n os.pardir + os.sep\n else:\n self.default_directory = default_values[\"Directorio Default\"]\n \n def get_spreadsheet (self):\n \"\"\"Prompt the user to find the path to the spreadsheet to be used.\"\"\"\n\n self.spreadsheet_path.set (tkFileDialog.askopenfilename (\n initialdir = self.default_directory,\n title = \"Selecciona la planilla\",\n filetypes = [(\"Hojas de datos\", \"*.xls* *.ods\"),\n (\"All files\", \"*.*\")]))\n # Show last part of the file selected.\n self.txt_spreadsheet_path.xview_moveto (1)\n \n def validate_entry (self):\n \"\"\"Check if textboxs are not empty.\"\"\"\n\n # Only check spreadsheet_path, if no_login_info is used.\n if self.spreadsheet_path.get () != \"\":\n if self.no_login_info.get () == 1:\n return True\n else:\n return (self.user.get () != \"\" and self.password.get () != \"\")\n else:\n return False\n\n def get_message_paths (self):\n \"\"\"Prompt the user for the paths to the messages templates to be used.\n The total messages are retrieved from the spreadsheet.\"\"\"\n\n message_paths = []\n total_messages = self.htmails_file.get_total_messages ()\n\n for message in range (total_messages):\n file_path = tkFileDialog.askopenfilename (\n initialdir = self.default_directory,\n title = \"Selecciona el mensaje \" + str (message + 1),\n filetypes = [(\"Archivos de texto\", \"*.txt\")])\n\n message_paths.append 
(file_path)\n\n # If the user doesn't give one message, break.\n if (file_path == \"\"):\n break\n \n return message_paths \n \n def _sendHTMails (self, event):\n \"\"\"Stub function that calls the real function, sendHTMails.\n Needed because can't call sendHTMails from event on\n Entry widget.\"\"\"\n \n self.sendHTMails ()\n\n def sendHTMails (self):\n \"\"\"Start the automation of sending HTMails.\"\"\"\n\n # If all textboxs contain text, execute.\n if self.validate_entry ():\n\n # Try to read the spreadsheet file.\n self.htmails_file = \\\n htmails_ss.htmails_ss (self.spreadsheet_path.get ())\n\n # Get the paths to the messages.\n message_paths = self.get_message_paths ()\n\n # See if there's no error with the templates.\n if '' in message_paths:\n showinfo (\"Error\",\n \"Error en el numero de mensajes. Revisar\")\n return None\n else:\n # Get all the data.\n data = self.htmails_file.get_fields ()\n\n # Sort the data by message number.\n # This is to avoid loading all 4 mail templates in memory.\n # Instead, load the one being used, and send all the messages\n # that require that message number.\n data = \\\n sorted (data,\n key = lambda player: player[self.htmails_file.preferences[\"Headers\"].index (\"Mensaje\")])\n\n # Start driver.\n self.driver = automation.ht_driver (self.combo_webdriver.get ())\n self.driver.visit_mainpage ()\n\n if self.no_login_info.get () == 1:\n login_values = None\n else:\n login_values = [self.user.get (), self.password.get ()]\n \n if self.driver.login (login_values):\n # `i' holds the template message being used.\n # Once all those mails are sent, increment it to use\n # the next template.\n i = 1\n\n # Read the mail_template.\n mail_template = read_textfile (message_paths[i - 1])\n\n # The file fallidos.txt will be created (or overwritten)\n # the first one mail fails. Then, all failed mails in the\n # current run will be appended.\n created_failed_file = False\n\n # For each row, customize the mail and send it.\n for field in data:\n # Change to the next template when needed.\n if (field[self.htmails_file.preferences[\"Headers\"].index (\"Mensaje\")] != i):\n i = field[self.htmails_file.preferences[\"Headers\"].index (\"Mensaje\")]\n mail_template = read_textfile (message_paths[i - 1])\n\n mail = self.customize_mail (mail_template,\n self.htmails_file.preferences[\"Headers\"],\n field)\n subject, content = mail.split (\"\\n\", 1)\n subject = get_subject (subject)\n content = get_content (content)\n\n # Try to send the mail, and if it was not sent\n # (a.k.a, driver.sendHTMail returns False), append data\n # to fallidos.txt\n\n # HACK ALERT: This is a quick hack to get it working\n # fast.\n # dir_access will be True if Owner ID is given.\n try:\n self.htmails_file.preferences[\"Headers\"].index (\"ID Manager\")\n except ValueError:\n dir_access = False\n else:\n dir_access = True\n \n if not (self.driver.sendHTMail (subject, content, field,\n 0,\n self.htmails_file.preferences[\"BlackList\"], dir_access)):\n if (not created_failed_file):\n create_failed_file (get_thisfile_directory () + \\\n os.pardir)\n created_failed_file = True\n \n # TODO: Add description of the reason it failed.\n dump_failed_email (field,\n get_thisfile_directory () + \\\n os.pardir)\n time.sleep (self.htmails_file.preferences[\"Seconds_wait\"])\n else:\n showinfo (\"Error\",\n \"El login no fue posible. 
Revisar campos\")\n\n # Exit driver\n self.driver.destroy ()\n\n else:\n showinfo (\"Error\", \"Uno o mas campos estan vacios\")\n\n def customize_mail (self, mail, reference, data):\n \"\"\"Replace keywords on the template.\"\"\"\n\n # Add Apodo, only if it is given.\n try:\n reference.index (\"Nombre Jugador\")\n except ValueError:\n if (mail.find (\"{0}\") != -1):\n mail = replace_keyword (mail, \"{0}\", \"\")\n mail = replace_keyword (mail, \"{6}\", \"\")\n else:\n try:\n reference.index (\"Apodo Jugador\")\n except ValueError:\n mail = replace_keyword (mail, \"{0}\",\n data[reference.index (\"Nombre Jugador\")])\n mail = replace_keyword (mail, \"{6}\", \"\")\n else:\n if data[reference.index (\"Apodo Jugador\")]:\n # Apodo is added before the surname, between \"'\" quotes.\n # If the player has two surnames, then it fails.\n # FIXME: Add it after the first name?\n name = data[reference.index(\"Nombre Jugador\")].rsplit (\" \", 1)\n mail = replace_keyword (mail, \"{0}\", name[0] + \" '\" + \\\n data[reference.index (\"Apodo Jugador\")] + \\\n \"' \" + name[1])\n else:\n mail = replace_keyword (mail, \"{0}\",\n data[reference.index(\"Nombre Jugador\")])\n\n try:\n reference.index (\"ID Jugador\")\n except ValueError:\n if (mail.find (\"{1}\") != -1):\n mail = replace_keyword (mail, \"{1}\", \"\")\n else:\n mail = replace_keyword (mail, \"{1}\",\n data[reference.index(\"ID Jugador\")])\n\n try:\n reference.index (\"Nombre Usuario\")\n except ValueError:\n if (mail.find (\"{2}\") != -1):\n mail = replace_keyword (mail, \"{2}\", \"\")\n else:\n mail = replace_keyword (mail, \"{2}\",\n data[reference.index(\"ID Jugador\")])\n\n try:\n reference.index (u\"Fecha de Promoción\")\n except ValueError: \n if (mail.find (\"{3}\") != -1):\n mail = replace_keyword (mail, \"{3}\", \"\")\n else:\n mail = replace_keyword (mail, \"{3}\",\n data[reference.index(u\"ID Jugador\")])\n \n mail = replace_keyword (mail, \"{4}\", self.htmails_file.get_thread_id())\n\n try:\n reference.index (u\"Condición\")\n except ValueError:\n if (mail.find (\"{5}\") != -1):\n mail = replace_keyword (mail, \"{5}\", \"\")\n else:\n mail = replace_keyword (mail, \"{5}\",\n data[reference.index(u\"Condición\")])\n\n # FIXME: If no thread link is given, then \"None\" persists.\n # That's why this line is required.\n mail = replace_keyword (mail, \"None\", \"\")\n\n return mail\n \n def rollover_enter (self, event):\n \"\"\"Set button to GROOVE.\"\"\"\n \n event.widget.config (relief = GROOVE)\n \n def rollover_leave (self, event):\n \"\"\"Set button to RAISED.\"\"\"\n \n event.widget.config (relief = RAISED)\n \n def change_config_file (self):\n \"\"\"Modifies config file.\"\"\"\n\n d = Dialog_config (self.frame,\n get_thisfile_directory () + os.pardir + os.sep + \\\n \"config.json\")\n \n def show_manual (self):\n manual_frame = Toplevel ()\n manual_frame.title (\"Manual\")\n\n msg = Label (manual_frame, text = \"Por favor, lee el archivo etc/TIPS\")\n msg.pack ()\n \n def show_information (self):\n info_frame = Toplevel ()\n info_frame.title (\"Informacion\")\n\n msg = Label (info_frame, text = \\\n \"\"\"Este programa fue creado por Mauro Aranda.\nVersion: 4.0\nmail: maurooaranda@gmail.com\n\nCopyright (C) 2019 Mauro Aranda.\"\"\")\n msg.pack ()\n\n def show_license (self):\n license_frame = Toplevel ()\n license_frame.title (\"Licencia\")\n\n msg = Label (license_frame, text = \"\"\"Copyright (C) 2020 Mauro Aranda.\n Automail-HT comes with ABSOLUTELY NO WARRANTY.\n You may redistribute copies of Automail-HT\n under the terms 
of the GNU General Public License.\"\"\")\n msg.pack ()\n\n def destroy (self):\n self.quit ()\n\n\n# Class Dialog_config: Implements a Dialog window, to perform changes\n# in `config.json' file.\n\nclass Dialog_config (tkSimpleDialog.Dialog):\n \"\"\"Dialog to perform changes in config file.\"\"\"\n\n def __init__ (self, parent, config_file_path):\n \"\"\"Initialize the Dialog, calling the parent constructor.\n Save the path to the config_file for reading and writing.\"\"\"\n\n self.config_file_path = config_file_path\n tkSimpleDialog.Dialog.__init__ (self, parent,\n title = \"Valores por defecto\")\n \n def body (self, master):\n \"\"\"Create widgets for every field, and set focus to user entry.\"\"\"\n \n try:\n config_file = open (self.config_file_path)\n self.default_values = json.load (config_file)\n config_file.close ()\n except IOError:\n showerror (\"Error\", \"No se encuentra el archivo config.json\")\n tkSimpleDialog.Dialog.cancel ()\n else:\n Label (master, text = \"Usuario:\").grid (row = 0, column = 0,\n sticky = W)\n Label (master, text = \"Directorio Default:\").grid (row = 1,\n column = 0,\n sticky = W)\n Label (master, text = \"Navegador Default:\").grid (row = 2,\n column = 0,\n sticky = W)\n self.txt_user = Entry (master)\n self.txt_user.grid (row = 0, column = 1)\n self.txt_user.insert (0, self.default_values[\"Usuario\"])\n self.txt_directory = Entry (master)\n self.txt_directory.grid (row = 1, column = 1)\n self.txt_directory.insert (0,\n self.default_values[\"Directorio Default\"])\n self.txt_browser = Entry (master)\n self.txt_browser.grid (row = 2, column = 1)\n self.txt_browser.insert (0, self.default_values[\"Navegador Default\"])\n\n # Return self.txt_user, so it gets initial focus.\n return self.txt_user\n\n def validate (self):\n \"\"\"Validate default fields input.\"\"\"\n\n # Check for a supported driver, and for a directory that exists.\n try:\n installed_webdrivers = get_installed_webdrivers ()\n installed_webdrivers.index (self.txt_browser.get ())\n except ValueError:\n showwarning (\"Navegador incorrecto\",\n \"No se pudo encontrar el driver del navegador seleccionado.\\nNavegadores instalados: \" + str (installed_webdrivers))\n return 0\n else:\n input_path = self.txt_directory.get ()\n \n if input_path == \"~\" or input_path == \"src\":\n return 1\n else:\n if os.path.isdir (input_path):\n return 1\n else:\n showwarning (\"Directorio incorrecto\", \"El directorio especificado no existe\")\n return 0\n \n def apply(self):\n \"\"\"Apply changes to default values.\"\"\"\n\n try:\n config_file = open (self.config_file_path, 'w')\n except IOError:\n showerror (\"Error\", \"No se pudo abrir config.json para escribir los valores por defecto\")\n tkSimpleDialog.Dialog.cancel ()\n else:\n self.default_values[\"Usuario\"] = self.txt_user.get ()\n self.default_values[\"Navegador Default\"] = self.txt_browser.get ()\n self.default_values[\"Directorio Default\"] = self.txt_directory.get ()\n json.dump (self.default_values, config_file, indent = 4,\n separators = (\",\", \": \"))\n config_file.close ()\n\n### Utils.\n# TODO: Find a place where to put this functions. \ndef get_thisfile_directory ():\n \"\"\"Helper function for obtaining directory of the software.\"\"\"\n \n return (os.path.dirname (os.path.realpath (__file__)) + os.sep)\n\ndef get_installed_webdrivers ():\n \"\"\"Return the installed webdrivers on the machine.\"\"\"\n\n # chromedriver used to support -V too, but now they don't. 
Oh well...\n webdrivers = [webdriver.Webdriver (\"geckodriver\", \"Firefox\",\n {\"version\": \"-V\"}),\n webdriver.Webdriver (\"chromedriver\", \"Chrome\",\n {\"version\": \"--version\"})]\n ret = []\n for wd in webdrivers:\n if wd.is_installed_p ():\n ret.insert (len (ret), wd.webbrowser)\n \n return ret\n\ndef read_textfile (filepath):\n \"\"\"Read an entire textfile, with 'latin-1' support.\"\"\"\n \n try:\n f = io.open (filepath, 'rt', encoding = 'latin-1')\n text = f.read ()\n f.close ()\n except:\n return \"\"\n else:\n return text\n \ndef replace_keyword (text, keyword, replacement):\n \"\"\"Replace the keyword given, with replacement, in text.\"\"\"\n\n # When no replacement is given, simply replace it with \"\".\n if (replacement or replacement != \"None\"):\n return text.replace (keyword, replacement)\n else:\n return text.replace (keyword, \"\")\n \ndef get_subject (subject):\n \"\"\"Erase the tags used for the subject.\"\"\"\n \n subject = subject.replace (\"[Asunto]\", \"\")\n subject = subject.replace (\"[/Asunto]\", \"\")\n\n return subject\n\ndef get_content (content):\n \"\"\"Makes sure the content is well formated.\"\"\"\n\n # Delete trailing newline.\n return content.replace (\"\\n\", \"\", 1)\n\ndef create_failed_file (folder):\n \"\"\"Create a textfile or open it in overwrite mode.\"\"\"\n \n try:\n f = io.open (folder + os.sep + 'fallidos.txt', \"w+\",\n encoding = 'latin-1')\n f.write (u\"\")\n f.close ()\n except IOError:\n None\n \ndef dump_failed_email (failed_email, folder):\n \"\"\"Function that puts in textfile the HT-Mails that couldn't be sent.\"\"\"\n\n try:\n f = io.open (folder + os.sep + 'fallidos.txt', 'a',\n encoding = 'latin-1')\n f.write (failed_email[0] + '\\n')\n f.close ()\n except IOError:\n None\n","repo_name":"maurooaranda/Automail-HT","sub_path":"src/HTMailsGUI.py","file_name":"HTMailsGUI.py","file_ext":"py","file_size_in_byte":26746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"31034801645","text":"from enum import Enum\nfrom typing import Iterable, Set, Tuple\n\n\nclass Coordinate(tuple):\n def __new__(cls, a: int, b: int):\n return super(Coordinate, cls).__new__(Coordinate, (a, b))\n\n def __add__(self, other):\n assert isinstance(other, Coordinate)\n return Coordinate(self[0] + other[0], self[1] + other[1])\n\n\nclass Direction(Enum):\n Left = Coordinate(-1, 0)\n Right = Coordinate(1, 0)\n Up = Coordinate(0, -1)\n Down = Coordinate(0, 1)\n\n\nDirections = Tuple[Direction, ...]\n\n\ndef parse_directions(text: str) -> Iterable[Directions]:\n mapping = {\n \"D\": Direction.Down,\n \"U\": Direction.Up,\n \"L\": Direction.Left,\n \"R\": Direction.Right,\n }\n for line in text.splitlines():\n yield tuple(mapping[d] for d in line)\n\n\ndef follow_directions(start: Coordinate, directions: Directions, valid_options: Set[Coordinate]) -> Coordinate:\n position = start\n for direction in directions:\n new_position = position + direction.value\n if new_position not in valid_options:\n continue\n position = new_position\n return Coordinate(position[0], position[1])\n","repo_name":"mcuelenaere/advent_of_code","sub_path":"advent_of_code/year2016/day02/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"15512718856","text":"import imageio\nimport os\nimport sys\nimport time\nimport shutil\nimport click\nfrom click._compat import raw_input\nimport 
timeit\nimport datetime\nfrom moviepy.editor import *\nfrom os import startfile\nuser_input= raw_input(\"Enter path name: \")\nuser_input1 = raw_input(\"Enter file name: \")\n\npath_name = os.path.abspath(user_input)\n\ndef clipMerger():\n name2 = raw_input(\"Enter second clip name(File has to be in same folder as other one): \")\n clip1 = VideoFileClip(user_input1)\n clip2 = VideoFileClip(name2)\n clip_name = os.path.splitext(user_input1)[0]\n clip_name1 = os.path.splitext(name2)[0]\n final = concatenate_videoclips([clip1, clip2])\n final.write_videofile('merged.mp4', threads=4, fps=24, codec='libx264', preset='ultrafast')\n Current_Date = datetime.datetime.today().strftime(\"%H.%M.%S\")\n os.rename('merged.mp4', 'merged_' + str(clip_name) + '_' + str(clip_name1) + '_' + str(Current_Date) + '.mp4')\n def cls():\n print(\"\\n\" * 100)\n cls()\n print(\"Clip was created, you can find it at: \" + path_name + \" with file name: \" + 'merged_' + str(clip_name) + '_' + str(clip_name1) + '_' + str(Current_Date))\n user_input6 = raw_input(\"Do you want to open that file?: \")\n if user_input6 == 'yes':\n startfile('merged_' + str(clip_name) + '_' + str(clip_name1) + '_' + str(Current_Date) + '.mp4')\n else:\n exit()\n\n\ndef textadder():\n name = os.path.splitext(user_input1)[0]\n clip = VideoFileClip(user_input1)\n desiredText = raw_input(\"What text you want to add?: \")\n txt = TextClip(desiredText, font='Courier', fontsize=30, color='white')\n txt = txt.set_pos('right', 'bottom').set_duration(5)\n\n Current_Date = datetime.datetime.today().strftime(\"%H.%M.%S\")\n video = CompositeVideoClip([clip, txt])\n video.write_videofile('converted.mp4', threads=4, fps=24, codec='libx264', preset='ultrafast')\n os.rename('converted.mp4', 'new_' + str(name) + '_' + str(Current_Date) + '.mp4')\n def cls():\n print(\"\\n\" * 100)\n cls()\n print(\"Clip was created, you can find it at: \" + path_name + \" with file name: \" + 'new_' + str(name))\n user_input6 = raw_input(\"Do you want to open that file?: \")\n if user_input6 == 'yes':\n startfile('new_' + str(name) + '_' + str(Current_Date) + '.mp4')\n else:\n exit()\n\ndef converter(): # converting function\n clip = VideoFileClip(user_input1)\n user_input2 = raw_input(\"How long the video should be?: \")\n clip = clip.subclip(0 , user_input2)\n\n name = os.path.splitext(user_input1)[0] # getting name of file without extension\n user_input4 = raw_input(\"Should I optimize clip?: \")\n if user_input4 == 'yes':\n start = timeit.default_timer()\n clip.write_videofile('converted.mp4', preset='ultrafast', codec='libx264') # creating new video file with following settings\n stop = timeit.default_timer()\n print('This took time: ', stop - start, ' seconds')\n Current_Date = datetime.datetime.today().strftime(\"%H.%M.%S\")\n os.rename('converted.mp4', 'new_' + str(name) + '_' + str(Current_Date) + '.mp4') # renaming file to its original name\n def cls(): print (\"\\n\" * 100)\n cls()\n print(\"Clip was created, you can find it at: \" + path_name + \" with file name: \" + 'new_' + str(name))\n user_input6 = raw_input(\"Do you want to open that file?: \")\n if user_input6 == 'yes':\n startfile('new_' + str(name) + '_' + str(Current_Date) + '.mp4')\n else:\n exit()\n else:\n user_input5 = raw_input(\"Should I use 30 or 60fps mode?[60fps mode is faster]: \")\n if user_input5 == \"30\":\n clip.write_gif('converted.gif', fps=30, program='ffmpeg')\n Current_Date = datetime.datetime.today().strftime(\"%H.%M.%S\")\n os.rename('converted.gif', 'new_' + str(name) + '_' + 
str(Current_Date) + '.gif')\n def cls(): print (\"\\n\" * 100)\n cls()\n print(\"Clip was created, you can find it at: \" + path_name + \" with file name: \" + 'new_' + str(name))\n user_input6 = raw_input(\"Do you want to open that file?: \")\n if user_input6 == 'yes':\n startfile('new_' + str(name) + '_' + str(Current_Date) + '.gif')\n else:\n exit()\n else:\n clip.write_gif('converted.gif', fps=60, program='ffmpeg')\n Current_Date = datetime.datetime.today().strftime(\"%H.%M.%S\")\n os.rename('converted.gif', 'new_' + str(name) + '_' + str(Current_Date) + '.gif')\n def cls():print(\"\\n\" * 100)\n cls()\n print(\"Clip was created, you can find it at: \" + path_name + \" with file name: \" + 'new_' + str(name))\n user_input6 = raw_input(\"Do you want to open that file?: \")\n if user_input6 == 'yes':\n startfile('new_' + str(name) + '_' + str(Current_Date) + '.gif')\n else:\n exit()\n\n\n\n\ndef action():\n\n action = raw_input(\"What do you want to do? \\n\"\n \"1: Add text to a clip \\n\"\n \"2: Edit clip Answer \\n\"\n \"3: Merge two clips \\n\"\n \"Answer: \")\n if action == \"1\":\n textadder()\n if action == \"2\":\n converter()\n else:\n clipMerger()\n\nif path_name == 'C:\\\\Users\\\\rt\\\\PycharmProjects\\\\gifconverter':\n action = raw_input(\"What do you want to do? \\n\"\n \"1: Add text to a clip \\n\"\n \"2: Edit clip Answer \\n\"\n \"3: Merge two clips \\n\"\n \"Answer: \")\n if action == \"1\":\n textadder()\n if action == \"2\":\n converter()\n else:\n clipMerger()\nelse:\n user_input2 = raw_input(\"File is not in working directory! Do you want to move file?: \")\n if user_input2 == 'yes':\n path = path_name + '/' + user_input1\n moveto = 'C:/Users/rt/PycharmProjects/gifconverter'\n src = path\n dst = moveto\n shutil.move(src, dst)\n user_input3 = raw_input(\"File was moved, do you want to continue?: \")\n if user_input3 == 'yes':\n action()\n else:\n exit()\n\n\n\n","repo_name":"devv3/ClipMaker","sub_path":"gifconverter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"3993477115","text":"\"\"\"created_tables_added_two_more\n\nRevision ID: 342fe620a88d\nRevises: 4ac3290acdce\nCreate Date: 2022-04-20 11:24:11.854193\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '342fe620a88d'\ndown_revision = '4ac3290acdce'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('posts',\n sa.Column('post_content', sa.Text(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('numbers_of_likes', sa.Integer(), nullable=True),\n sa.Column('numbers_of_comments', sa.Integer(), nullable=True),\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('date_of_creation', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id')\n )\n op.create_table('comments',\n sa.Column('comment_content', sa.Text(), nullable=True),\n sa.Column('post_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('date_of_creation', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id')\n )\n op.create_table('like',\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('post_id', sa.Integer(), nullable=False),\n sa.Column('like', sa.Boolean(), nullable=True),\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('date_of_creation', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('like')\n op.drop_table('comments')\n op.drop_table('posts')\n # ### end Alembic commands ###\n","repo_name":"zzllooccaa/friends","sub_path":"alembic/versions/342fe620a88d_created_tables_added_two_more.py","file_name":"342fe620a88d_created_tables_added_two_more.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40302731231","text":"import os\nimport h5py\nimport numpy as np\nfrom keras.preprocessing import image\nvideo_path = '/home/VideoData/UCF-RGB/UCF-frame/'\nvideo_file = os.listdir(video_path)[1:]\nh5file_path= '/home/VideoData/UCF-h5/UCF-Augmentation/'\n\nvideo_class_key_value = {}\nwith open('/home/VideoData/ucfTrainTestlist/classInd.txt', 'r') as fp:\n for line in fp.readlines():\n key = line.strip().split(' ')[1].upper()\n value = line.strip().split(' ')[0]\n video_class_key_value[key] = value\nfp.close()\n\nfp = open('/home/VideoData/UCF-h5/UCF-Augmentation/ucflist-part1.txt','w')\n\nX_train = []\ny_train = []\nvideo_count = 1\noriginal_part = 0\nfor video in video_file:\n\n video_class = video.split('_')[1].upper()\n video_frames = os.listdir(video_path+'/'+video)\n frame_number = len(video_frames)\n skip_length = frame_number // 16\n\n for i in range(skip_length):\n current_part = video_count // 50000\n\n frame_select = tuple(range(1, skip_length*16, skip_length))\n video_cube = np.zeros((112, 112, 16, 3), dtype='uint8')\n frame_count = 0\n for frame in frame_select:\n if frame < 10:\n temp = 'frame-000'+str(frame)+'.jpg'\n elif frame < 100:\n temp = 'frame-00'+str(frame)+'.jpg'\n elif frame < 1000:\n temp = 'frame-0'+str(frame)+'.jpg'\n else:\n temp = 'frame-'+str(frame)+'.jpg'\n frame_path = video_path + '/'+ video + '/' + temp\n frame_image = image.load_img(frame_path, target_size=(112, 112))\n frame_array = image.img_to_array(frame_image)\n 
video_cube[:,:,frame_count,:] = frame_array\n frame_count += 1\n print('Read ' + video + ': ' + str(i+1) + '/' + str(skip_length) + ' - frame ' + str(frame_count))\n X_train.append(video_cube)\n y_train.append(int(video_class_key_value[video_class]) - 1)\n fp.write(video)\n fp.write(' ')\n fp.write(video_class_key_value[video_class])\n fp.write('\\n')\n video_count += 1\n\n if not current_part == original_part:\n print('CurrentPart %d, OriginalPart %d' % (current_part, original_part))\n print('VideoCount ', video_count-1)\n h5file = h5py.File(h5file_path+'UCF-part'+str(current_part)+'.h5','w')\n X_train = np.asarray(X_train)\n y_train = np.asarray(y_train)\n print('Create X_train ...')\n h5file.create_dataset('X_train', data=X_train)\n print('Create y_train ...')\n h5file.create_dataset('y_train', data=y_train)\n print('Create h5 file Done!')\n h5file.close()\n X_train = []\n y_train = []\n fp.close()\n fp = open('/home/VideoData/UCF-h5/UCF-Augmentation/ucflist-part'+str(current_part+1)+'.txt','w')\n\n if video_count == 148933:\n print('VideoCount ', video_count-1)\n h5file = h5py.File(h5file_path+'UCF-part'+str(current_part+1)+'.h5','w')\n X_train = np.asarray(X_train)\n y_train = np.asarray(y_train)\n print('Create X_train ...')\n h5file.create_dataset('X_train', data=X_train)\n print('Create y_train ...')\n h5file.create_dataset('y_train', data=y_train)\n print('Create h5 file Done!')\n h5file.close()\n X_train = []\n y_train = []\n fp.close()\n\n original_part = current_part\n\n\n\n\n\nindex = []\nwith open('/home/VideoData/ucfTrainTestlist/testlist03.txt','r') as fp:\n for line in fp.readlines():\n index.append(line.strip().split('/')[1].split('.avi')[0])\nfp.close()\n\nfw = open('/home/VideoData/ucfTrainTestlist/testlist03-aug.txt','w')\nfor i in range(15):\n content = []\n with open('/home/VideoData/UCF-h5/UCF-Augmentation-224/ucflist-224-part'+str(i+1)+'.txt') as fp:\n for line in fp.readlines():\n content.append(line.strip().split(' ')[0])\n fp.close()\n for ind in index:\n if ind in content:\n cnt = content.count(ind)\n beg = content.index(ind)\n for c in range(cnt):\n print(beg+c+i*10000)\n num = str(beg+c+i*10000)\n fw.write(num)\nfw.close()","repo_name":"Tsingzao/shenlan_actionrecognition","sub_path":"VideoPreProcessingAndDataAugmentation.py","file_name":"VideoPreProcessingAndDataAugmentation.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"73195103034","text":"import os\nfrom flask import request\nfrom flask import Flask\nfrom gcp import access_secret_version, get_notifications, delete_notifications\nfrom config import FB_CHALLENGE, PROJECT_ID\nimport logging_handler\nimport logging\nfrom fb import handleMessage, handlePostback, handleOptin, generate_one_time_template\nfrom datetime import date, timedelta\n\napp = Flask(__name__)\n\nCHALLENGE = access_secret_version(\n PROJECT_ID, FB_CHALLENGE[\"name\"], FB_CHALLENGE[\"version\"]\n)\n\n\n@app.route(\"/webhook\", methods=[\"GET\"])\ndef verify():\n # when the endpoint is registered as a webhook, it must echo back\n # the 'hub.challenge' value it receives in the query arguments\n if request.args.get(\"hub.mode\") == \"subscribe\" and request.args.get(\n \"hub.challenge\"\n ):\n if not request.args.get(\"hub.verify_token\") == CHALLENGE:\n return \"Verification token mismatch\", 403\n return request.args[\"hub.challenge\"], 200\n\n return \"Waiting for verification\", 200\n\n\n@app.route(\"/webhook\", 
methods=[\"POST\"])\ndef webhook():\n data = request.json\n print(data)\n if data[\"object\"] == \"page\":\n for entry in data[\"entry\"]:\n for messaging_event in entry[\"messaging\"]:\n psid = messaging_event.get(\"sender\").get(\"id\")\n\n if messaging_event.get(\"message\"): # someone sent us a message\n handleMessage(psid, messaging_event.get(\"message\"))\n\n elif messaging_event.get(\"postback\"):\n handlePostback(psid, messaging_event.get(\"postback\"))\n elif messaging_event.get(\"optin\"):\n handleOptin(psid, messaging_event.get(\"optin\"))\n\n return \"ok\", 200\n else:\n return \"Unknown object in body\", 404\n\n\n@app.route(\"/notifications\", methods=[\"GET\"])\ndef notifications():\n today = date.today()\n yesterday = date.today() - timedelta(days=1)\n today = str(today.year) + str(today.month) + str(today.day)\n yesterday = str(yesterday.year) + str(yesterday.month) + str(yesterday.day)\n notifications = get_notifications(today)\n y_notifs = get_notifications(yesterday)\n delete_notifications(today)\n delete_notifications(yesterday)\n notifications = notifications + y_notifs\n for notif in notifications:\n generate_one_time_template(\n notif[\"token\"], int(notif[\"cur_assignment\"]), int(notif[\"skill\"])\n )\n return \"ok\", 200\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8081)))\n","repo_name":"Gaikanomer9/mentor-chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"25879279406","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom six import print_\nfrom .utils import Color as c\n\ntry:\n from argparse import ArgumentParser as ArgParser\nexcept ImportError:\n from optparse import OptionParser as ArgParser\n\n\ndef get_parser():\n parser = ArgParser(description=\"ServerScope.io benchmark kit\")\n # Give optparse.OptionParser an `add_argument` method for\n # compatibility with argparse.ArgumentParser\n try:\n parser.add_argument = parser.add_option\n except AttributeError:\n pass\n\n parser.add_argument('-p', '--plan', help='Required. Server provider and plan' +\n ' names as follows: \"Plan name|Provider name\"')\n parser.add_argument('-e', '--email', help='Required. 
An e-mail to receive online report link')\n parser.add_argument('-i', '--include',\n help='Comma-separated list of benchmarks to run if you don\\'t want to ' +\n 'run all of them: dd, fio, speedtest, unixbench')\n parser.add_argument('--locale', default=\"en\")\n\n options = parser.parse_args()\n if isinstance(options, tuple):\n args = options[0]\n else:\n args = options\n\n if args is not dict:\n args = vars(args)\n\n mandatories = ['plan', 'email']\n for m in mandatories:\n if (m not in args) or args[m] is None:\n print_(\"Required parameter \" + c.RED + c.BOLD + m + c.RESET + \" is missing\")\n parser.print_help()\n sys.exit(1)\n\n return args\n","repo_name":"distonocalm/serverscope","sub_path":"serverscope_benchmark/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"72151183357","text":"t = int(input())\n\nfor i in range(t):\n tmp = input().split(\" \")\n cnt = [tmp.count(\"0\"), tmp.count(\"1\"), tmp.count(\"2\")]\n if(cnt[1]==cnt[2]):\n print(\"Draw\")\n elif cnt[1]>cnt[2]:\n print(\"India\")\n else:\n print(\"England\")","repo_name":"AbhinavChowdaryM98/Competitive-Programming","sub_path":"codechef/Snackdown_2021_Qualifier/test_match.py","file_name":"test_match.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36749499513","text":"import code\n\ndef test(data):\n print(data)\n solution = code.Codec()\n tree = solution.deserialize(data)\n data2 = solution.serialize(tree)\n print(data2)\n assert(data == data2)\n\ntest([1,2,None,None,3,4,None,None,5,None,None])\ntest([None])\ntest([1,None,None])\n","repo_name":"luzi82/codelog.leetcode","sub_path":"0002xx/000297/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"3422306675","text":"import logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nlogger.info('Start Reading Database')\n\n#read Database here\nrecords = {'Shruthi':111, 'Pavan':222}\n\nlogger.debug('Reacords : %s',records)\nlogger.info('Updating Records')\n\n#update records here\nlogger.info('Records Updated')\n\nlogger.warn('close DB Connection')","repo_name":"saipavandarsi/Python-Learnings","sub_path":"Logging/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"11920083931","text":"import collections\nimport random\n\nfrom .utils import read_words_file, get_random_word\n\n\nclass WordScramble:\n \"\"\"\n Implementation of a simple Word Scramble game.\n The player is given a scrambled word and must guess the original word.\n \"\"\"\n DEFAULT_ATTEMPTS = 3\n DEFAULT_DIFFICULTY = 'easy'\n\n def __init__(self) -> None:\n words = read_words_file()\n self.words = {\n 'easy': list(filter(lambda x: len(x) <= 4, words)),\n 'medium': list(filter(lambda x: 5 <= len(x) <= 7, words)),\n 'hard': list(filter(lambda x: len(x) >= 8, words))\n }\n self.word = ''\n self.scrambled_word = ''\n self.attempts = 0\n self.reset()\n\n def reset(self, difficulty: str = DEFAULT_DIFFICULTY, attempts: int = DEFAULT_ATTEMPTS) -> None:\n \"\"\"\n Resets the game.\n\n :param difficulty: The difficulty level.\n :param attempts: The number of attempts.\n :return: None\n 
\"\"\"\n word_list = self.get_word_list(difficulty)\n self.word = get_random_word(word_list)\n self.scrambled_word = self.scramble_word(self.word)\n self.attempts = attempts\n\n @staticmethod\n def scramble_word(word: str) -> str:\n \"\"\"\n Scrambles a word.\n\n :param word: The word to scramble.\n :return: The scrambled word.\n \"\"\"\n while True:\n word_chars = list(word)\n random.shuffle(word_chars)\n shuffled_word = ''.join(word_chars)\n if shuffled_word != word:\n return shuffled_word\n\n def get_word_list(self, difficulty: str) -> list[str]:\n \"\"\"\n Gets a list of words based on the difficulty level.\n\n :param difficulty: The difficulty level.\n :return: A list of words.\n \"\"\"\n if difficulty not in self.words:\n raise ValueError(f\"Difficulty level must be one of {list(self.words.keys())}\")\n return self.words[difficulty]\n\n def validate_input(self, user_input: str) -> bool:\n \"\"\"\n Validates the user's input.\n\n :param user_input: The user's input.\n :return: True if the user input contains same characters as the scrambled word, False otherwise.\n \"\"\"\n if collections.Counter(user_input) == collections.Counter(self.scrambled_word):\n return True\n return False\n\n def guess(self, user_input: str) -> bool:\n \"\"\"\n Checks if the user's guess is correct.\n\n :param user_input: The user's input.\n :return: True if the guess is correct, False otherwise.\n \"\"\"\n if user_input == self.word:\n print(f\"You got it! The word was: {self.word}\")\n return True\n\n self.attempts -= 1\n if self.attempts:\n print(f\"Incorrect. You have {self.attempts} attempt(s) left. Good Luck!\")\n else:\n print(f\"Incorrect. You have no attempts left. The word was {self.word}.\")\n return False\n\n def play(self, difficulty: str = DEFAULT_DIFFICULTY, attempts: int = DEFAULT_ATTEMPTS) -> None:\n \"\"\"\n Plays the game.\n\n :param difficulty: The difficulty level.\n :param attempts: The number of attempts.\n :return: None\n \"\"\"\n self.reset(difficulty=difficulty, attempts=attempts)\n\n print(f\"Welcome to Word Scramble Level {difficulty.capitalize()}! 
You have {attempts} attempts.\")\n print(\"Unscramble the word:\", self.scrambled_word)\n\n while self.attempts:\n user_input = input('> ').lower().strip()\n if self.validate_input(user_input):\n if self.guess(user_input):\n break\n else:\n print(\"Guess must contain the same characters as the scrambled word.\")\n","repo_name":"aavishkar6/3-python-package-exercise-prime-order","sub_path":"src/pygamebox/word_scramble.py","file_name":"word_scramble.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"96"} +{"seq_id":"14047416629","text":"from Core.Fonctions.AuteurIcon import auteur\nfrom Core.Fonctions.Embeds import embedAssertClassic, sendEmbed\nfrom Core.Fonctions.setMaxPage import setMax, setPage\nfrom Stats.Embeds.Central import statsEmbed\nfrom Stats.SQL.ConnectSQL import connectSQL\n\ntableauMois={\"01\":\"Janvier\",\"02\":\"Février\",\"03\":\"Mars\",\"04\":\"Avril\",\"05\":\"Mai\",\"06\":\"Juin\",\"07\":\"Juillet\",\"08\":\"Aout\",\"09\":\"Septembre\",\"10\":\"Octobre\",\"11\":\"Novembre\",\"12\":\"Décembre\",\"TO\":\"Année\",\"janvier\":\"01\",\"février\":\"02\",\"mars\":\"03\",\"avril\":\"04\",\"mai\":\"05\",\"juin\":\"06\",\"juillet\":\"07\",\"aout\":\"08\",\"septembre\":\"09\",\"octobre\":\"10\",\"novembre\":\"11\",\"décembre\":\"12\",\"glob\":\"GL\",\"to\":\"TO\"}\ndictOption={\"tortues\":\"Tortues\",\"tortuesduo\":\"TortuesDuo\",\"trivialversus\":\"TrivialVersus\",\"trivialbr\":\"TrivialBR\",\"trivialparty\":\"TrivialParty\",\"p4\":\"P4\",\"bataillenavale\":\"BatailleNavale\"}\ndictNoms={\"culture\":0,\"divertissement\":1,\"sciences\":2,\"mythologie\":3,\"sport\":4,\"géographie\":5,\"histoire\":6,\"politique\":7,\"art\":8,\"célébrités\":9,\"animaux\":10,\"véhicules\":11,\"streak\":\"Streak\"}\n\nasync def statsTrivial(ctx,turn,react,ligne,bot,option):\n try:\n connexionCMD,curseurCMD=connectSQL(ctx.guild.id,\"Commandes\",\"Guild\",None,None)\n if not react:\n if option==\"trivialperso\":\n table=\"trivial{0}\".format(ctx.author.id)\n db=ctx.author.id\n mode=\"perso\"\n tri=\"expDesc\"\n else:\n db=\"ranks\"\n tri=\"countDesc\"\n if len(ctx.args)>2:\n table=\"trivial{0}\".format(dictNoms[ctx.args[2].lower()])\n mode=ctx.args[2].lower()\n else:\n table=\"trivial12\"\n mode=\"général\"\n\n curseurCMD.execute(\"INSERT INTO commandes VALUES({0},{1},'trivial','{2}','{3}','{4}','{5}','None',1,1,'{6}',False)\".format(ctx.message.id,ctx.author.id,option,db,table,mode,tri))\n ligne=curseurCMD.execute(\"SELECT * FROM commandes WHERE MessageID={0}\".format(ctx.message.id)).fetchone()\n else:\n db,table,mode=ligne[\"Args1\"],ligne[\"Args2\"],ligne[\"Args3\"]\n \n connexion,curseur=connectSQL(\"OT\",db,\"Trivial\",None,None)\n\n pagemax=setMax(curseur.execute(\"SELECT COUNT() as Nombre FROM {0}\".format(table)).fetchone()[\"Nombre\"])\n\n page=setPage(ligne[\"Page\"],pagemax,turn)\n\n embed=await statsEmbed(table,ligne,page,pagemax,option,ctx.guild,bot,False,False,curseur)\n embed.title=\"Classement Trivial Mondial {0}\".format(mode)\n if option==\"trivialperso\":\n user=ctx.guild.get_member(ligne[\"AuthorID\"])\n embed=auteur(user.id,user.name,user.avatar,embed,\"user\")\n else:\n embed=auteur(ctx.guild.get_member(699728606493933650),None,None,embed,\"olbor\")\n embed.colour=0x3498db\n await sendEmbed(ctx,embed,react,True,curseurCMD,connexionCMD,page,pagemax)\n except:\n if react:\n await ctx.reply(embed=embedAssertClassic(\"Impossible de trouver ce que vous cherchez.\\nLe classement cherché n'existe plus ou alors il 
y a un problème de mon côté.\"))\n else:\n await ctx.reply(embed=embedAssertClassic(\"Impossible de trouver ce que vous cherchez.\\Le classement cherché n'existe pas ou alors il y a un problème de mon côté.\\nVérifiez les arguments de la commande : {0}\".format(ctx.command.usage)))\n","repo_name":"OlborEgamorf/OlborTrack-Bot","sub_path":"Stats/Commandes/Trivial.py","file_name":"Trivial.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"7858218847","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\n\n#first arg is length\n## 2nd arg is frq\n\nfrq = 440\nsr = 44100\nlength = 5.0\n\nif(len(sys.argv)>1):\n\tlength = float(sys.argv[1])\n\t\nif(len(sys.argv)>2):\n\tfrq = float(sys.argv[2])\n\n\n\n\n\n\nt = np.arange(0, length, 1.0/sr)\ns = np.sin(2*np.pi* frq *t)\n\n\nplt.plot(t, s)\nplt.show()\n\n\ns *= 32767\ns = np.int16(s)\nprint(f\"rendering {frq}Hz_{length}Sec.wav\")\nwavfile.write(f\"{frq}Hz_{length}Sec.wav\", sr, s)\n\n\n","repo_name":"Metallicode/python_dsp","sub_path":"renderer/01_generate_sin.py","file_name":"01_generate_sin.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"96"} +{"seq_id":"74574197756","text":"import torch\nfrom torch import nn\nfrom torchtyping import TensorType\nfrom typing_extensions import Literal\nfrom typing import Optional\n\nimport nerfacc\nimport torch\nfrom torch import nn\nfrom torchtyping import TensorType\nfrom typing_extensions import Literal\n\nfrom nerfstudio.cameras.rays import RaySamples\n\n\nclass STDRender(nn.Module):\n \"\"\"Calculate std along the ray.\"\"\"\n\n @classmethod\n def forward(\n cls,\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ) -> TensorType[\"bs\":..., \"num_classes\"]:\n \"\"\"Calculate std along the ray.\"\"\"\n std = torch.std(weights, dim=-2)\n return std\n\n\nclass DepthRenderer(nn.Module):\n \"\"\"Calculate depth along ray.\n\n Depth Method:\n - median: Depth is set to the distance where the accumulated weight reaches 0.5.\n - expected: Expected depth along ray. 
Same procedure as rendering rgb, but with depth.\n\n Args:\n method: Depth calculation method.\n \"\"\"\n\n def __init__(self, method: Literal[\"median\", \"expected\"] = \"median\") -> None:\n super().__init__()\n self.method = method\n\n def forward(\n self,\n weights: TensorType[..., \"num_samples\", 1],\n ray_samples: RaySamples,\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[..., 1]:\n \"\"\"Composite samples along ray and calculate depths.\n\n Args:\n weights: Weights for each sample.\n ray_samples: Set of ray samples.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of depth values.\n \"\"\"\n if self.method == \"median\":\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n raise NotImplementedError(\n \"Median depth calculation is not implemented for packed samples.\"\n )\n cumulative_weights = torch.cumsum(\n weights[..., 0], dim=-1\n ) # [..., num_samples]\n split = (\n torch.ones((*weights.shape[:-2], 1), device=weights.device) * 0.5\n ) # [..., 1]\n median_index = torch.searchsorted(\n cumulative_weights, split, side=\"left\"\n ) # [..., 1]\n median_index = torch.clamp(median_index, 0, steps.shape[-2] - 1) # [..., 1]\n median_depth = torch.gather(\n steps[..., 0], dim=-1, index=median_index\n ) # [..., 1]\n ray_depth = median_depth\n if self.method == \"expected\":\n eps = 1e-10\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n depth = nerfacc.accumulate_along_rays(\n weights, ray_indices, steps, num_rays\n )\n accumulation = nerfacc.accumulate_along_rays(\n weights, ray_indices, None, num_rays\n )\n depth = depth / (accumulation + eps)\n else:\n depth = torch.sum(weights * steps, dim=-2) / (\n torch.sum(weights, -2) + eps\n )\n\n ray_depth = torch.clip(depth, steps.min(), steps.max())\n\n if True: # FIXME:\n # try:\n factor_rd_2_d = ray_samples.metadata[\"factor_depth_coords\"][:, 0]\n assert ray_depth.shape == factor_rd_2_d.shape\n depth = ray_depth * factor_rd_2_d\n # FIXME -> strange dim\n # import ipdb\n\n # ipdb.set_trace()\n # except:\n # import ipdb\n\n # ipdb.set_trace()\n return depth\n\n raise NotImplementedError(f\"Method {self.method} not implemented\")\n","repo_name":"robincourant/blunf","sub_path":"src/model_components/extra_renderer.py","file_name":"extra_renderer.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"44661416185","text":"from datetime import datetime, timedelta\nuser_birthday_input = input(\"Enter your birth date (format: DD/MM/YYYY): \")\n\n#convert the date string to datetime\n\nbirthday = datetime.strptime(user_birthday_input, \"%d/%m/%Y\")\nprint(birthday)\n# calculate the next birthday number of days\ncurr_date = datetime.now()\nnext_birthday = datetime(curr_date.year, birthday.month, birthday.day)\nif curr_date > next_birthday:\n next_birthday = next_birthday.replace(year = curr_date.year + 1)\n\ntime_till_next_birthday = next_birthday - curr_date\ndays_till_next_birthday = time_till_next_birthday.days\n\n# calculate exact float age\ntime_alive = curr_date - birthday\nexact_age = time_alive.days / 365\n\n#the day of the week the user born\nbirthday_weekday 
= birthday.strftime(\"%A\")\n\n\n# printing the output\n\nprint(\"Days until next birthday:\", days_till_next_birthday)\nprint(\"Exact float age:\", round(exact_age, 1))\nprint(\"Day of the week of birth:\", birthday_weekday)\n\n\n","repo_name":"zeroxiru/1.Sprints","sub_path":"Sprint106.1/khasish_class/nextBirthday.py","file_name":"nextBirthday.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9329040999","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 26 11:05:21 2018\n\n@author: Irah Wajchman\n\n@description:\n convert a shapefile in WGS84 to csv with headers 'easting northing yield epsg_code'\n easting and northing are in UTM\n uses 'Dry Yield' for yield column\n optional point filtering by kml \n\n@reference: https://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.htmlhttps://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.html\n\"\"\"\n\nfrom osgeo import ogr\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport utm\nimport argparse\n\ndef get_utm(lat, lng):\n east, north, zn_num, zn_ltr = utm.from_latlon(lat,lng)\n if zn_ltr in 'CDEFGHJKLM':\n epsg_code = 32700 + zn_num\n else:\n assert zn_ltr in 'NPQRSTUVWXX'\n epsg_code = 32600 + zn_num\n return {'easting':east,\n 'northing':north,\n 'epsg_code':epsg_code}\n\ndef get_features(layer):\n pt_list=[]\n for ftr in layer:\n ftr = json.loads(ftr.ExportToJson())\n lng, lat = ftr['geometry']['coordinates']\n yld = ftr['properties']['Dry_Yield']\n if yld and yld > 0:\n p = get_utm(lat, lng)\n p.update({\n 'longitude':lng,\n 'latitude':lat,\n 'yield':yld,\n })\n pt_list.append(p)\n df = pd.DataFrame(pt_list)\n return df\n\ndef main(\n shapefile = r'D:\\Steve_SA_winter_2017 Yield Data\\2017 Yield Data\\Clark Bros #30.shp',\n csvfile = r'D:\\Steve_SA_winter_2017 Yield Data\\sample.csv',\n kmlfile = r'D:\\Steve_SA_winter_2017 Yield Data\\sample_kml_fld30_export.kml',\n print_epsg = True,\n show_plot = True,\n ):\n \n # open shapefile\n dataSource = ogr.Open(shapefile, 0)\n layer = dataSource.GetLayer()\n \n # set filter if kml specified\n if kmlfile is not None:\n print('filtering to '+kmlfile)\n kml_datasource = ogr.Open(kmlfile)\n kml_layer = kml_datasource.GetLayer()\n for ftr in kml_layer:\n g = ftr.geometry()\n wkt = g.ExportToWkt()\n break # assume 1 feature\n layer.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))\n \n df = get_features(layer)\n assert len(df.epsg_code.unique()) #all points should be in same UTM zone\n \n if print_epsg:\n print('epsg:{}'.format(df.epsg_code.iloc[0]))\n \n if show_plot:\n plt.figure()\n plt.scatter(df.easting, df.northing)\n plt.show()\n \n df['easting northing yield epsg_code'.split()].to_csv(csvfile)\n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='input args')\n parser.add_argument('shapefile', type=str, help='shapefile (.shp) to convert (WGS84 latitude, longitude)')\n parser.add_argument('csvfile', type=str, help='output csv with 3 columns \"easting northing yield\" (UTM coordinates)')\n parser.add_argument('-kmlfile', type=str, help='filter points inside kml boundary (kml must contain a single polygon in WGS84 coords)')\n parser.add_argument('-print_epsg', action='store_true', help='use swith to show plots')\n parser.add_argument('-show_plot', action='store_true', help='use swith to show plots')\n args = parser.parse_args()\n 
main(**vars(args))\n\n\n","repo_name":"1rah/Yield-Map-Correlation","sub_path":"convert_shapefile_to_yield_csv.py","file_name":"convert_shapefile_to_yield_csv.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24372890110","text":"import pandas as pd\nimport os\nimport numpy as np\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport re\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\n\nporter = PorterStemmer()\nstop = stopwords.words('english')\n\n\ndef preprocessor(text):\n\ttext = re.sub('<[^>]*>', '', text)\n\temotions = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text)\n\ttext = re.sub('[\\W]+', ' ', text.lower()) + ','.join(emotions).replace('-', '')\n\t# print(emotions)\n\treturn text\n\n\ndef tokenizer_porter(text):\n\treturn [porter.stem(word) for word in text.split()]\n\n\ndef tokenizer(text):\n\treturn text.split()\n\n\nmovie_csv = '/Users/rick/src/ml_data/data/aclImdb_data/movie_data.csv'\ndf = pd.read_csv(movie_csv)\n# test = preprocessor('this :) is :( a test :-) !<>')\n# print(test)\n# test = tokenizer_porter(text='runners like running and thus they run')\n# print(test)\n# test = [w for w in tokenizer_porter('runners like running and thus they run')[-10:] if w not in stop]\n# print(test)\n\ndf['review'] = df['review'].apply(preprocessor)\n\nx_train = df.loc[:25000, 'review'].values\ny_train = df.loc[:25000, 'sentiment'].values\n\nx_test = df.loc[25000:, 'review'].values\ny_test = df.loc[25000:, 'sentiment'].values\n\ntfidf = TfidfVectorizer(strip_accents=None, lowercase=False, preprocessor=None)\n\nparam_grid = [\n\t{'vect__ngram_range': [(1, 1)],\n\t 'vect__stop_words': [stop, None],\n\t 'vect__tokenizer': [tokenizer_porter],\n\t 'clf__penalty': ['l1', 'l2'],\n\t 'clf__C': [1.0, 10.0, 100.0]},\n\n\t{'vect__ngram_range': [(1, 1)],\n\t 'vect__stop_words': [stop, None],\n\t 'vect__tokenizer': [tokenizer_porter],\n\t 'vect__use_idf': [False],\n\t 'vect__norm': [None],\n\t 'clf__penalty': ['l1', 'l2'],\n\t 'clf__C': [1.0, 10.0, 100.0]}\n]\nlr_tfidf = Pipeline([('vect', tfidf),\n\t\t\t\t\t ('clf', LogisticRegression(random_state=0))])\ngs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid,\n\t\t\t\t\t\t scoring='accuracy',\n\t\t\t\t\t\t cv=5, verbose=1,\n\t\t\t\t\t\t n_jobs=-1)\ngs_lr_tfidf.fit(x_train, y_train)\n\nprint('best param set %s' % gs_lr_tfidf.best_params_)\n\nprint('CV Accuracy: %3.f' % gs_lr_tfidf.best_score_)\nclf = gs_lr_tfidf.best_estimator_\nprint('Test Accuracy: %.3f' % (clf.score(x_test, y_test)))","repo_name":"rick00young/machine_learn","sub_path":"text_emotional_analysis/text_emotional_analysis.py","file_name":"text_emotional_analysis.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23913666762","text":"\"\"\"empty message\n\nRevision ID: 14d437c08eac\nRevises: \nCreate Date: 2021-05-17 14:06:09.243056\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '14d437c08eac'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('products',\n sa.Column('id', sa.String(length=56), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('deleted_at', sa.DateTime(), nullable=True),\n sa.Column('title', sa.String(length=100), nullable=False),\n sa.Column('description', sa.String(length=500), nullable=False),\n sa.Column('price_in_cents', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=True)\n op.create_table('users',\n sa.Column('id', sa.String(length=56), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('deleted_at', sa.DateTime(), nullable=True),\n sa.Column('first_name', sa.String(length=100), nullable=False),\n sa.Column('last_name', sa.String(length=500), nullable=False),\n sa.Column('birth_date', sa.Date(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_users_id'), table_name='users')\n op.drop_table('users')\n op.drop_index(op.f('ix_products_id'), table_name='products')\n op.drop_table('products')\n # ### end Alembic commands ###\n","repo_name":"dalmarcogd/mobstore","sub_path":"discounts/src/database/migration/versions/14d437c08eac_.py","file_name":"14d437c08eac_.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7348412830","text":"\r\n#interfacce RESTful\r\n\r\nfrom flask import Flask, request\r\nfrom waitress import serve\r\nimport logging\r\nimport common as com\r\nimport dataLayer as dl\r\nimport neuralNetwork as ann\r\nimport numpy as np\r\n\r\n#inizializza l'interfaccia rest\r\napp = Flask(__name__)\r\n\r\n\r\n#ping\r\n@app.route('/ping', methods=['GET'])\r\ndef ping():\r\n return \"it works!\", 200, {'ContentType':'text/html'} \r\n\r\n\t\r\n#inizializza la struttura del database\r\n@app.route('/init', methods=['GET'])\r\ndef init():\r\n try:\r\n #inizializza il db\r\n dl.clearAndInitDb()\r\n \r\n return \"init ok\", 200, {'ContentType':'text/html'} \r\n\t \r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, {'ContentType':'text/html'} \r\n\r\n\r\n#ritorna la lista delle aree\r\n@app.route(\"/getAreaList\", methods=['GET'])\r\ndef getAreaList():\r\n\r\n print(\"invocato metodo getAreaList\")\r\n \r\n try:\r\n #ottiene la lista delle aree dal database\r\n areaList = dl.getAreaListFromDb()\r\n \t\t\r\n #trasforma la lista di dictionary in stringa e torna l'output\r\n return com.json2Str(areaList), 200, {'ContentType':'application/json'} \r\n \r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, {'ContentType':'text/html'} \r\n\r\n\r\n#aggiunge un'area nel db\r\n@app.route(\"/addArea\", methods=['POST'])\r\ndef addArea():\r\n\r\n print(\"invocato metodo addArea\")\r\n \r\n try:\r\n #trasforma il bodyrequest in json\r\n area = com.bodyRequest2Json(request)\r\n \t\t\r\n #aggiunge l'area nel db\r\n dl.addAreaToDb(area)\r\n \r\n #torna la risposta\r\n return \"\", 200, {'ContentType':'application/json'} \r\n \r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, 
{'ContentType':'text/html'} \r\n\t\r\n\r\n#rimuove un'area dal db\r\n@app.route(\"/deleteArea\", methods=['POST'])\r\ndef deleteArea():\r\n\r\n print(\"invocato metodo deleteArea\")\r\n \r\n try:\r\n #trasforma il bodyrequest in json\r\n area = com.bodyRequest2Json(request)\r\n \r\n #cancella l'area dal db\r\n dl.deleteAreaToDb(area)\r\n \r\n #torna la risposta\r\n return \"\", 200, {'ContentType':'application/json'} \r\n \r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, {'ContentType':'text/html'} \r\n \r\n\r\n#acquisisce i dati\r\n@app.route('/sendData/', methods=['POST'])\r\ndef sendData(areaId):\r\n \r\n print(\"invocato metodo sendData con areaId: \", areaId)\r\n \r\n try:\r\n #trasforma il bodyrequest in json\r\n inputJson = com.bodyRequest2Json(request)\r\n #print(inputJson)\r\n \r\n #itera l'array di scansioni, ogni scansione contiene una wifiList da inserire nel db\r\n for wifiList in inputJson:\r\n \r\n #salva le scansioni wifi sul database\r\n dl.saveWifiScansToDb(areaId, wifiList)\r\n \t\r\n \r\n #torna la risposta\r\n return \"\", 200, {'ContentType':'application/json'} \r\n\r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, {'ContentType':'text/html'} \r\n\t\r\n\r\n#avvia il training\r\n@app.route('/training', methods=['GET'])\r\ndef training():\r\n \r\n print(\"invocato metodo training\")\r\n\t\r\n try:\r\n #costruisce i dati\r\n X, Y = ann.makeDataFromDb()\r\n \r\n #addestra l'ann\r\n ann.buildAndFitAnn(X, Y)\r\n \r\n #torna la risposta\r\n return \"\", 200, {'ContentType':'application/json'} \r\n\r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, {'ContentType':'text/html'} \r\n \r\n\r\n#effettua una predict\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n \r\n print(\"invocato metodo predict\")\r\n\r\n try:\r\n #trasforma il bodyrequest in json\r\n inputJson = com.bodyRequest2Json(request)\r\n #print(inputJson)\r\n\r\n #inizializza la matrice di input\r\n X = []\r\n \r\n #itera l'array di scansioni, ogni scansione contiene una wifiList da usare per effettuare una previsione\r\n for wifiList in inputJson:\r\n \r\n #accumula i segnali intercettati ottenendo una matrice\r\n x = ann.makeInputMatrixFromScans(wifiList)\r\n if(len(X) == 0):\r\n X = x\r\n else:\r\n X = np.vstack((X, x))\r\n \r\n #effettua una predict dell'area\r\n predictArea = ann.predictArea(X)\r\n \r\n #trasforma il json di risposta in stringa e torna l'output\r\n return com.json2Str(predictArea), 200, {'ContentType':'application/json'}\r\n\r\n except Exception as e:\r\n \r\n logging.exception(\"Got exception\")\r\n return str(e), 500, {'ContentType':'text/html'} \r\n \r\n\t\r\n#main\r\nif __name__ == '__main__':\r\n\r\n '''app.run(\r\n host =com.getCfg('server', 'address'), \r\n port =com.getCfg('server', 'port'), \r\n debug=com.getCfg('server', 'debug')\r\n )'''\r\n \r\n #avvia il server waitress in ascolto\r\n serve(app, \r\n host=com.getCfg('server', 'address'), \r\n port=com.getCfg('server', 'port'))\r\n","repo_name":"MaxDam/FenceIndoor","sub_path":"FenceIndoorServer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"it","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"37524999510","text":"from __future__ import division\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n\nclass anom_detect():\n \"\"\"Anomaly detection for 
time series data\n\n    The method can be used to compute a moving average based on a certain\n    window size, using a discrete linear convolution method. Anomalous Points\n    can then be found based on a defined significance level using an Extreme Studentized\n    deviate (ESD) test.\n\n    Parameters\n    ----------\n    method : str\n        Method used in linear convolution method for dealing with boundaries\n    window : int\n        Window size to average data points over for moving average calculation\n    max_outliers : int\n        Maximum number of outliers to search for, if set to default\n        then it will be set to the length of data set. It is recommended\n        to limit this value to speed up computation.\n    alpha : float\n        Significance level for ESD test\n    mode : {full, valid, same}, default same\n        Method used in linear convolution method for dealing with boundaries\n        refer to numpy.convolve for more details regarding methods\n\n    Notes\n    -----\n    The ESD test can only be used if the residuals are approximately normally\n    distributed; this condition can be checked using the normality method.\n    The 'same' option is used for convolution method by default, this means\n    that the window of averaging must intersect with data points with a\n    length of >len(lag)/2. This improves dealing with boundary issues.\n\n    References\n    ----------\n    [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm\n    [2] https://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html\n    \"\"\"\n    def __init__(self,method='average',window=5,max_outliers=None,alpha=0.05,mode='same'):\n        self.method = method\n        self.window = window\n        self.max_outliers = max_outliers\n        self.alpha = alpha\n        self.mode = mode\n\n    def moving_average(self,f_t):\n        '''A moving average calculation (low pass filter) based on discrete\n        linear convolution\n\n        A moving average is calculated using discrete linear convolution.\n\n        Parameters\n        ----------\n        f_t : numpy.array\n            Data to calculate moving average\n\n        Attributes\n        ----------\n        rolling_mean : numpy.ndarray\n            The rolling average (filtered) data calculated based on the window\n            size set and the inputted raw data.\n        Notes\n        -----\n        For the moment the implementation does not handle sparse time series data;\n        ensure that the data does not have gaps > window or else the averaging will\n        be greatly impacted. Fill in missing data if possible.\n        '''\n        if type(f_t) is not np.ndarray:\n            raise TypeError\\\n            ('Expected one dimensional numpy array.')\n        if f_t.shape[1] != 1:\n            raise IndexError\\\n            ('Expected one dimensional numpy array, %d dimensions given.' % (f_t.shape[1]))\n\n        f_t = f_t.flatten()\n        window = self.window\n        mode = self.mode\n        g_t = np.ones(int(window))/float(window)\n        # Deal with boundaries with at least lag/2 day window\n        #mode = 'same'\n        rolling_mean = np.convolve(f_t,g_t,mode)\n        self.rolling_mean = rolling_mean\n        return rolling_mean\n\n    def deviation_stats(self,df):\n        '''Calculates standard deviation statistics for data\n\n        This function calculates the standard deviation of the dataset\n        and adds the stationary standard deviation (1 and 2 sigma) to the\n        moving average. For displaying the standard deviation on the Anomaly\n        plot.\n\n        Parameters\n        ----------\n        df : pandas.DataFrame\n            DataFrame containing timeseries data points\n\n        Attributes\n        ----------\n        df : pandas.DataFrame\n            DataFrame containing columns with original data, rolling average\n            and standard deviation for 1 and 2 sigma above and below the\n            rolling average.\n        Notes\n        -----\n        For the moment no rolling standard deviation is implemented. 
The\n stationary standard deviation calculated is before removal of Anomalous\n points, so will be higher than the actual std. deviation if these where\n removed.\n '''\n\n df['mean_count'] = self.rolling_mean\n df['residual'] = df.iloc[:,0] - self.rolling_mean\n std_resid = np.std(df.residual)\n df['pos_std'] = df.mean_count + std_resid\n df['neg_std'] = df.mean_count - std_resid\n df['pos_std_2'] = df.mean_count + 2*std_resid\n df['neg_std_2'] = df.mean_count - 2*std_resid\n return df\n\n def normality(self):\n \"\"\"\n Plots the distribution and probability plot for the Residuals\n\n These two plots are used for a sanity check to confirm that the\n residual between the actual data and the moving average are\n approximately normally distributed.\n This is important as the ESD test can only be used if the data is\n approximately normally distributed. Refer to notes and References\n for more details.\n \"\"\"\n if self.results is not None:\n df = self.results\n fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(10, 6))\n x = df.residual.values\n re = stats.probplot(x, plot=ax2)\n ax1.hist(df.residual,bins=100);\n ax1.set_title('Distribution of Residuals');\n else:\n raise NameError\\\n ('The moving average for the data has not yet been computed. Run moving_averge or evaluate prior to normality.')\n\n\n def esd_test(self,df_in):\n '''Implementation of Generalized ESD test for Outliers\n\n An extension to Grubbs test to k unknown outliers, all that requires\n specified is the maximum number of outliers and the confidence\n interval. The data must be approximately normal to apply the test.\n From Rosner, 1983 [1].\n\n Parameters\n ----------\n df_in : list\n Data to be tested for outliers, must be approximately normal\n\n Attributes\n ----------\n ESD_stats : Pandas.DataFrame\n Dataframe containing the ESD test statistic and Critical value.\n outliers : list\n List containing tuple of dataframe index of outlier & the\n x value found to be anomolous.\n\n References\n ----------\n [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm\n '''\n ind = list(df_in.index)\n x = list(df_in.values)\n outliers = []\n res_lst = [] # ESD Test Statistic for each k anomaly\n lam_lst = [] # Critical Value for each k anomaly\n n = len(x)\n\n if self.max_outliers is None:\n self.max_outliers = len(x)\n\n for i in range(1,self.max_outliers+1):\n x_mean = np.mean(x)\n x_std = np.std(x,ddof=1)\n res = abs((x - x_mean) / x_std)\n max_res = np.max(res)\n max_ind = np.argmax(res)\n p = 1 - self.alpha / (2*(n-i+1))\n t_v = stats.t.ppf(p,(n-i-1)) # Get critical values from t-distribution based on p and n\n lam_i = ((n-i)*t_v)/ np.sqrt((n-i-1+t_v**2)*(n-i+1)) # Calculate critical region (lambdas)\n res_lst.append(max_res)\n lam_lst.append(lam_i)\n if max_res > lam_i:\n outliers.append((ind.pop(max_ind),x.pop(max_ind)))\n # Record outlier Points\n outliers_index = [x[0] for x in outliers]\n\n ESD_stats = pd.DataFrame()\n ESD_stats['ESD Test Statistic'] = res_lst\n ESD_stats['Critical Value'] = lam_lst\n self.ESD_stats = ESD_stats\n\n return outliers_index\n\n def ESD_plot(self):\n \"\"\"\n esd_plot : bool\n This shows the plot of the critical value and test statistic\n against number of anomalies removed, showing based on an alpha\n the number of anomalies to be removed to be confident of the data.\n \"\"\"\n # Plot will show the point of intersection between critical value\n # and ESD test statistic\n self.ESD_stats.plot()\n\n def plot(self,data_label=None,left=None,right=None,bottom=None,top=None):\n 
'''Anomalous datapoint plotting method\n\n This can be used to plot a visualisation of the data with the moving\n average, standard deviations and the anomalous data points marked.\n\n Parameters\n ----------\n data_label : str\n Raw data series name, to be displayed on y-axis and legend.\n left : int\n xlimit for left limit of plot x-axis\n right : int\n xlimit for right limit of plot x-axis\n bottom : int\n ylimit for bottom limit of plot y-axis\n top : int\n ylimit for top limit of plot y-axis\n '''\n df = self.results\n anoma_points = self.anoma_points\n fig, ax1 = plt.subplots(1, 1,figsize=(15, 8))\n ax1.plot(list(df.index),df.iloc[:,0],'b.',label=data_label)\n ax1.plot(list(df.index),df.mean_count,'r',label='Moving Average')\n ax1.fill_between(df.index,df.pos_std,df.neg_std,color='red',alpha=0.3,label='1Sigma')\n ax1.fill_between(df.index,df.pos_std_2,df.neg_std_2,color='red',alpha=0.1,label='2Sigma')\n ax1.plot(list(anoma_points.index),anoma_points.iloc[:,0],'r*',label='Anomalous Points')\n ax1.set_xlabel('time')\n ax1.set_ylabel(data_label)\n ax1.set_title('Data with Anomalies starred')\n ax1.set_xlim(left=left,right=right)\n ax1.set_ylim(bottom=bottom,top=top)\n ax1.legend();\n\n def evaluate(self,data,anom_detect=True):\n '''Anomalous datapoint evaluation method\n\n This method takes a timeseries data set in and calculates the moving\n average and if desired identifies the anomalous points in the data set\n within the significance level set.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Raw data with index as the time component and one dimensional\n array for values containing the raw data points.\n anom_detect : bool\n Identify anomalous data points, default is true, if set to false\n only the moving average for the data will be calculated.\n\n Attributes\n ----------\n anoma_points : pandas.DataFrame\n DataFrame containing the anomalous datapoints identified.\n '''\n # Check data is in the right format and right order and dimension\n # Check for example if there are large gaps in the data.\n df = pd.DataFrame(data)\n df.sort_index()\n\n if df.shape[1] != 1:\n raise IndexError\\\n ('Insufficient dimensions provided, input data needs time and value columns.')\n\n if self.method == 'average':\n data_points = df.values\n self.moving_average(data_points)\n df = self.deviation_stats(df)\n self.results = df\n\n if anom_detect:\n outliers_index = self.esd_test(df[['residual']])\n anoma_points = pd.DataFrame(df[['sunspots']].iloc[outliers_index,0].sort_index())\n self.anoma_points = anoma_points\n return anoma_points\n","repo_name":"HamishWoodrow/anomaly_detection","sub_path":"anom_detect.py","file_name":"anom_detect.py","file_ext":"py","file_size_in_byte":11440,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"96"} +{"seq_id":"19391171515","text":"import functools\nimport streamlit as st\nfrom streamlit.report_thread import get_report_ctx\nimport time\n\n\ndef fancy_cache(func=None, ttl=600, unique_to_session=False, **cache_kwargs):\n \"\"\"A fancier cache decorator which allows items to expire after a certain time\n as well as promises the cache values are unique to each session.\n Parameters\n ----------\n func : Callable\n If not None, the function to be cached.\n ttl : Optional[int]\n If not None, specifies the maximum number of seconds that this item will\n remain in the cache.\n unique_to_session : boolean\n If so, then hash values are unique to that session. 
Otherwise, use the default\n        behavior which is to make the cache global across sessions.\n    **cache_kwargs\n        You can pass any other arguments which you might pass to @st.cache\n    \"\"\"\n    # Support passing the params via function decorator, e.g.\n    # @fancy_cache(ttl=10)\n    if func is None:\n        return lambda f: fancy_cache(\n            func=f, ttl=ttl, unique_to_session=unique_to_session, **cache_kwargs\n        )\n\n    # This will behave like func but adds two dummy variables.\n    dummy_func = st.cache(\n        func=lambda ttl_token, session_token, *func_args, **func_kwargs: func(\n            *func_args, **func_kwargs\n        ),\n        **cache_kwargs\n    )\n\n    # This will behave like func but with fancy caching.\n    @functools.wraps(func)\n    def fancy_cached_func(*func_args, **func_kwargs):\n        # Create a token which changes every ttl seconds.\n        ttl_token = None\n        if ttl is not None:\n            ttl_token = int(time.time() / ttl)\n\n        # Create a token which is unique to each session.\n        session_token = None\n        if unique_to_session:\n            ctx = get_report_ctx()\n            session_token = ctx.session_id\n\n        # Call the dummy func\n        return dummy_func(ttl_token, session_token, *func_args, **func_kwargs)\n\n    return fancy_cached_func\n","repo_name":"maxibor/pydamage_poster","sub_path":"fancy_cache.py","file_name":"fancy_cache.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"28767214989","text":"# ROCK-PAPER-SCISSORS\nfrom random import choice\n\ngame_choices = ['rock', 'paper', 'scissors']\n\n\ndef get_computer_choice():\n    \"\"\"\n    Function to get the computer's choice from the R-P-S choices at random.\n\n    :return: comp_choice \n    \"\"\"\n    comp_choice = choice(game_choices)\n\n    # print(f\"Computer Chose: {comp_choice}.\")\n\n    return comp_choice\n\n\ndef get_player_choice():\n    \"\"\"\n    Function to get the user's choice input.\n    The choice selected should be case-insensitive.\n\n    :return: user_choice \n    \"\"\"\n\n    user_choice = ''\n\n    while user_choice.lower() not in game_choices:\n        user_choice = input(\"\\nInput weapon name:\\t\")\n\n    user_choice = user_choice.lower()\n\n    # print(f\"You have selected: {user_choice}\")\n\n    return user_choice\n\n\ndef game():\n    \"\"\"\n    Function to play the R-P-S game 5 times to keep score\n    Rules are:\n    - Rock wins against scissors\n    - paper wins against rock\n    - scissors wins against paper\n    \"\"\"\n\n    comp_score = player_score = 0\n\n    msg = \" Rock / Paper / Scissors. Choose your weapon. \"\n\n    print(f\"{msg:-^100}\")\n\n    for round_played in range(5):\n        computer = get_computer_choice()\n        player = get_player_choice()\n\n        if computer == player:\n            print(f\"It's a tie!\")\n\n        elif computer == 'rock' and player == 'paper':\n            print(f\"\\nYou win! Paper beats Rock!\")\n            player_score += 1\n\n        elif computer == 'rock' and player == 'scissors':\n            print(f\"\\nYou lose! Rock beats Scissors!\")\n            comp_score += 1\n\n        elif computer == 'paper' and player == 'rock':\n            print(f\"\\nYou lose! Paper beats Rock!\")\n            comp_score += 1\n\n        elif computer == 'paper' and player == 'scissors':\n            print(f\"\\nYou win! Scissors beats Paper!\")\n            player_score += 1\n\n        elif computer == 'scissors' and player == 'paper':\n            print(f\"\\nYou lose! Scissors beats Paper!\")\n            comp_score += 1\n\n        elif computer == 'scissors' and player == 'rock':\n            print(f\"\\nYou win! 
Rock beats scissors!\")\n player_score += 1\n\n print(f\"The computer selected: {computer}\")\n print(f\"You selected: {player}\")\n\n print(f\"\\nComp Score: {comp_score}\\nPlayer Score: {player_score}\")\n\n\ngame()\n# print(get_computer_choice())\n# print(get_player_choice())\n","repo_name":"Mwanaidi-M/Python-Programming-Practice","sub_path":"RockPaperScissors/rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12593665354","text":"from turtle import Turtle\n\nSNAKE_STARTING_POSITIONS_LIST = [(0, 0), (-20, 0), (-40, 0)]\nMOVE_DISTANCE = 20\n\n\nclass Snake():\n def __init__(self) -> None:\n self.positions_list = []\n\n self.snakes_list = []\n\n self.snake_heading = []\n\n self.set_snake_head_starting_position()\n\n def set_snake_head_starting_position(self):\n for position in range(0, len(SNAKE_STARTING_POSITIONS_LIST)):\n new_segment = Turtle()\n new_segment.shape(\"square\")\n new_segment.color(\"white\")\n new_segment.penup()\n new_segment.speed(\"fastest\")\n new_segment.setheading(0)\n new_segment.setposition(SNAKE_STARTING_POSITIONS_LIST[position])\n self.snakes_list.append(new_segment)\n for segment in range(0, len(self.snakes_list)):\n self.positions_list.append(self.snakes_list[segment].position())\n self.snake_heading.append(self.snakes_list[0].heading())\n\n\n def extend_snake(self):\n new_segment = Turtle()\n new_segment.shape(\"square\")\n new_segment.color(\"white\")\n new_segment.penup()\n new_segment.speed(\"fastest\")\n print(new_segment.position())# Test\n new_segment.setposition(self.positions_list[-1])\n print(new_segment.position())# Test\n self.snakes_list.append(new_segment)\n self.positions_list.append(self.snakes_list[len(self.snakes_list) - 1].position())\n print(new_segment.position())# Test\n\n def reset(self):\n for segment in self.snakes_list:\n segment.hideturtle()\n segment.goto(1000, 1000)\n self.snakes_list.clear()\n self.set_snake_head_starting_position()\n\n\n def move_snake_forward(self):\n move_counter = 0\n self.snakes_list[0].forward(MOVE_DISTANCE)\n for segment in range(1, len(self.snakes_list)):\n self.snakes_list[segment].setposition(self.positions_list[move_counter])\n move_counter += 1\n for segment in range(0, len(self.snakes_list)):\n self.positions_list[segment] = self.snakes_list[segment].position()\n #print(self.positions_list)\n\n\n def turn_left(self):\n \"\"\"Turns the head of the snake by 90 degrees leftwards in relation to the heads current heading\"\"\"\n self.snake_heading[0] += 90.0\n self.snakes_list[0].setheading(self.snake_heading[0])\n\n\n def turn_right(self):\n \"\"\"Turns the head of the snake by 90 degrees rightwards in relation to the heads current heading\"\"\"\n self.snake_heading[0] -= 90.0\n self.snakes_list[0].setheading(self.snake_heading[0])\n","repo_name":"G8w4y/Small-Projects","sub_path":"Small-Projects-master/100 Days of Code Challenge/Day20-start/Snake-game/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12013714802","text":"\"\"\"kessk_web URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom device.views import BindView, bindDevice, ccnameDevice, bindShareDevice\nfrom user.views import UserIndex, get_share_qrcode, UserShare, get_device_users, unbinding_device, upgrade_device_done, change_user_index\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('device/bind/',BindView.as_view()),\n path('user/index/',UserIndex.as_view()),\n path('user/share/',UserShare.as_view()),\n path('api/device/bind/',bindDevice),\n path('api/device/ccname/',ccnameDevice),\n path('api/user/share/',get_share_qrcode),\n path('api/user/share/bind/',bindShareDevice),\n path('api/device/bind/log/',get_device_users),\n path('api/device/unbind/',unbinding_device),\n path('api/device/update/',upgrade_device_done),\n path('api/user/ccindex/',change_user_index),\n]\n","repo_name":"yungs2017/kessk-switch","sub_path":"kessk_web/kessk_web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"96"} +{"seq_id":"17513505783","text":"from flask import Flask, render_template, jsonify, url_for\r\nfrom os import path\r\nimport webview\r\nimport requests\r\nimport pokepy\r\nimport json\r\nimport utilities\r\nimport random\r\nfrom sys import argv\r\n\r\n\r\napp = Flask(__name__)\r\napi = pokepy.V2Client()\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n types = ['Normal', 'Fire', 'Water', 'Electric', 'Grass', 'Ice', 'Fighting', 'Poison',\r\n 'Ground', 'Flying', 'Psychic', 'Bug', 'Rock', 'Ghost', 'Dragon', 'Dark', 'Steel', 'Fairy']\r\n if path.exists('pokedex.json'):\r\n with open('pokedex.json', 'r+') as pokedex:\r\n context = json.load(pokedex)\r\n else:\r\n context = pokedex().get_json()\r\n with open('pokedex.json', 'w+') as pokedex:\r\n json.dump(context, pokedex)\r\n return render_template('index.html', context=context, types=types)\r\n\r\n\r\n@app.route('/pokemon/')\r\ndef pokemon(name):\r\n data = utilities.pokemon_data(name, api)\r\n return render_template('pokemon.html', forms=data)\r\n\r\n\r\n@app.route('/pokedex')\r\ndef pokedex():\r\n species_endpoint = 'https://pokeapi.co/api/v2/pokemon-species?limit=893'\r\n context = []\r\n response = requests.get(species_endpoint)\r\n species_list = [object['name'] for object in response.json()['results']]\r\n for species in species_list:\r\n default_form = api.get_pokemon_species(\r\n species).varieties[0].pokemon.name\r\n data = api.get_pokemon(default_form)\r\n species_dict = {\r\n 'name': species.capitalize(),\r\n 'artwork': f'https://img.pokemondb.net/artwork/{default_form}.jpg',\r\n 'types': [type_resource.type.name for type_resource in data.types],\r\n 'id': '{:0>3}'.format(data.id)\r\n }\r\n context.append(species_dict)\r\n with open('pokedex.json', 'w+') as pokedex:\r\n json.dump(context, pokedex)\r\n return jsonify(context)\r\n\r\n\r\ndef test():\r\n return 'rgb(255,200,4)'\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if argv[1] == 'web':\r\n webview.create_window('Pokédex', app, fullscreen=True, resizable=False)\r\n webview.start()\r\n elif argv[1] == 'browser':\r\n 
app.run()\r\n","repo_name":"Archesper/Pokedex","sub_path":"pokédex/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"32458551394","text":"\"\"\"Data generators for OpenWebText data-set.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport glob\nimport random\nimport traceback\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import registry\nfrom . import lookahead_tensorflow as lookahead_tf\nfrom . import largebatch_optim\nfrom tensor2tensor.models import transformer\n\nimport tensorflow as tf\n\n@registry.register_optimizer\ndef lookahead(learning_rate, hparams):\n \"\"\"By default, use LA_Adam with la_steps=10 and la_alpha=0.5.\"\"\"\n optim = tf.contrib.opt.LazyAdamOptimizer(\n learning_rate,\n beta1=hparams.optimizer_adam_beta1,\n beta2=hparams.optimizer_adam_beta2,\n epsilon=hparams.optimizer_adam_epsilon)\n return lookahead_tf.LookaheadOptimizer(optim, 10)\n\n\n@registry.register_optimizer\ndef largebatch(learning_rate, hparams):\n \"\"\" By default, use Adam with update_step=2. (Doubles the batch size) \"\"\"\n optim = tf.contrib.opt.LazyAdamOptimizer(\n learning_rate,\n beta1=hparams.optimizer_adam_beta1,\n beta2=hparams.optimizer_adam_beta2,\n epsilon=hparams.optimizer_adam_epsilon)\n return largebatch_optim.LargebatchOptimizer(optim, 2)\n\n\n@registry.register_hparams\ndef transformer_gpt2():\n \"\"\"\n HParams for training gpt2 on OpenWebText.\n For single node (4 GPUs), batch_size = 2048 + optimizer_multistep_accumulate_steps = 64\n For 2 nodes distributed training (8 GPUs), batch_size = 2048 + optimizer_multistep_accumulate_steps = 32\n \"\"\"\n hparams = transformer.transformer_lm_tpu_0()\n hparams.num_heads = 12 # Heads are expensive on TPUs.\n hparams.batch_size = 2048 #1024\n hparams.filter_size = 3072\n # hparams.learning_rate_constant = 2.5\n hparams.hidden_size = 768\n hparams.learning_rate_warmup_steps = 2000\n hparams.learning_rate_minimum = 0.0\n hparams.learning_rate_cosine_cycle_steps = 2000000\n hparams.learning_rate_constant = 2.5e-4\n hparams.learning_rate_schedule = \"constant*linear_warmup*cosdecay\" #\"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\"\n hparams.max_length = 1024\n hparams.optimizer = \"multistep_adam\"\n hparams.optimizer_multistep_accumulate_steps = 32 #64 #128\n hparams.num_hidden_layers = 12\n return hparams\n\n\n@registry.register_hparams\ndef transformer_gpt2_medium():\n \"\"\"HParams for training gpt2_medium on OpenWebText.\"\"\"\n hparams = transformer.transformer_lm_tpu_0()\n hparams.num_heads = 16 # Heads are expensive on TPUs.\n hparams.batch_size = 2048\n hparams.filter_size = 3072\n hparams.hidden_size = 1024\n hparams.learning_rate_warmup_steps = 2000\n hparams.learning_rate_minimum = 0.0\n hparams.learning_rate_cosine_cycle_steps = 2000000\n hparams.learning_rate_constant = 2.5e-4\n hparams.learning_rate_schedule = \"constant*linear_warmup*cosdecay\" #\"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\"\n hparams.max_length = 1024\n hparams.optimizer = \"multistep_adam\"\n 
hparams.optimizer_multistep_accumulate_steps = 64\n hparams.num_hidden_layers = 24\n return hparams\n\n\n@registry.register_hparams\ndef transformer_gpt2_large():\n \"\"\"HParams for training gpt2_large on OpenWebText.\"\"\"\n hparams = transformer.transformer_lm_tpu_0()\n hparams.num_heads = 20 # Heads are expensive on TPUs.\n hparams.batch_size = 2048 \n hparams.filter_size = 3072\n hparams.hidden_size = 1280\n hparams.learning_rate_warmup_steps = 2000\n hparams.learning_rate_minimum = 0.0\n hparams.learning_rate_cosine_cycle_steps = 2000000\n hparams.learning_rate_constant = 2.5e-4\n hparams.learning_rate_schedule = \"constant*linear_warmup*cosdecay\" #\"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\"\n hparams.max_length = 1024\n hparams.optimizer = \"multistep_adam\"\n hparams.optimizer_multistep_accumulate_steps = 64 \n hparams.num_hidden_layers = 36\n return hparams\n\n\n@registry.register_hparams\ndef transformer_gpt2_xlarge():\n \"\"\"HParams for training gpt2_extra_large on OpenWebText.\"\"\"\n hparams = transformer.transformer_lm_tpu_0()\n hparams.num_heads = 25 # Heads are expensive on TPUs.\n hparams.batch_size = 2048\n hparams.filter_size = 3072\n hparams.hidden_size = 1600\n hparams.learning_rate_warmup_steps = 2000\n hparams.learning_rate_minimum = 0.0\n hparams.learning_rate_cosine_cycle_steps = 2000000\n hparams.learning_rate_constant = 2.5e-4\n hparams.learning_rate_schedule = \"constant*linear_warmup*cosdecay\" #\"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\"\n hparams.max_length = 1024\n hparams.optimizer = \"multistep_adam\"\n hparams.optimizer_multistep_accumulate_steps = 64\n hparams.num_hidden_layers = 48\n return hparams\n\n\nsplit_files = None\n\ndef train_dev_split(tmp_dir, split, ratio=0.9, percentage=0.5):\n \"\"\"Split the data into training and validation set.\"\"\"\n global split_files\n if not split_files:\n if os.path.isfile(os.path.join(tmp_dir, 'training_set.txt')) and os.path.isfile(os.path.join(tmp_dir, 'val_set.txt')):\n tf.logging.info(\"Loading pre-generated splits...\")\n f = open(os.path.join(tmp_dir, 'training_set.txt'), 'r')\n _train_data_filenames = f.read().split('\\n')\n tf.logging.info(\"Using %d out of %d files.\", round(len(_train_data_filenames) * percentage), len(_train_data_filenames))\n _train_data_filenames = _train_data_filenames[:round(len(_train_data_filenames) * percentage)]\n f = open(os.path.join(tmp_dir, 'val_set.txt'), 'r')\n _dev_data_filenames = f.read().split('\\n')\n else:\n tf.logging.info(\"Generating train_val split...\")\n dataset_filenames = glob.glob(os.path.join(tmp_dir, '*', '*.txt'))\n random.shuffle(dataset_filenames)\n training_num = round(len(dataset_filenames) * 0.9)\n _train_data_filenames = dataset_filenames[:training_num]\n _dev_data_filenames = dataset_filenames[training_num:]\n split_files = {\n problem.DatasetSplit.TRAIN: _train_data_filenames,\n problem.DatasetSplit.EVAL: _dev_data_filenames,\n }\n return split_files[split]\n\n\n# def _train_data_filenames(tmp_dir):\n# # return [\n# # os.path.join(tmp_dir,\n# # \"openwebtext-language-modeling\",\n# # \"training-monolingual.bpe.shuffled\",\n# # \"text.en-%05d-of-00100\" % i) for i in range(1, 100)\n# # ]\n# file_list = glob.glob(os.path.join(tmp_dir, '*', '*.txt'))\n# return [\n# os.path.join(tmp_dir,\n# \"openwebtext-language-modeling\",\n# \"training-monolingual.bpe.shuffled\",\n# \"text.en-%05d-of-00100\" % i) for i in range(1, 100)\n# ]\n\n\n# def _dev_data_filenames(tmp_dir):\n# return [os.path.join(tmp_dir,\n# 
\"openwebtext-language-modeling\",\n# \"heldout-monolingual.bpe.shuffled\",\n# \"text.en.heldout-00000-of-00050\")]\n\n\n@registry.register_problem\nclass LanguagemodelOpenWebText(text_problems.Text2SelfProblem):\n \"\"\"\n A language model on the OpenWebText corpus.\n \"\"\"\n\n # @property\n # def approx_vocab_size(self):\n # # Only for VocabType.SUBWORD\n # return 50256\n\n @property\n def dataset_splits(self):\n \"\"\"Splits of data to produce and number of output shards for each.\"\"\"\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 512,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 32,\n }]\n\n @property\n def vocab_type(self):\n return text_problems.VocabType.BYTE_PAIR\n\n def is_generate_per_split(self):\n return True\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n # Get the data filenames and shuffle for train/val split.\n # dataset_filenames = glob.glob(os.path.join(tmp_dir, '*', '*.txt'))\n # random.shuffle(dataset_filenames)\n # training_num = round(len(dataset_filenames) * 0.9)\n # train_data_filenames = dataset_filenames[:training_num]\n # dev_data_filenames = dataset_filenames[training_num:]\n # split_files = {\n # problem.DatasetSplit.TRAIN: train_data_filenames,\n # problem.DatasetSplit.EVAL: dev_data_filenames,\n # }\n # Need to make sure the data has been downloaded and prepared!\n # _maybe_download_corpus(tmp_dir)\n # original_vocab = _original_vocab(tmp_dir)\n\n # Load the byte_pair_encoder.\n # byte_pair_encoder = text_encoder.BytePairEncoder(os.path.join(data_dir, 'encoder.json'), os.path.join(data_dir, 'vocab.bpe'))\n files = train_dev_split(tmp_dir, dataset_split)\n # files = split_files[dataset_split]\n for filepath in files:\n tf.logging.info(\"filepath = %s\", filepath)\n try:\n for line in tf.gfile.Open(filepath):\n # txt = _replace_oov(original_vocab, text_encoder.native_to_unicode(line))\n if line != '\\n':\n encoded_txt = line\n yield {\"targets\": encoded_txt}\n except Exception:\n traceback.print_exc()\n continue\n","repo_name":"Edwardlzy/NLP_Project","sub_path":"tensor2tensor/tensor2tensor/data_generators/openwebtext/openwebtext.py","file_name":"openwebtext.py","file_ext":"py","file_size_in_byte":9110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14088215906","text":"from typing import *\n\nclass Solution:\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n r_map = {0:-1}\n t_sum = 0\n for i in range(len(nums)):\n t_sum += nums[i]\n rem = t_sum % k\n if(rem in r_map):\n if i - r_map[rem] > 1:\n return True\n else:\n r_map[rem] = i\n\n return False\n \n\nif __name__ == '__main__':\n soln = Solution()\n print(soln.checkSubarraySum([23,2,4,6,6], 7))","repo_name":"shrsulav/algo_leetcode","sub_path":"523_subarray_sum.py","file_name":"523_subarray_sum.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38432180492","text":"\"\"\" Junction is a NAIS component that routes, multiplexes and eventually\n transforms messages between applications.\n\"\"\"\nimport asyncio\nimport signal\nimport socket\nimport logging\nimport sys\nimport traceback\nimport concurrent\nimport websockets\n\nfrom hbmqtt.client import MQTTClient\nfrom hbmqtt.mqtt.constants import QOS_1\n\nfrom pynais import SYNC_START_B, SYNC_END_B, DLINE\nfrom pynais.nais import is_protobuf, entity_from, is_ack, marshall, unmarshall, ConnectionClosed\nfrom pynais.packet import 
Packet\n\nA_STREAM = 0 # ascii stream\nB_STREAM = 1 # binary stream\nLEN_ENTER = 2 # reading len byte state\nLEN_DONE = 3\n\n#\n# binary packet format\n#\n# |SYNC_START(8)|\n# PKT_TYPE(8)|SLINE(8)|DLINE(8)|FLAGS(8)|PAYLOAD_LEN(16)|PAYLOAD\n# |SYNC_END(8)|\n#\n# SYNC_START = 0x1E\n# SYNC_END = 0x17\n\n# ascii strings are messages \\n terminated\n\n# json messages\n\n# PKT_TYPE values\nPROTOBUF_T = 0x01\n\nLOG = logging.getLogger(__name__)\nLOG.setLevel(logging.DEBUG)\n\ndef my_ip_address():\n \"\"\"return the public ip of the local host\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n return sock.getsockname()[0]\n\nclass Channel:\n \"\"\"encapsulate the (reader, writer) stream pair\n \"\"\"\n\n def __init__(self, reader, writer):\n self.reader = reader\n self.writer = writer\n\n def close(self):\n \"\"\"close the channel\n \"\"\"\n self.writer.close()\n\ndef pretty_print_peers(peers):\n \"\"\"return the peers list formatted as a readable string\n \"\"\"\n return [\"{}\".format(peer) for peer in peers]\n\n\nclass MessageTape:\n \"\"\"state machine for processing input bytes\n \"\"\"\n def __init__(self):\n self.expected_len = 65536\n self.cursor = 0\n self.mult = 1\n self.sts_len = 0\n self.hlen = 6\n self.plen = 0\n self.sts = A_STREAM\n self.msg = bytearray()\n\n def add_char(self, channel):\n \"\"\"process an input byte.\n\n Returns:\n bool: False if the input byte is the last of a complete record\n \"\"\"\n if self.sts == B_STREAM:\n self.cursor += 1\n if self.cursor == 5 or self.sts_len == LEN_ENTER:\n len_byte = int.from_bytes(channel, 'little')\n\n if len_byte & 0x80:\n self.plen += self.mult * (len_byte & 0x7f)\n self.mult *= 128\n self.hlen += 1\n self.sts_len = LEN_ENTER\n else:\n self.expected_len = self.hlen + self.plen + self.mult*len_byte\n self.sts_len = LEN_DONE\n\n #LOG.debug(\"read |%r|\", channel)\n self.msg += channel\n\n if channel == SYNC_START_B:\n self.cursor = 0\n self.plen = 0\n self.sts = B_STREAM\n self.sts_len = 0\n\n elif self.sts == B_STREAM and self.cursor == self.expected_len:\n if channel == SYNC_END_B:\n self.sts = A_STREAM\n return False\n elif self.sts == A_STREAM and channel == b'\\n':\n return False\n\n return True\n\n# def message(self):\n# return self.msg\n\ndef receive_from(reader, expect_json=False):\n \"\"\"return a message from a channel\n\n Detect the type of message, currently protobuf encoded or ascii\n strings \\n terminated.\n If expect_json is True return the received json message with\n a leap of faith.\n\n \"\"\"\n\n parser = MessageTape()\n\n if expect_json:\n msg = reader(2048)\n return msg\n\n read_tape = True\n\n while read_tape:\n\n channel = reader(1)\n if (channel == b''):\n raise ConnectionClosed(\"connection closed: {}\".format(reader))\n\n read_tape = parser.add_char(channel)\n\n return parser.msg\n\n#pylint: disable=C0103\nclass create_connection:\n \"\"\"wrapper class around a network socket\n \"\"\"\n def __init__(self, address):\n self.address = address\n self.sock = None\n\n def __enter__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):\n self.sock = socket.create_connection(\n self.address, timeout, source_address)\n return self\n\n def __exit__(self, *args):\n self.sock.close()\n\n def proto_send(self, msg):\n \"\"\"send a protobuf `msg`\n \"\"\"\n self.sock.send(marshall(msg))\n\n def ascii_send(self, msg):\n \"\"\"send an ascii `msg`\n \"\"\"\n if type(msg) == str:\n msg = msg.encode('utf-8')\n\n n = self.sock.send(msg)\n\n def ascii_receive(self, 
expect_json=False):\n \"\"\"block until an ascii message is received\n\n An ascii message is \\n terminated\n\n Returns:\n bytes: a \\n terminated ascii message\n \"\"\"\n while True:\n msg = receive_from(self.sock.recv, expect_json)\n\n if not is_protobuf(msg):\n return msg\n\n def proto_receive(self):\n \"\"\"block until a protobuf message is received\n\n Returns:\n object: a protobuf instance\n \"\"\"\n while True:\n msg = receive_from(self.sock.recv)\n\n if is_protobuf(msg):\n return unmarshall(msg)\n\n#pylint: enable=C0103\n\n\n\nasync def connect(host, port):\n \"\"\"coroutine connect as soon as the server is available. return a Channel\n \"\"\"\n while True:\n try:\n reader, writer = await asyncio.open_connection(host, port)\n #await asyncio.sleep(0.1)\n return Channel(reader, writer)\n except ConnectionRefusedError:\n pass\n\n\nasync def wsconnect(host, port):\n \"\"\"coroutine connect to a websocket as soon as the server is available. return a Channel\n \"\"\"\n while True:\n try:\n sock = await websockets.client.connect('ws://{}:{}'.format(host, port))\n LOG.debug('connected to ws://%s:%d', host, port)\n return sock\n except ConnectionRefusedError:\n pass\n\n\nasync def proto_send(channel, message, wait_ack=False):\n \"\"\"coroutine send a protobuf encoded ``message`` and optionally wait for an ack\n\n Raises:\n InvalidMessage: if ``message`` is not marshallable\n \"\"\"\n return await msg_send(channel, marshall(message), wait_ack)\n\n\nasync def msg_send(channel, message, wait_ack=False):\n \"\"\"coroutine send an array of bytes or a string ``message``\n \"\"\"\n if isinstance(message, str):\n message = message.encode('utf-8')\n channel.writer.write(message)\n while wait_ack:\n data = await msg_receive(channel)\n\n LOG.debug(\"msg_send (waiting for ack): recv |%r|\", data)\n response = entity_from(data)\n if not is_ack(response, message):\n LOG.debug(\"ACK expected, ignoring msg: |%s|\", response)\n else:\n wait_ack = False\n\n\nasync def proto_receive(channel, expect_json=False):\n \"\"\"coroutine block until a protobuf message is received\n \"\"\"\n while True:\n msg = await msg_receive(channel, expect_json)\n if is_protobuf(msg):\n return unmarshall(msg)\n\n\nasync def msg_receive(channel, expect_json=False):\n \"\"\"coroutine block until a message string/json/protobuf is received\n\n Detect the type of message, currently protobuf encoded or ascii\n strings \\n terminated.\n If expect_json is True return the received json message with\n a leap of faith.\n\n \"\"\"\n parser = MessageTape()\n\n if expect_json:\n msg = await channel.reader.read(2048)\n return msg\n\n read_tape = True\n\n while read_tape:\n\n chan = await channel.reader.read(1)\n if chan == b'':\n raise ConnectionClosed(\"connection closed: {}\".format(channel.reader))\n\n read_tape = parser.add_char(chan)\n\n return parser.msg\n\n\n\nclass Line:\n \"\"\" Base Line\n\n A Line has a unique identifier, set automatically\n\n \"\"\"\n\n ln = 0\n\n sid = 0\n\n def __init__(self, type):\n self.expect_json = False\n self.srv = None\n self.peers = []\n self.writers = []\n self.transform = {}\n self.id = Line.ln\n Line.ln += 1\n self.type = type\n junction.attach(self)\n self.msg_queue = asyncio.Queue()\n\n def alloc_sid(self):\n \"\"\"return a unique line instance id\n \"\"\"\n Line.sid += 1\n return Line.sid\n\n def free_sid(self, id):\n \"\"\"do nothing\n\n To be implemented if a previous line id may be reused by a new line\n instance\n \"\"\"\n pass\n\n async def close(self):\n LOG.debug(\"closing clients\")\n for c 
in self.writers:\n c.close()\n if self.srv:\n LOG.debug(\"%s: closing server\", self)\n self.srv.close()\n await self.srv.wait_closed()\n\n async def read_msg(self, channel):\n \"\"\"Detect the format of input bytes\n\n if no input parser are injected defaults to ascii string \\n terminated\n \"\"\"\n return await msg_receive(channel, self.expect_json)\n\n async def line_handler(self, channel):\n\n if not self.writers:\n # first connected client, start dispatching\n asyncio.ensure_future(self.dispatch())\n\n self.writers.append(channel.writer)\n channel.reader.line = self.alloc_sid()\n channel.writer.line = channel.reader.line\n try:\n reader_task = await self.reader_handler(channel)\n except (asyncio.CancelledError, concurrent.futures._base.CancelledError):\n pass\n else:\n # if the remote endpoint is a server try to reconnect\n if self.remote_is_server:\n asyncio.ensure_future(self.open())\n\n channel.writer.close()\n self.free_sid(channel.reader.line)\n self.writers.remove(channel.writer)\n\n\n async def deliver_to_peers(self, message, src_line):\n \"\"\"Deliver the message to related peers\n \"\"\"\n LOG.info(\"%s:line:%s <-- |%s|\", self, src_line, message)\n LOG.debug(\"%s peers: %s\", self, pretty_print_peers(self.peers))\n\n dline = 0\n if is_protobuf(message):\n dline = message[DLINE]\n\n for target in self.peers:\n\n #LOG.debug(\"queueing into %s\", target)\n if target in self.transform:\n msg = Packet(message, src_line,\n transformer=self.transform[target])\n else:\n msg = Packet(message, src_line)\n\n await target.msg_queue.put(msg)\n\n async def reader_handler(self, channel):\n \"\"\"Read a message from the client peer\n\n The message may be a bytes array or a string, for example a json\n formatted string\n \"\"\"\n try:\n while True:\n message = await self.read_msg(channel)\n if not message:\n # return a null message if the connection is unexpectedly closed\n break\n await self.deliver_to_peers(message, channel.reader.line)\n except asyncio.CancelledError:\n LOG.debug(\"task was cancelled\")\n raise\n except ConnectionClosed:\n pass\n\n LOG.debug(\"connection closed: %s\", self)\n\n async def dispatch(self):\n try:\n while True:\n LOG.debug(\"%s: ... 
waiting for messages\", self)\n message = await self.msg_queue.get()\n\n # if (type(message) == str):\n # message = bytes(message, 'utf-8')\n\n #LOG.debug(\"%s message to deliver: |%s|\", self, message)\n for w in self.writers:\n if w.line == message.dline or message.dline == 0:\n pkt = message.packet\n LOG.debug(\"%s:line:%s --> |%s| (PKT.DLINE %s)\",\n self, w.line, pkt, message.dline)\n w.write(pkt)\n\n except asyncio.CancelledError:\n LOG.debug(\"%s dispatch task: executed cancellation request\", self)\n except Exception as e:\n LOG.error(\"dispatch error: %s\", e)\n traceback.print_exc(file=sys.stderr)\n\n def add_peer(self, peer, transform=None):\n self.peers.append(peer)\n\n if transform:\n self.transform[peer] = transform\n\n def __str__(self):\n return \"%s:%s\" % (self.type, self.id)\n\n\nclass SerialLine(Line):\n def __init__(self, device='/dev/ttyUSB0',\n port=2000,\n state='raw',\n timeout=0,\n options='115200 8DATABITS NONE 1STOPBIT'):\n super().__init__('ser')\n self.host = 'localhost'\n self.port = port\n self.state = state\n self.timeout = timeout\n self.device = device\n self.options = options\n self.remote_is_server = False\n\n async def errors_from_serial(self):\n \"\"\"Serial errors come from ser2net stderr and always triggers a reconnect\n \"\"\"\n while True:\n msg = await self._serial.stderr.readline()\n LOG.error(\"%s\", msg.decode().strip('\\n'))\n await asyncio.sleep(2)\n await self.open_serial_client()\n\n async def open_serial_client(self):\n LOG.debug(\"connecting %s to serial server |%s|\", self, self.host)\n reader, writer = await asyncio.open_connection(self.host, self.port)\n asyncio.ensure_future(self.line_handler(Channel(reader, writer)))\n\n async def open(self):\n \"\"\"Open the serial line connection\n\n The reader receives the incoming messages from the server app and deliver it to the junction\n The writer sends a message received from serial port to the sever app\n \"\"\"\n\n cline = \":\".join((str(self.port), self.state, str(\n self.timeout), self.device, self.options))\n LOG.debug(\"ser2net C line: |%s|\", cline)\n create = asyncio.create_subprocess_exec(\n 'ser2net', '-d', '-C', cline, stderr=asyncio.subprocess.PIPE)\n\n self._serial = await create\n LOG.debug(\"serial channel |%s| started\", self)\n\n asyncio.ensure_future(self.errors_from_serial())\n\n # open the net socket proxing the serial port\n asyncio.ensure_future(self.open_serial_client())\n\n async def close(self):\n LOG.debug(\"%s: closing serial line\", self)\n self._serial.send_signal(signal.SIGTERM)\n await self._serial.wait()\n\n\nclass MqttLine(Line):\n\n def __init__(self, topic, host=\"localhost\", port=1883):\n super().__init__('mqtt')\n self.topic = topic\n self.host = host\n self.port = port\n\n async def open(self):\n \"\"\"Connect to mqtt broker\n \"\"\"\n config = {\n 'ping_delay': 0,\n 'keep_alive': 60\n }\n self.mqtt = MQTTClient(config=config)\n\n await self.mqtt.connect('mqtt://{}:{}'.format(self.host, self.port))\n\n self.line = self.alloc_sid()\n\n asyncio.ensure_future(self.dispatch())\n\n await self.mqtt.subscribe([(self.topic, QOS_1)])\n\n asyncio.ensure_future(self.mqtt_reader_handler())\n\n async def close(self):\n LOG.debug(\"%s: closing mqtt\", self)\n await self.mqtt.disconnect()\n\n async def mqtt_reader_handler(self):\n \"\"\"Wait for messages on subscribed topics\n \"\"\"\n while True:\n mqtt_msg = await self.mqtt.deliver_message()\n message = mqtt_msg.publish_packet.payload.data\n\n await self.deliver_to_peers(message, self.line)\n\n async def 
dispatch(self):\n \"\"\"Deliver messages to mqtt broker\n\n Route messages enqueued by related endpoints\n \"\"\"\n topic = 'hb/' + self.topic\n while True:\n LOG.debug(\"MQTT: dequeueing from %s\", self)\n message = await self.msg_queue.get()\n\n # if (type(message) == str):\n # message = bytes(message, 'utf-8')\n\n LOG.debug(\"topic: |%s| - message to deliver: |%r|\",\n topic, message)\n await self.mqtt.publish(topic, message.data)\n\n\nclass TcpLine(Line):\n \"\"\"A tcp socket endpoint\n \"\"\"\n\n def __init__(self, host=None, port=2000, remote_is_server=False, expect_json=False):\n super().__init__('tcp')\n self.host = host\n self.port = port\n self.remote_is_server = remote_is_server\n self.expect_json = expect_json\n\n async def cli_handler(self, reader, writer):\n addr = writer.get_extra_info('peername')\n LOG.debug(\"client connected: %r\", addr)\n await self.line_handler(Channel(reader, writer))\n\n async def open(self):\n \"\"\"Open the line connection\n\n The reader receives the incoming messages from the server app and\n deliver it to the junction the writer sends a message received from\n the junction to the sever app\n \"\"\"\n\n if (self.remote_is_server):\n try:\n LOG.debug(\"connecting %s to remote server |%s|\",\n self, self.host)\n reader, writer = await asyncio.open_connection(self.host, self.port)\n asyncio.ensure_future(self.cli_handler(reader, writer))\n LOG.debug(\"connection to remote \")\n except socket.gaierror as e:\n LOG.info(\"%s: %s: check if hostname %s is valid\",\n self, e, self.host)\n except (OSError, ConnectionRefusedError) as e:\n LOG.info(\"server connection refused: %s, retrying ...\", self)\n LOG.info(e)\n traceback.print_exc(file=sys.stderr)\n await asyncio.sleep(2)\n await self.open()\n #raise e\n else:\n coro = asyncio.start_server(\n self.cli_handler, self.host, self.port, family=socket.AF_INET)\n #self.srv = asyncio.ensure_future(coro)\n self.srv = await asyncio.wait_for(coro, 3)\n LOG.debug(\"%s server started: %s\", self, self.srv)\n\n\nclass WSLine(Line):\n def __init__(self, host='localhost', port=2000, remote_is_server=False):\n super().__init__('ws')\n self.host = host\n self.port = port\n self.remote_is_server = remote_is_server\n\n async def read_msg(self, channel):\n \"\"\"Read a full message in one shot from the socket\n\n Returns:\n bytes: read string converted to bytes\n \"\"\"\n try:\n msg = await channel.reader.recv()\n except websockets.exceptions.ConnectionClosed:\n msg = ''\n return bytes(msg, 'utf-8')\n\n async def dispatch(self):\n \"\"\"Send a json payload to all websocket clients connected\n \"\"\"\n try:\n while True:\n LOG.debug(\"dequeueing from %s\", self)\n message = await self.msg_queue.get()\n #LOG.debug(\"%s message to deliver: %s\", self, message.data)\n\n jmsg = message.to_json()\n if jmsg:\n for w in self.writers:\n # if w.line == message.dline:\n LOG.debug(\"%s:line:%s --> |%s|\",\n self, w.line, jmsg)\n await w.send(jmsg)\n except asyncio.CancelledError:\n LOG.debug(\"%s dispatch task: executed cancellation request\", self)\n\n async def cli_handler(self, websocket, path):\n LOG.debug(\"ws |%s| client connected: %r\", websocket, path)\n await self.line_handler(Channel(websocket, websocket))\n\n async def open(self):\n \"\"\"Open the websocket line connection\n\n The reader receives the incoming messages from the server app and deliver it to the junction\n The writer sends a message received from the junction to the sever app\n \"\"\"\n if (self.remote_is_server):\n try:\n LOG.debug(\"connecting %s to remote 
server: %s\", self, 'ws://{}:{}'.format(self.host, self.port))\n websocket = await websockets.connect('ws://{}:{}'.format(self.host, self.port))\n asyncio.ensure_future(self.cli_handler(websocket, \"/\"))\n except (OSError, ConnectionRefusedError) as e:\n LOG.info(\"server connection refused: %s, retrying ...\", self)\n LOG.info(e)\n await asyncio.sleep(2)\n await self.open()\n else:\n coro = websockets.server.serve(\n self.cli_handler, self.host, self.port, family=socket.AF_INET)\n self.srv = await coro\n LOG.debug(\"%s server started\", self)\n\n async def close(self):\n for c in self.writers:\n await c.close()\n if self.srv:\n LOG.debug(\"ws %s: closing server\", self)\n self.srv.close()\n\n\nclass Junction:\n \"\"\"\n \"\"\"\n\n def __init__(self):\n self.queue = asyncio.Queue()\n self.loop = asyncio.get_event_loop()\n self.registry = {}\n\n def attach(self, line):\n \"\"\"Register the line\n \"\"\"\n self.registry[line.id] = line\n\n async def detach(self, line):\n \"\"\"The inverse of Junction.attach\n \"\"\"\n # first close gracefully\n await self.registry[line.id].close()\n self.registry.pop(line.id)\n\n def route(self, src, dst, src_to_dst=None, dst_to_src=None):\n src.add_peer(dst, src_to_dst)\n dst.add_peer(src, dst_to_src)\n\n async def init(self):\n for l in self.registry:\n asyncio.ensure_future(self.registry[l].open())\n\n def run(self):\n self.loop.run_until_complete(self.init())\n\n self.loop.run_forever()\n\n async def shutdown(self):\n # wait time in case of shutdown invoked before init finishes\n await asyncio.sleep(0.1)\n for line in list(self.registry.values()):\n await self.detach(line)\n\n\n# the junction singleton\njunction = Junction()\n","repo_name":"attdona/NAIS","sub_path":"pynais/junction.py","file_name":"junction.py","file_ext":"py","file_size_in_byte":21897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"34774122180","text":"from rest_framework.response import Response\n\nfrom api.commons import viewsets\nfrom company import serializers\nfrom company.use_cases.item import shop as use_case\n\n\nclass ItemShopView(viewsets.RelationViewSet):\n \"\"\"\n Item Shop View\n \"\"\"\n serializer_class_dict = {\n 'default': serializers.ShopListSerializer,\n }\n serializer_alias_dict = {\n 'default': 'shops',\n }\n use_case_dict = {\n 'default': use_case.ListShop,\n }\n\n # LookUp\n lookup_field = 'id'\n\n def list(self, request, item_id:str, *args, **kwargs) -> Response:\n\n return super().list(request, item_id, *args, **kwargs)\n","repo_name":"mikeogawa/django-projects","sub_path":"01-clean_architecture_example/company/views/item/item_shop_view.py","file_name":"item_shop_view.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7631190964","text":"import argparse\nfrom typing import List, Dict, Optional\nfrom pathlib import Path\n\nfrom . 
import logger\nfrom .capture import Capture, Session\nfrom .utils.colmap import Camera, Image, write_model\n\n\ndef add_session_to_colmap(session: Session, colmap_cameras: Dict, colmap_images: Dict,\n image_prefix: str, keys: Optional[List] = None):\n # Prepare COLMAP cameras.\n camera_id_capture2colmap = {}\n\n for camera_id, sensor in session.cameras.items():\n colmap_camera_id = len(colmap_cameras) + 1\n colmap_cameras[colmap_camera_id] = Camera(colmap_camera_id, **sensor.asdict)\n camera_id_capture2colmap[camera_id] = colmap_camera_id\n\n T_cams2s = session.trajectories\n if session.proc is not None:\n T_s2w = session.proc.alignment_global.get_abs_pose('pose_graph_optimized')\n else:\n T_s2w = None\n if T_s2w is not None:\n T_cams2w = T_s2w * T_cams2s\n else:\n # No alignment to global world found.\n # Using the current's session coordinate frame as world frame.\n T_cams2w = T_cams2s\n\n # Prepare COLMAP images.\n if keys is None:\n keys = sorted(session.images.key_pairs())\n for ts, camera_id in keys:\n T_w2cam = session.get_pose(ts, camera_id, T_cams2w).inverse()\n colmap_image_id = len(colmap_images) + 1\n colmap_camera_id = camera_id_capture2colmap[camera_id]\n colmap_images[colmap_image_id] = Image(\n colmap_image_id, T_w2cam.qvec, T_w2cam.t, colmap_camera_id,\n (image_prefix / session.images[ts, camera_id]).as_posix(), [], [])\n\n\ndef run(capture: Capture, session_ids: List[str], output_path: Path, ext: str = '.bin'):\n output_path.mkdir(exist_ok=True, parents=True)\n\n colmap_cameras = {}\n colmap_images = {}\n colmap_points = {}\n for session_id in session_ids:\n prefix = capture.data_path(session_id).relative_to(capture.sessions_path())\n add_session_to_colmap(capture.sessions[session_id], colmap_cameras, colmap_images, prefix)\n\n # Write to disk.\n logger.info('Writing COLMAP empty %s reconstruction to %s.', ext, output_path.resolve())\n write_model(colmap_cameras, colmap_images, colmap_points, str(output_path), ext=ext)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--capture_path', type=Path, required=True)\n parser.add_argument('--session_ids', type=str, nargs='+', required=True)\n parser.add_argument('--output_path', type=Path, required=True)\n args = parser.parse_args().__dict__\n args['capture'] = Capture.load(args.pop('capture_path'))\n\n run(**args)\n","repo_name":"microsoft/lamar-benchmark","sub_path":"scantools/run_capture_to_empty_colmap.py","file_name":"run_capture_to_empty_colmap.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"96"} +{"seq_id":"22896787641","text":"#!/usr/bin/python3\n\"\"\"class dfinition\"\"\"\nBaseGeometry = __import__('7-base_geometry').BaseGeometry\n\n\nclass Rectangle(BaseGeometry):\n \"\"\"class representation a rectangle using BaseGeometry\"\"\"\n\n def __init__(self, width, height):\n \"\"\"intialization of a new Rectangle\n\n Args:\n width (int): new width of the Rectangle\n height (int): new height of the Rectangle\n \"\"\"\n self.integer_validator(\"width\", width)\n self.__width = width\n self.integer_validator(\"height\", height)\n self.__height = height\n","repo_name":"LameckL/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/8-rectangle.py","file_name":"8-rectangle.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} 
+{"seq_id":"37882686129","text":"from jsonschema import validate\nfrom typing import List, Optional\n\n\nclass Configuration:\n\n SCHEMA = {\n 'type': 'object',\n 'properties': {\n 'compress': {'type': 'boolean'},\n 'compressed_path': {'type': ['string', 'null']},\n 'clean_debug_path': {'type': 'boolean'},\n 'debug_path': {'type': ['string', 'null']},\n 'exclude_artifacts': {'type': 'array', 'items': {'type': 'string'}},\n 'exclude_plugins': {'type': 'array', 'items': {'type': 'string'}},\n 'structured_data': {'type': 'boolean'},\n 'timeout': {'type': 'integer'},\n },\n }\n\n def __init__(self):\n self.compress: bool = False\n self.compressed_path: Optional[str] = None\n self.clean_debug_path: bool = False\n self.debug_path: Optional[str] = None\n self.exclude_artifacts: List[str] = []\n self.exclude_plugins: List[str] = []\n self.structured_data: bool = False\n self.timeout: int = 20\n\n def apply(self, new_config: dict) -> None:\n validate(new_config, self.SCHEMA)\n self.__dict__.update(new_config)\n\n\nconf = Configuration()\n","repo_name":"truenas/ixdiagnose","sub_path":"ixdiagnose/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"23349382778","text":"inputs = {\n 1: '1. Para verificar se dois personagens possuem algum tipo de relação',\n 2: '2. Para verificar quem se odeia',\n 3: '3. Para verificar quem é malvado e quem é bonzinho',\n 4: '0. Para finalizar o programa'\n}\n\noptionStr = inputs[1] + '\\n' + inputs[2] + \\\n '\\n' + inputs[3] + '\\n' + inputs[4] + '\\n'\n\n\ndef divider():\n print('\\n', 50*'_', '\\n')\n","repo_name":"samuel-alves-chagas/Banco-de-dados-II","sub_path":"ExercicioAvaliativo2/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20457952985","text":"# import pymongo\r\n# import mxnet\r\n# from mxnet import nd\r\n#\r\n# #every time yield 20 items and read iobytes extract feature then insert to new nnindex\r\n#\r\n# import asyncio\r\n# import aiohttp\r\n# from io import BytesIO\r\n# import time\r\n# import requests\r\n#\r\n#\r\n# @asyncio.coroutine\r\n# def get_image(img_url):\r\n# resp = yield from requests.get(img_url)\r\n# return resp.content\r\n#\r\n# def save_image(img,fobj):\r\n# fobj.write(img)\r\n#\r\n# @asyncio.coroutine\r\n# def download_one(img_url,fobj):\r\n# image = yield from get_image(img_url)\r\n# save_image(image,fobj)\r\n\r\n# !/usr/bin/env python\r\n# import asyncio\r\n# import aiohttp\r\n#\r\n# async def fetch_img(session, url):\r\n# with aiohttp.Timeout(10):\r\n# async with session.get(url) as response:\r\n# assert response.status == 200\r\n# return await response.read()\r\n#\r\n# loop = asyncio.get_event_loop()\r\n# with aiohttp.ClientSession(loop=loop) as session:\r\n# img = loop.run_until_complete(\r\n# fetch_img(session, 'https://cdn.aidigger.com/images/instagram/f95f00da22a2e143e6e457b10544a120.jpeg'))\r\n# with open(\"img.png\", \"wb\") as f:\r\n# f.write(img)\r\n\r\n# if __name__ == '__main__':\r\n# url_list = ['https://cdn.aidigger.com/images/instagram/e2452f9daaad3ef7070adb22ee70958a.jpeg',\r\n# 'https://cdn.aidigger.com/images/instagram/bd717eaa4c351b842a497e8907b69855.jpeg',\r\n# 'https://cdn.aidigger.com/images/instagram/189a2af5d9661500b32271ca9b1865be.jpeg',\r\n# 'https://cdn.aidigger.com/images/instagram/6e70c94dd3fac214c5d7e6c061df2b2f.jpeg',\r\n# 
'https://cdn.aidigger.com/images/instagram/f95f00da22a2e143e6e457b10544a120.jpeg']\r\n# fobj_list =[BytesIO() for _ in range(len(url_list))]\r\n# start = time.time()\r\n# loop = asyncio.get_event_loop()\r\n# to_do_tasks = [download_one(url,f) for url,f in zip(url_list,fobj_list)]\r\n# res,= loop.run_until_complete(asyncio.wait(to_do_tasks))\r\n# print(len(res))\r\n# print(time.time()-start)\r\n\r\n\r\nimport asyncio\r\nimport logging\r\nfrom contextlib import closing\r\nimport aiohttp # $ pip install aiohttp\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom pymongo import MongoClient\r\nfrom mxnet import nd\r\nimport mxnet as mx\r\nimport mxnet.gluon.data.vision.transforms as T\r\nimport mxnet.gluon.model_zoo.vision as vision_model\r\nfrom models import MarginNet\r\nimport mxnet\r\nfrom mxnet.image import imread\r\n\r\nlogging.basicConfig(level=logging.WARNING, format='%(asctime)s %(message)s')\r\nimport requests\r\nimport json\r\nimport binascii\r\nimport numpy as np\r\nfrom pymongo import MongoClient\r\nfrom requests import ReadTimeout\r\nfrom pprint import pprint\r\n\r\n\r\n\r\n\r\n#image transform\r\nnormalize=T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\r\ntest_transform = T.Compose([\r\n T.Resize(256),\r\n T.CenterCrop(224),\r\n T.ToTensor(),\r\n normalize\r\n])\r\n\r\n# define mongodb connect\r\ndef get_db():\r\n mongdb={}\r\n mongdb['host']='cc.com'\r\n mongdb['port']=3717\r\n client=MongoClient(host=mongdb['host'],port=mongdb['port'])\r\n dev=client.get_database('dev')\r\n dev.authenticate(name='cc',password='cc')\r\n return dev\r\n\r\n\r\n@asyncio.coroutine\r\ndef download(url, session, semaphore, chunk_size=1<<15):\r\n with (yield from semaphore): # limit number of concurrent downloads\r\n file = BytesIO()\r\n logging.info('downloading %s', file)\r\n response = yield from session.get(url)\r\n with closing(response):\r\n while True: # save file\r\n chunk = yield from response.content.read(chunk_size)\r\n if not chunk:\r\n break\r\n file.write(chunk)\r\n logging.info('done %s', file)\r\n return file, (response.status, tuple(response.headers.items()))\r\n\r\ndef get_net(gpu_id):\r\n param_path = 'checkpoints/Fashion_In.params'\r\n base_net = vision_model.get_model('resnet50_v2')\r\n net = MarginNet(base_net.features, 128, batch_k=5)\r\n context = [mxnet.gpu(gpu_id)]\r\n net.initialize()\r\n net.collect_params().reset_ctx(context)\r\n net.load_parameters(filename=param_path, ctx=context[0])\r\n return net,context\r\n\r\ndef get_cursor(db,collection_name,batch_size):\r\n #define source nn_prod data fetch\r\n nn_prod = db.get_collection(collection_name)\r\n cursor = nn_prod.find({},{'vector':0,},batch_size=batch_size)\r\n return cursor\r\n\r\ndef get_target_colection(db):\r\n colletion_name = 'image_metric_taobao128'\r\n target_collection = db.get_collection(colletion_name)\r\n return target_collection\r\n\r\n\r\ndef convert_vector_to_ascii(vector):\r\n \"\"\"convert a numpy array or a list to bytes, and to make it can be dumped by json, we convert the bytes to string\r\n \"\"\"\r\n if isinstance(vector, (list, np.ndarray, np.generic)):\r\n vector = np.asarray(vector, dtype=np.float32)\r\n else:\r\n raise ValueError(\"vector must be list or numpy array\")\r\n # add decode to convert base64 bytes to string\r\n return binascii.b2a_base64(vector.tobytes()).decode()\r\n\r\ndef get_nn_config(model_name ='image_metric_taobao128'):\r\n\r\n host = 'https://alpha-nnsearch.aidigger.com/api/v1/'\r\n path = 'model/'+model_name+'/'\r\n return 
host,path\r\n\r\n# begin to set basic paramter\r\nbatch_size=20\r\nurls= []\r\nrecords = []\r\ndb = get_db()\r\ncursor = get_cursor(db,'image_nn_prod',batch_size)\r\nnet,context = get_net(0)\r\nhost,path = get_nn_config('image_metric_taobao128')\r\n# set basic parameter finished\r\n\r\ntarge_collection = get_target_colection(db)\r\n\r\nloop = asyncio.get_event_loop()\r\nsession = aiohttp.ClientSession()\r\nsemaphore = asyncio.Semaphore(20)\r\n\r\nfor item in cursor:\r\n if len(urls)==batch_size:\r\n #process\r\n #with closing(asyncio.get_event_loop()) as loop, closing(aiohttp.ClientSession()) as session:\r\n try:\r\n download_tasks = (download(url, session, semaphore) for url in urls)\r\n result = loop.run_until_complete(asyncio.gather(*download_tasks))\r\n except Exception as e:\r\n print(e)\r\n urls = []\r\n records = []\r\n continue\r\n\r\n nd_img_list = []\r\n succeed_ids = []\r\n docs = []\r\n for i,(f_ret,rec) in enumerate(zip(result,records)):\r\n try:\r\n pil_img = Image.open(f_ret[0])\r\n nd_img_list.append(test_transform(nd.array(np.asarray(pil_img))))\r\n new_rec = {}\r\n new_rec['_id'] = rec['_id']\r\n new_rec['_int_id'] = rec['int_id']\r\n new_rec.update(rec['_source'])\r\n docs.append(new_rec)\r\n except Exception as e:\r\n print(urls[i])\r\n print(e)\r\n\r\n\r\n #nd_img_list = [test_transform(nd.array(np.asarray(Image.open(f_ret[0])))) for f_ret in result ]\r\n if len(nd_img_list)!=len(records) or len(nd_img_list)< 2:\r\n if len(nd_img_list)<2:\r\n print(urls[0])\r\n print(\"caution,failed to download all pictures\")\r\n print(result[0][1][0],result[0][1][1])\r\n\r\n records.clear()\r\n urls.clear()\r\n docs.clear()\r\n for f_ret in result:\r\n try:\r\n if not f_ret[0].closed:\r\n f_ret[0].close()\r\n except Exception as e:\r\n print(e)\r\n continue\r\n\r\n nd_tensor_img = nd.stack(*nd_img_list,axis=0)\r\n nd_tensor_img = nd_tensor_img.as_in_context(context[0])\r\n data = net.extract(nd_tensor_img)\r\n data = data.asnumpy()\r\n\r\n\r\n\r\n doc_types =['image']*len(records)\r\n vectors = [convert_vector_to_ascii(v) for v in data ]\r\n\r\n ret = requests.post(host + path + \"add/batch\", json={\"docs\": docs, \"doc_types\": doc_types, \"vectors\": vectors})\r\n print(ret.json())\r\n\r\n #for annother loop\r\n doc_types=[]\r\n vectors =[]\r\n doc_types=[]\r\n records = []\r\n urls=[]\r\n for f_ret in result:\r\n try:\r\n if not f_ret[0].closed:\r\n f_ret[0].close()\r\n except Exception as e:\r\n print(e)\r\n else:\r\n records.append(item)\r\n urls.append(item['_source']['cdn_url'])\r\n\r\n\r\n\r\n","repo_name":"hudengjunai/DeepEmbeding","sub_path":"server/copy_nn.py","file_name":"copy_nn.py","file_ext":"py","file_size_in_byte":8240,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"96"} +{"seq_id":"18427705070","text":"import json\nimport os\nimport urllib\nfrom urllib.parse import urlparse\n\nimport redis\nimport requests\nimport yaml\nfrom openstack.compute.v2.server import Server\nfrom ttypes import (\n Backend,\n BackendNotFoundException,\n CondaPackage,\n DefaultException,\n PlaybookNotFoundException,\n PlaybookResult,\n TemplateNotFoundException,\n)\nfrom util.logger import setup_custom_logger\nfrom util.state_enums import VmTaskStates\n\nfrom .playbook.playbook import Playbook\nfrom .template.template import ResearchEnvironmentMetadata, Template\n\nlogger = setup_custom_logger(__name__)\nBIOCONDA = \"bioconda\"\n\n\nclass ForcConnector:\n def __init__(self, config_file: str):\n logger.info(\"Initializing Forc Connector\")\n\n 
self.FORC_URL: str = \"\" # type: ignore\n self.FORC_ACCESS_URL: str = \"\" # type: ignore\n self.FORC_REMOTE_ID: str = \"\" # type: ignore\n self.GITHUB_PLAYBOOKS_REPO: str = \"\" # type: ignore\n self.REDIS_HOST: str = \"\" # type: ignore\n self.REDIS_PORT: int = None # type: ignore\n self.redis_pool: redis.ConnectionPool = None # type: ignore\n self.redis_connection: redis.Redis.connection_pool = None\n self._active_playbooks: dict[str, Playbook] = {}\n self.load_config(config_file=config_file)\n self.load_env()\n self.connect_to_redis()\n self.template = Template(\n github_playbook_repo=self.GITHUB_PLAYBOOKS_REPO,\n forc_url=self.FORC_URL,\n forc_api_key=self.FORC_API_KEY,\n )\n\n def load_config(self, config_file: str) -> None:\n logger.info(\"Load config file: FORC\")\n with open(config_file, \"r\") as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)\n self.FORC_URL = cfg[\"forc\"][\"forc_url\"]\n url_components = urlparse(cfg[\"forc\"][\"forc_url\"])\n path = url_components.netloc.split(\":\")[0]\n\n self.FORC_ACCESS_URL = f\"{url_components.scheme}://{path}/\"\n self.FORC_REMOTE_ID = cfg[\"forc\"][\"forc_security_group_id\"]\n self.GITHUB_PLAYBOOKS_REPO = cfg[\"forc\"][\"github_playbooks_repo\"]\n self.REDIS_HOST = cfg[\"redis\"][\"host\"]\n self.REDIS_PORT = cfg[\"redis\"][\"port\"]\n\n def connect_to_redis(self) -> None:\n logger.info(\"Connect to redis\")\n self.redis_pool = redis.ConnectionPool(\n host=self.REDIS_HOST, port=self.REDIS_PORT\n )\n self.redis_connection = redis.Redis(\n connection_pool=self.redis_pool, charset=\"utf-8\"\n )\n if self.redis_connection.ping():\n logger.info(\"Redis connection created!\")\n else:\n logger.error(\"Could not connect to redis!\")\n\n def get_users_from_backend(self, backend_id: str) -> list[str]:\n logger.info(f\"Get users from backend {backend_id}\")\n get_url = f\"{self.FORC_URL}users/{backend_id}\"\n try:\n response = requests.get(\n get_url,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n if response.status_code == 401:\n return [\"Error: 401\"]\n else:\n return [response.json()]\n except requests.Timeout as e:\n logger.info(msg=f\"Get users for backend timed out. {e}\")\n return []\n\n def delete_user_from_backend(self, backend_id: str, user_id: str) -> dict[str, str]:\n logger.info(f\"Delete user {user_id} from backend {backend_id}\")\n delete_url = f\"{self.FORC_URL}users/{backend_id}\"\n user_info = {\n \"user\": user_id,\n }\n try:\n response = requests.delete(\n delete_url,\n json=user_info,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n data: dict[str, str] = response.json()\n return data\n except requests.Timeout as e:\n logger.info(msg=f\"Delete user from backend timed out. 
{e}\")\n return {\"Error\": \"Timeout.\"}\n except Exception as e:\n logger.exception(e)\n raise BackendNotFoundException(message=str(e), name_or_id=backend_id)\n\n def delete_backend(self, backend_id: str) -> None:\n logger.info(f\"Delete Backend {backend_id}\")\n delete_url = f\"{self.FORC_URL}backends/{backend_id}\"\n try:\n response = requests.delete(\n delete_url,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n if response.status_code:\n if response.status_code == 404 or response.status_code == 500:\n try:\n raise BackendNotFoundException(\n message=str(json.dumps(response.json())),\n name_or_id=str(backend_id),\n )\n except json.JSONDecodeError:\n logger.exception(str(response.content))\n raise BackendNotFoundException(\n message=str(response.content), name_or_id=str(backend_id)\n )\n\n except requests.Timeout:\n logger.exception(msg=\"delete_backend timed out\")\n raise DefaultException(message=\"delete_backend timed out\")\n\n def add_user_to_backend(self, backend_id: str, user_id: str) -> dict[str, str]:\n logger.info(f\"Add User {user_id} to backend {backend_id}\")\n try:\n post_url = f\"{self.FORC_URL}users/{backend_id}\"\n user_info = {\n \"user\": user_id,\n }\n except Exception as e:\n logger.exception(e)\n return {\"Error\": \"Could not create url or json body.\"}\n try:\n response = requests.post(\n post_url,\n json=user_info,\n timeout=(30, 30),\n headers={\n \"X-API-KEY\": self.FORC_API_KEY,\n },\n verify=True,\n )\n try:\n data: dict[str, str] = response.json()\n except Exception as e:\n logger.exception(e)\n raise BackendNotFoundException(message=str(e), name_or_id=backend_id)\n return data\n except requests.Timeout as e:\n logger.info(msg=f\"add user to backend timed out. {e}\")\n return {\"Error\": \"Timeout.\"}\n except Exception as e:\n logger.exception(e)\n raise BackendNotFoundException(message=str(e), name_or_id=backend_id)\n\n def create_backend(\n self, owner: str, user_key_url: str, template: str, upstream_url: str\n ) -> Backend:\n logger.info(\n f\"Create Backend - [Owner:{owner}, user_key_url:{user_key_url}, template:{template}, upstream_url:{upstream_url}\"\n )\n template_version = self.template.get_template_version_for(template=template)\n if template_version is None:\n logger.warning(\n f\"No suitable template version found for {template}. Aborting backend creation!\"\n )\n raise TemplateNotFoundException(\n message=f\"No suitable template version found for {template}. Aborting backend creation!\",\n template=template,\n )\n try:\n post_url = f\"{self.FORC_URL}backends\"\n backend_info = {\n \"owner\": owner,\n \"user_key_url\": user_key_url,\n \"template\": template,\n \"template_version\": template_version,\n \"upstream_url\": upstream_url,\n }\n except Exception as e:\n logger.exception(e)\n raise DefaultException(message=e)\n try:\n response = requests.post(\n post_url,\n json=backend_info,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n try:\n data = response.json()\n except Exception as e:\n logger.exception(e)\n raise DefaultException(message=e)\n logger.info(f\"Backend created {data}\")\n new_backend = Backend(\n id=int(data[\"id\"]),\n owner=data[\"owner\"],\n location_url=data[\"location_url\"],\n template=data[\"template\"],\n template_version=data[\"template_version\"],\n )\n return new_backend\n\n except requests.Timeout as e:\n logger.info(msg=f\"create_backend timed out. 
{e}\")\n raise DefaultException(message=e)\n\n except Exception as e:\n logger.exception(e)\n raise DefaultException(message=e)\n\n def get_backends(self) -> list[Backend]:\n logger.info(\"Get Backends\")\n get_url = f\"{self.FORC_URL}backends\"\n try:\n response = requests.get(\n get_url,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n if response.status_code == 401:\n raise DefaultException(message=str(response.json()))\n else:\n backends = []\n for data in response.json():\n backends.append(\n Backend(\n id=data[\"id\"],\n owner=data[\"owner\"],\n location_url=data[\"location_url\"],\n template=data[\"template\"],\n template_version=data[\"template_version\"],\n )\n )\n return backends\n except requests.Timeout as e:\n logger.exception(msg=f\"create_backend timed out. {e}\")\n raise DefaultException(message=str(e))\n\n def get_backends_by_template(self, template: str) -> list[Backend]:\n logger.info(f\"Get Backends by template: {template}\")\n get_url = f\"{self.FORC_URL}backends/byTemplate/{template}\"\n try:\n response = requests.get(\n get_url,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n if response.status_code == 401:\n raise DefaultException(message=str(response.json()))\n\n else:\n backends = []\n for data in response.json():\n backends.append(\n Backend(\n id=data[\"id\"],\n owner=data[\"owner\"],\n location_url=data[\"location_url\"],\n template=data[\"template\"],\n template_version=data[\"template_version\"],\n )\n )\n return backends\n except requests.Timeout as e:\n logger.exception(msg=f\"create_backend timed out. {e}\")\n raise DefaultException(message=str(e))\n\n def get_backend_by_id(self, id: str) -> Backend:\n logger.info(f\"Get backends by id: {id}\")\n get_url = f\"{self.FORC_URL}backends/{id}\"\n try:\n response = requests.get(\n get_url,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n try:\n data = response.json()\n except Exception as e:\n logger.exception(e)\n raise DefaultException(message=str(e))\n\n return Backend(\n id=data[\"id\"],\n owner=data[\"owner\"],\n location_url=data[\"location_url\"],\n template=data[\"template\"],\n template_version=data[\"template_version\"],\n )\n except requests.Timeout as e:\n logger.exception(msg=f\"create_backend timed out. {e}\")\n raise DefaultException(message=str(e))\n\n def get_backends_by_owner(self, owner: str) -> list[Backend]:\n logger.info(f\"Get backends by owner: {owner}\")\n get_url = f\"{self.FORC_URL}backends/byOwner/{owner}\"\n try:\n response = requests.get(\n get_url,\n timeout=(30, 30),\n headers={\"X-API-KEY\": self.FORC_API_KEY},\n verify=True,\n )\n if response.status_code == 401:\n raise DefaultException(message=str(response.json()))\n\n else:\n backends = []\n for data in response.json():\n backends.append(\n Backend(\n id=data[\"id\"],\n owner=data[\"owner\"],\n location_url=data[\"location_url\"],\n template=data[\"template\"],\n template_version=data[\"template_version\"],\n )\n )\n return backends\n except requests.Timeout as e:\n logger.exception(msg=f\"create_backend timed out. 
{e}\")\n raise DefaultException(message=str(e))\n\n def has_forc(self) -> bool:\n logger.info(\"Check has forc\")\n return self.FORC_URL is not None\n\n def get_forc_url(self) -> str:\n logger.info(\"Get Forc Url\")\n return self.FORC_URL\n\n def get_forc_access_url(self) -> str:\n logger.info(\"Get Forc Access Url\")\n return self.FORC_ACCESS_URL\n\n def load_env(self) -> None:\n logger.info(\"Load env: FORC\")\n self.FORC_API_KEY = os.environ.get(\"FORC_API_KEY\", None)\n\n def get_playbook_logs(self, openstack_id: str) -> PlaybookResult:\n logger.warning(f\"Get Playbook logs {openstack_id}\")\n if (\n self.redis_connection.exists(openstack_id) == 1\n and openstack_id in self._active_playbooks\n ):\n playbook = self._active_playbooks.get(openstack_id)\n logger.warning(f\"playbook {playbook}\")\n if not playbook:\n raise PlaybookNotFoundException(\n message=f\"No active Playbook found for {openstack_id}!\",\n name_or_id=openstack_id,\n )\n status, stdout, stderr = playbook.get_logs()\n logger.warning(f\" Playbook logs {openstack_id} status: {status}\")\n\n playbook.cleanup(openstack_id)\n self._active_playbooks.pop(openstack_id)\n\n return PlaybookResult(status=status, stdout=stdout, stderr=stderr)\n else:\n raise PlaybookNotFoundException(\n message=f\"No active Playbook found for {openstack_id}!\",\n name_or_id=openstack_id,\n )\n\n def set_vm_wait_for_playbook(\n self, openstack_id: str, private_key: str, name: str\n ) -> None:\n logger.info(\n f\"Set vm {openstack_id}: {VmTaskStates.PREPARE_PLAYBOOK_BUILD.value} \"\n )\n self.redis_connection.hset(\n name=openstack_id,\n mapping=dict(\n key=private_key,\n name=name,\n status=VmTaskStates.PREPARE_PLAYBOOK_BUILD.value,\n ),\n )\n\n def get_playbook_status(self, server: Server) -> Server:\n openstack_id = server.id\n\n if self.redis_connection.exists(openstack_id) == 1:\n logger.info(f\"Get VM {openstack_id} Playbook status\")\n\n if openstack_id in self._active_playbooks:\n logger.info(self._active_playbooks)\n playbook = self._active_playbooks[openstack_id]\n playbook.check_status(openstack_id)\n status = self.redis_connection.hget(openstack_id, \"status\").decode(\"utf-8\")\n logger.info(f\"VM {openstack_id} Playbook status -> {status}\")\n\n # Server needs to have no task state(so port is not closed)\n if (\n status == VmTaskStates.PREPARE_PLAYBOOK_BUILD.value\n and not server.task_state\n ):\n server.task_state = VmTaskStates.PREPARE_PLAYBOOK_BUILD.value\n elif status == VmTaskStates.BUILD_PLAYBOOK.value:\n server.task_state = VmTaskStates.BUILD_PLAYBOOK.value\n elif status == VmTaskStates.PLAYBOOK_FAILED.value:\n server.task_state = VmTaskStates.PLAYBOOK_FAILED.value\n elif status == VmTaskStates.PLAYBOOK_SUCCESSFUL.value:\n server.task_state = VmTaskStates.PLAYBOOK_SUCCESSFUL.value\n return server\n\n def get_metadata_by_research_environment(\n self, research_environment: str\n ) -> ResearchEnvironmentMetadata:\n logger.info(f\"Get Metadata Research environment: {research_environment}\")\n if research_environment in self.template.loaded_research_env_metadata:\n resenv_metadata = self.template.loaded_research_env_metadata[\n research_environment\n ]\n return resenv_metadata\n elif (\n research_environment != \"user_key_url\" and research_environment != BIOCONDA\n ):\n logger.error(\n f\"Failure to load metadata of reasearch enviroment: {research_environment}\"\n )\n return None\n return None\n\n def create_and_deploy_playbook(\n self,\n public_key: str,\n research_environment_template: str,\n create_only_backend: bool,\n 
conda_packages: list[CondaPackage],\n apt_packages: list[str],\n openstack_id: str,\n port: int,\n ip: str,\n cloud_site: str,\n base_url: str = \"\",\n ) -> int:\n logger.info(f\"Starting Playbook for (openstack_id): {openstack_id}\")\n key: str = self.redis_connection.hget(openstack_id, \"key\").decode(\"utf-8\")\n playbook = Playbook(\n ip=ip,\n port=port,\n research_environment_template=research_environment_template,\n research_environment_template_version=self.template.get_template_version_for(\n template=research_environment_template\n ),\n create_only_backend=create_only_backend,\n osi_private_key=key,\n public_key=urllib.parse.unquote(public_key),\n pool=self.redis_pool,\n conda_packages=conda_packages,\n apt_packages=apt_packages,\n cloud_site=cloud_site,\n base_url=base_url,\n )\n self.redis_connection.hset(\n openstack_id, \"status\", VmTaskStates.BUILD_PLAYBOOK.value\n )\n playbook.run_it()\n self._active_playbooks[openstack_id] = playbook\n logger.info(f\"Playbook for (openstack_id): {openstack_id} started!\")\n return 0\n","repo_name":"deNBI/simplevm-client","sub_path":"simple_vm_client/forc_connector/forc_connector.py","file_name":"forc_connector.py","file_ext":"py","file_size_in_byte":18960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38339402444","text":"import sqlite3\n\nfrom flask import Flask, jsonify, json, render_template, request, url_for, redirect, flash\nfrom werkzeug.exceptions import abort\nimport os\nimport logging\nimport sys\n\n# Function to get a database connection.\n# This function connects to database with the name `database.db`\ndef get_db_connection():\n connection = sqlite3.connect('database.db')\n connection.row_factory = sqlite3.Row\n app.config['db_connection_count'] = app.config['db_connection_count'] + 1\n logging.info('successfully connected to DB !!!')\n return connection\n\n# Function to get a post using its ID\ndef get_post(post_id):\n connection = get_db_connection()\n post = connection.execute('SELECT * FROM posts WHERE id = ?',\n (post_id,)).fetchone()\n connection.close()\n return post\n\n\n# Define the Flask application\napp = Flask(__name__)\napp.config['db_connection_count'] = 0\n\n# Define the main route of the web application\n@app.route('/')\ndef index():\n connection = get_db_connection()\n posts = connection.execute('SELECT * FROM posts').fetchall()\n connection.close()\n return render_template('index.html', posts=posts)\n\n# Define how each individual article is rendered\n# If the post ID is not found a 404 page is shown\n@app.route('/')\ndef post(post_id):\n post = get_post(post_id)\n if post is None:\n logging.error('Article does not present in DB!')\n return render_template('404.html'), 404\n else:\n logging.info('A new article is created')\n logging.debug('Article with title : %s retrieved!', post[\"title\"])\n return render_template('post.html', post=post)\n\n# Define the About Us page\n@app.route('/about')\ndef about():\n logging.debug(\"The 'About Us' page is retrieved\")\n return render_template('about.html')\n\n# Define the post creation functionality\n@app.route('/create', methods=('GET', 'POST'))\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n content = request.form['content']\n\n if not title:\n flash('Title is required!')\n else:\n connection = get_db_connection()\n connection.execute('INSERT INTO posts (title, content) VALUES (?, ?)',\n (title, content))\n connection.commit()\n connection.close()\n logging.debug('New 
Article with title %s created!', title)\n return redirect(url_for('index'))\n\n return render_template('create.html')\n\n\n# Define endpoint for healthy\n@app.route(\"/healthz\")\ndef get_healthy():\n try:\n connection = get_db_connection()\n connection.execute(\"SELECT * FROM posts LIMIT 1\")\n connection.close()\n return {\"result\": \"OK - healthy\"}\n except Exception:\n return {\"result\": \"ERROR - unhealthy\"}, 500\n\n\n# Define endpoint for metrics\n@app.route(\"/metrics\")\ndef get_metrics():\n connection = get_db_connection()\n posts = connection.execute(\"SELECT * FROM posts\").fetchall()\n post_length = len(posts)\n connection.close()\n content = {\"db_connection_count\": app.config['db_connection_count'], \"post_count\": post_length}\n return content\n\n# initialize_logger_message\ndef initialize_logger_message():\n log_level = os.getenv(\"LOGLEVEL\", \"DEBUG\").upper()\n log_level = (\n getattr(logging, log_level)\n if log_level in [\"INFO\", \"DEBUG\", \"ERROR\", ]\n else logging.DEBUG\n )\n\n # Set logger to handle STDOUT and STDERR\n stdout_handler = logging.StreamHandler(sys.stdout)\n stderr_handler = logging.StreamHandler(sys.stderr)\n handlers = [stderr_handler, stdout_handler]\n\n # Create the log file and format each log\n logging.basicConfig(\n format='%(levelname)s:%(name)s:%(asctime)s, %(message)s',\n level=log_level,\n datefmt='%m-%d-%Y, %H:%M:%S',\n handlers=handlers\n )\n\n\n# start the application on port 3111\nif __name__ == \"__main__\":\n initialize_logger_message()\n app.run(host='0.0.0.0', port='3111')\n","repo_name":"Chiru1610/UdacityAssignement","sub_path":"techtrends/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30031084588","text":"# Write a script that creates a dictionary of keys, `n`\r\n# and values `n * n` for numbers 1 to 10. For example:\r\n# result = {1: 1, 2: 4, 3: 9, ... and so on}\r\n# use a for-loop\r\ndict={n:n*n for n in range (1,10)}\r\nprint(dict)\r\n\r\n###########OR##############\r\n\r\ndictt={}\r\nfor i in range(1,10):\r\n dictt.update({i:i*i})\r\nprint(dictt) ","repo_name":"Rashashajahan/Python_Programming","sub_path":"05_04_dict_create.py","file_name":"05_04_dict_create.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"32864066103","text":"def meow(n: int) -> str:\n \"\"\"\"\n Meow n times\n \n :param n: Number of times to meow\n :type n: int\n :raise TypeError: If n is not an int\n :return: A string of n meows, one per line\n :rtype: str\n \"\"\" \n # Triple quotes or double quotes help build docstrings, add to intellisense\n return \"meow\\n\" * n\n \nnumber: int = int(input(\"Number: \"))\n\nmeows: str = meow(number)\nprint(meows, end = \"\")\n","repo_name":"V4nish/CS50P","sub_path":"lesson9 - etc/meows9.py","file_name":"meows9.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71882015355","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'caesarCipher' function below.\n#\n# The function is expected to return a STRING.\n# The function accepts following parameters:\n# 1. STRING s\n# 2. 
INTEGER k\n#\n\nminUppercase = 65\nmaxUppercase = 90\n\nminLowercase = 97\nmaxLowercase = 122\n\ndef caesarCipher(s, k):\n # Write your code here\n cipherString = \"\"\n for char in s:\n cipherString += caesarCipherCharacter(char, k)\n \n return cipherString\n\n\ndef caesarCipherCharacter(char, k):\n charInt = ord(char)\n \n if (charInt < minUppercase or charInt > maxUppercase) and (charInt < minLowercase or charInt > maxLowercase):\n return char\n\n alphabetLength = 26\n\n if char.islower():\n return chr((((charInt - minLowercase) + k) % alphabetLength) + minLowercase)\n\n return chr((((charInt - minUppercase) + k) % alphabetLength) + minUppercase)\n\n\nif __name__ == '__main__':\n s = \"www.abc.xy\"\n k = 87\n\n result = caesarCipher(s, k)\n print(result)","repo_name":"kingdavid425/interview-prep","sub_path":"hackerrank-1-week-prep-kit/day-3/caesar-cipher.py","file_name":"caesar-cipher.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15670562674","text":"import numpy as np\n\ndef show(m, o):\n print(m, o.ndim, o.size, o.shape, o.dtype,'\\n', o,'\\n')\n\ndef main():\n array1 = np.arange(27, dtype=np.int8)\n show('array1', array1)\n array2 = np.arange(1, 10+1, 0.1)\n show('array2', array2)\n array3 = np.linspace(1, 10, 19)\n show('array3', array3)\n\nif __name__ == '__main__':\n main()","repo_name":"freshmea/catholic_3d_space_infomation","sub_path":"numpy/numpy2.py","file_name":"numpy2.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14809765315","text":"import inspect\n\nimport abjad\nimport baca\nfrom abjadext import rmakers\n\n\ndef circuit():\n \"\"\"\n <6-111 7-68 8-31 1-175 2-93 3-31 4-2 5-85>\n \"\"\"\n circuit = 8 * [[0]]\n circuit[1 - 1] = [-37, -18, -16, -5, -3, 3, 13, 14, 16, 22, 24, 29]\n circuit[2 - 1] = [-25, -17, -12, -10, -6, -4, -2, 5, 15, 16, 21, 25]\n circuit[3 - 1] = [-36, -34, -26, -20, -16, -7, 3, 13, 23, 31, 42, 45]\n circuit[4 - 1] = [10, 19, 20, 23, 24, 26, 27, 29, 30, 33, 37, 40]\n circuit[5 - 1] = [-8, -4, -1, 5, 7, 12, 14, 15, 18, 21, 22, 25]\n circuit[6 - 1] = [-20, -13, -12, -10, -2, 3, 8, 9, 13, 17, 19, 30]\n circuit[7 - 1] = [-36, -32, -25, -22, -21, -14, -11, -4, -3, 5, 6, 7]\n circuit[8 - 1] = [-34, -29, -26, -19, -12, -9, 1, 4, 11, 30, 32, 45]\n return circuit\n\n\ndef make_empty_score():\n tag = baca.helpers.function_name(inspect.currentframe())\n global_context = baca.score.make_global_context()\n violin_music_voice = abjad.Voice(name=\"Violin.Music\", tag=tag)\n violin_music_staff = abjad.Staff(\n [violin_music_voice],\n name=\"Violin.Staff\",\n tag=tag,\n )\n piano_rh_music_voice = abjad.Voice(name=\"Piano.RH.Music\", tag=tag)\n piano_rh_music_staff = abjad.Staff(\n [piano_rh_music_voice],\n name=\"Piano.RH.Staff\",\n tag=tag,\n )\n piano_lh_music_voice = abjad.Voice(name=\"Piano.LH.Music\", tag=tag)\n piano_lh_music_staff = abjad.Staff(\n [piano_lh_music_voice],\n name=\"Piano.LH.Staff\",\n tag=tag,\n )\n piano_staff_group = abjad.StaffGroup(\n [piano_rh_music_staff, piano_lh_music_staff],\n lilypond_type=\"PianoStaff\",\n name=\"PianoStaff\",\n tag=tag,\n )\n music_context = abjad.Context(\n [violin_music_staff, piano_staff_group],\n lilypond_type=\"MusicContext\",\n simultaneous=True,\n name=\"MusicContext\",\n tag=tag,\n )\n score = abjad.Score([global_context, music_context], name=\"Score\", tag=tag)\n 
baca.score.assert_lilypond_identifiers(score)\n baca.score.assert_unique_context_names(score)\n return score\n\n\ndef make_piano_material(staff, circuit):\n assert staff in (\"rh\", \"lh\")\n tag = baca.helpers.function_name(inspect.currentframe())\n maker = abjad.makers.tuplet_from_ratio_and_pair\n pairs = {}\n pairs[\"rh\"] = [(n, 16) for n in (4, 3, 3, 4, 3, 3, 4, 4)]\n pairs[\"lh\"] = [(n, 16) for n in (3, 4, 3, 2, 4, 4, 4, 4)]\n proportions = {}\n proportions[\"rh\"] = [\n (2, 2, 2, 1, 1, 1, 1),\n (1, 1, 4, 4, 4),\n (4, 4, 2, 2, 1, 1),\n (4, 1, 1, 1, 1, 4, 4, 2, 1, 1, 1, 1),\n (4, 2, 2, 2, 2, 1, 1, 1, 1),\n (4, 4, 1, 1, 4, 1, 1),\n (4, 12, 12),\n (1, 1, 2, 2, 4, 4),\n ]\n proportions[\"lh\"] = [\n (4, 3, 3, 3, 1),\n (4, 4, 3, 3, 2, 2, 2),\n (2, 2, 2, 4, 4, 4),\n (-8,),\n (6, 6, 8),\n (2, 2, 3, 3, 4),\n (2, 2, 1, 1, 1, 1, 4, 4, 4),\n (6, 6, 2, 2, 1, 1),\n ]\n music = abjad.Voice(name=\"Temporary\")\n for proportion, pair, aggregate in zip(proportions[staff], pairs[staff], circuit):\n if staff == \"rh\":\n aggregate = list(reversed(aggregate))\n tuplet = maker(tuple(proportion), pair, tag=tag)\n music.append(tuplet)\n duration = abjad.get.duration(tuplet)\n pair = abjad.duration.with_denominator(duration, 32)\n tuplet.denominator = pair[0]\n leaves = abjad.select.leaves(tuplet)\n abjad.beam(leaves, tag=tag)\n notes = abjad.select.leaves(tuplet, pitched=True)\n for note, pitch_number in zip(notes, aggregate):\n note.written_pitch = pitch_number\n music.insert(-1, abjad.Rest(\"r8\", tag=tag))\n rmakers.hide_trivial(music)\n music = abjad.mutate.eject_contents(music)\n return music\n\n\ndef make_violin_rhythm():\n tag = baca.helpers.function_name(inspect.currentframe())\n definitions = [\n ((4, 2, 2, 2), (8, 16)),\n ((2, 2, 4, 1, 1), (8, 16)),\n ((4, 2, 2, 2), (8, 16)),\n ((3, 2), (4, 16)),\n ]\n maker = abjad.makers.tuplet_from_ratio_and_pair\n voice = abjad.Voice(name=\"Temporary\")\n for definition in definitions:\n ratio, pair = definition\n assert isinstance(ratio, tuple)\n tuplet = maker(ratio, pair, tag=tag)\n voice.append(tuplet)\n leaves = abjad.select.leaves(tuplet)\n abjad.beam(leaves, tag=tag)\n voice.insert(-1, abjad.Rest(\"r8\", tag=tag))\n components = abjad.mutate.eject_contents(voice)\n return components\n\n\ndef violin_pitches():\n \"\"\"\n 1-175\n \"\"\"\n aggregate = [10, 19, 20, 23, 24, 26, 27, 29, 30, 33, 37, 40]\n assert aggregate == [10, 19, 20, 23, 24, 26, 27, 29, 30, 33, 37, 40]\n cary = [[-2, -12, -10], [18, 8, 7, 17], [15, 25, 21, 4, 11]]\n order_1 = abjad.sequence.flatten(cary)\n order_1 = [_ % 12 for _ in order_1]\n assert order_1 == [10, 0, 2, 6, 8, 7, 5, 3, 1, 9, 4, 11]\n order_2 = [abjad.sequence.rotate(_, n=1) for _ in cary]\n order_2 = abjad.sequence.rotate(order_2, n=-1)\n order_2 = abjad.sequence.flatten(order_2)\n order_2 = [_ % 12 for _ in order_2]\n assert order_2 == [5, 6, 8, 7, 11, 3, 1, 9, 4, 2, 10, 0]\n order_3 = [abjad.sequence.rotate(_, n=2) for _ in cary]\n order_3 = abjad.sequence.rotate(order_3, n=-2)\n order_3 = abjad.sequence.flatten(order_3)\n order_3 = [_ % 12 for _ in order_3]\n assert order_3 == [4, 11, 3, 1, 9, 0, 2, 10, 7, 5, 6, 8]\n aggregate_ = abjad.PitchSet(aggregate)\n violin_pitches = []\n orders = (order_1, order_2, order_3)\n for order in orders:\n order = [abjad.NumberedPitchClass(_) for _ in order]\n pitches_ = baca.pcollections.register_pcs(aggregate_, order)\n violin_pitches.extend(pitches_)\n return violin_pitches\n\n\ninstruments = {\n \"Piano\": abjad.Piano(),\n \"Violin\": abjad.Violin(),\n}\n\n\nmetronome_marks = {\n 
\"32\": abjad.MetronomeMark(abjad.Duration(1, 8), 32),\n}\n\n\nshort_instrument_names = {\n \"Vn.\": abjad.ShortInstrumentName(r\"\\hijinks-vn-markup\"),\n \"Pf.\": abjad.ShortInstrumentName(r\"\\hijinks-pf-markup\", context=\"PianoStaff\"),\n}\n\n\nmanifests = {\n \"abjad.Instrument\": instruments,\n \"abjad.MetronomeMark\": metronome_marks,\n \"abjad.ShortInstrumentName\": short_instrument_names,\n}\n\n\nvoice_abbreviations = {\n \"vn\": \"Violin.Music\",\n \"rh\": \"Piano.RH.Music\",\n \"lh\": \"Piano.LH.Music\",\n}\n","repo_name":"trevorbaca/hijinks","sub_path":"hijinks/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"72894722235","text":"import pandas as pd\nfrom sklearn.externals import joblib\nimport numpy as np\nfrom sklearn import svm\n\nfrom bmpentry.BMPService import HistoryService\nfrom bmpentry.BMPModel.PicProcess.Handle import Handle\n\nclass Step3(Handle):\n\n def __init__(self):\n pass\n\n def doProcess(self, pic_dir, oid):\n history = HistoryService()\n rr, rg, ro = history.queryRatio(oid)\n resultab = self.predictAB(rr, rg)\n resultall = self.predictAll(rr, rg, ro)\n history.updateResult(oid, resultab, resultall)\n print(\"Chain 003 execute\")\n\n def predictAB(self, RatioR, RatioG):\n data = {\n 'RatioR': [RatioR],\n 'RatioG': [RatioG],\n }\n frame = pd.DataFrame(data)\n clf = joblib.load('bmpentry/BMPModel/PicProcess/mmodel/PredictAB.m')\n res = clf.predict(frame)\n return res\n\n def predictAll(self, RatioR, RatioG, RatioBg):\n data = {\n 'RatioR': [RatioR],\n 'RatioG': [RatioG],\n 'RatioBg': [RatioBg],\n }\n frame = pd.DataFrame(data)\n clf = joblib.load('bmpentry/BMPModel/PicProcess/mmodel/PredictAll.m')\n res = clf.predict(frame)\n return res\n","repo_name":"leobod/bmp","sub_path":"bmpentry/BMPModel/PicProcess/Step3.py","file_name":"Step3.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"13424498750","text":"# Convolutional Neural Networks - MNIST dataset\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\nfrom keras.optimizers import RMSprop,Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ReduceLROnPlateau\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Read the training data\ntrain = pd.read_csv(\"input/mnist_train.csv\")\nprint(\"Shape of training data : \",train.shape)\n\n# Read the testing data\ntest = pd.read_csv(\"input/mnist_test.csv\")\nprint(\"Shape of testing data : \",test.shape)\n\n# Extract training features and labels\nX_train = train.drop(labels = [\"label\"],axis = 1)\nY_train = train[\"label\"]\n\n# Extract testing features and labels\nX_test = test.drop(labels = [\"label\"],axis = 1)\nY_test = test[\"label\"]\n\n# Convert to numpy arrays\nX_train = np.array(X_train).reshape(-1,28,28,1)\nX_test = np.array(X_test).reshape(-1,28,28,1)\n\n# Normalize the data\nX_train = X_train / 255.0\nX_test = X_test / 255.0\nprint(\"Reshaped training data: \",X_train.shape)\nprint(\"Reshaped testing data: \",X_test.shape)\n\n# Encode the labels\nY_train = to_categorical(Y_train, num_classes = 
10)\nY_test = to_categorical(Y_test, num_classes = 10)\n \n# Create model\nmodel = Sequential()\n\n# Convolution layer 1 and Pooling layer 1\nmodel.add(Conv2D(filters = 16, \n kernel_size = (5,5),\n padding = 'Same', \n activation ='relu', \n input_shape = (28,28,1)))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\n# Convolution layer 2 and Pooling layer 2\nmodel.add(Conv2D(filters = 32, \n kernel_size = (3,3),\n padding = 'Same', \n activation ='relu'))\nmodel.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))\n\n# Fully connected layer \nmodel.add(Flatten())\nmodel.add(Dense(256, activation = \"relu\"))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation = \"softmax\"))\n\n# Model summary\nprint(\"Model architecture : \")\nmodel.summary()\n\n# Define the optimizer\noptimizer = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999)\n\n# Compile the model\nmodel.compile(optimizer = optimizer , \n loss = \"categorical_crossentropy\", \n metrics=[\"accuracy\"])\n\n# Data augmentation\ndatagen = ImageDataGenerator(\n featurewise_center = False, # set input mean to 0 over the dataset\n samplewise_center = False, # set each sample mean to 0\n featurewise_std_normalization = False, # divide inputs by std of the dataset\n samplewise_std_normalization = False, # divide each input by its std\n zca_whitening = False, # dimesion reduction\n rotation_range = 5, # randomly rotate images in the range 5 degrees\n zoom_range = 0.1, # Randomly zoom image 10%\n width_shift_range = 0.1, # randomly shift images horizontally 10%\n height_shift_range = 0.1, # randomly shift images vertically 10%\n horizontal_flip = False, # randomly flip images\n vertical_flip = False) # randomly flip images\n\n# Fit the datagen to X_train\ndatagen.fit(X_train)\n\nepochs = 10\nprint(\"Number of epochs : \",epochs)\nbatch_size = 250\nprint(\"Batch size : \",batch_size)\n\n# Fit the model\nhistory = model.fit_generator(datagen.flow(X_train,Y_train, batch_size = batch_size),\n epochs = epochs, \n validation_data = (X_test,Y_test), \n steps_per_epoch = X_train.shape[0] // batch_size)\n\n# Plot the loss and accuracy curves for training and validation \nplt.plot(history.history['loss'], color = 'r', label = \"trainig loss\")\nplt.plot(history.history['val_loss'], color = 'b', label = \"validation loss\")\nplt.title(\"Loss vs Epochs\")\nplt.xlabel(\"Number of Epochs\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.show()\n \nplt.plot(history.history['accuracy'], color = 'r', label = \"trainig accuracy\")\nplt.plot(history.history['val_accuracy'], color = 'b', label = \"validation accuracy\")\nplt.title(\"Accuracy vs Epochs\")\nplt.xlabel(\"Number of Epochs\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()\n\n# Confusion matrix\nY_pred = model.predict(X_test)\nY_pred_classes = np.argmax(Y_pred, axis = 1) \nY_true = np.argmax(Y_test, axis = 1) \nconfusion_matrix = confusion_matrix(Y_true, Y_pred_classes) \n\n# Plot the confusion matrix\nf,ax = plt.subplots(figsize = (6, 6))\nsns.heatmap(confusion_matrix, annot = True, linewidths = 0.01,\n cmap = \"coolwarm\", linecolor=\"gray\", fmt = '.1f', ax = ax)\nplt.xlabel(\"Predicted Label\")\nplt.ylabel(\"True Label\")\nplt.title(\"Confusion Matrix\")\nplt.show()\n","repo_name":"aniketangre/CNN_Image_Classification_Keras","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41545153499","text":"\"\"\"利用正则爬取42天天气预报\n 1.爬取日期\n 2.爬取天气\n 3.爬取温度\n 4.爬取风向\n 
5.爬取风力\n\"\"\"\nimport requests\nimport re\nfrom queue import Queue\nimport threading\nfrom pymongo import MongoClient\n\n# 生产者类\nclass sheng_thread(threading.Thread):\n\n def __init__(self,sheng_queue,xiao_queue):\n self.sheng_queue = sheng_queue\n self.xiao_queue = xiao_queue\n # 重写多线程的init方法\n threading.Thread.__init__(self)\n\n # 重写run方法\n def run(self) -> None:\n while True:\n if self.sheng_queue.empty():\n pass\n\n zhen_url = self.sheng_queue.get()\n # 发送请求\n headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36\"}\n response = requests.get(url=zhen_url,headers=headers)\n response.encoding =\"utf-8\"\n self.jiexi(response.text)\n\n def jiexi(self,response):\n li =[]\n # 获取包含了所有需要元素的 ul 标签\n ul = re.search('.*(
    .*?
).*',response,re.S)\n urs = ul.group(1)\n\n a = re.finditer('(?P.*?).*?(?P.*?).*?(?P.*?).*?(?P.*?).*?(?P.*?)',urs,re.S)\n for i in a:\n dic = i.groupdict()\n li.append(dic)\n xiao_queue.put(li)\n\n# 消费者类\nclass xiao_thread(threading.Thread):\n\n def __init__(self,xiao_queue):\n self.xiao_queue = xiao_queue\n threading.Thread.__init__(self)\n\n def run(self) -> None:\n while True:\n if self.xiao_queue.empty:\n pass\n\n data = self.xiao_queue.get()\n\n mongo = MongoClient()\n\n database = mongo.get_database(\"retianqi\")\n\n # 写入数据\n for da in data:\n database.retianqiybao.insert_one(da)\n\nif __name__ == '__main__':\n lock = threading.Lock()\n\n sheng_queue = Queue()\n\n url = \"http://www.weather.com.cn/weather15d/101250101.shtml\"\n\n sheng_queue.put(url)\n xiao_queue = Queue()\n\n for i in range(5):\n sheng = sheng_thread(sheng_queue,xiao_queue)\n sheng.start()\n\n for j in range(5):\n xiao = xiao_thread(xiao_queue)\n xiao.start()","repo_name":"MajorSnow-r/git_rm","sub_path":"爬取天气预报(re模块).py","file_name":"爬取天气预报(re模块).py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"35696738381","text":"import streamlit as st \nimport pandas as pd \nimport numpy as np\nfrom src import funciones_visualizacion as fv\n\nst.markdown(\"## **Visualización de conjunto de datos**\")\n\n\n#if st.button(\"cargar datos\"):\ndf = pd.read_csv(\"data/df_limpios.csv\")\nst.table(df.sample(3))\n\n\nst.markdown(\"### Visualización\")\n\nopciones = st.selectbox(\"Qué tipo de variables quierers visualizar\",[\"Elige una opcion\"]+[\"Numerica Vs Numerica\",\"Categoricas\",\"Numericas\",\"Numerica Vs Categorica\"])\n\nif opciones == \"Numerica Vs Numerica\":\n columna1 = st.selectbox(\"Elige la primera columna\", [\"Elige una opcion\"]+ df.select_dtypes(include=np.number).columns.to_list())\n columna2 = st.selectbox(\"Elige la segunda columna\", [\"Elige una opcion\"]+ df.select_dtypes(include=np.number).columns.to_list())\n if columna1 == \"Elige una opcion\" or columna2 == \"Elige una opcion\":\n st.write(\"Necesito una opcion primo\")\n else:\n st.plotly_chart(fv.visualizar_relaciones_numericas(df, columna1, columna2))\n\nelif opciones == \"Categoricas\":\n col_categoricas = st.selectbox(\"Elige la columna categorica\", [\"Elige una opcion\"] + df.select_dtypes(include=\"O\").columns.to_list() )\n\n if col_categoricas == \"Elige una opcion\":\n st.write(\"Necesito una opcion primo\")\n else:\n st.plotly_chart(fv.visualizar_categorica_o_numerica(df, col_categoricas))\n \nelif opciones == \"Numericas\":\n col_numericas = st.selectbox(\"Elige la columna Numerica\", [\"Elige una opcion\"] + df.select_dtypes(include=np.number).columns.to_list() )\n\n if col_numericas == \"Elige una opcion\":\n st.write(\"Necesito una opcion primo\")\n else:\n st.plotly_chart(fv.visualizar_categorica_o_numerica(df, col_numericas))\n\n\nelif opciones == \"Numerica Vs Categorica\":\n col_num = st.selectbox(\"Elige una columna numerica\",[\"Elige una opcion\"] + df.select_dtypes(include=np.number).columns.to_list() )\n col_cat_hue = st.selectbox(\"Elige una columna categorica\",[\"Elige una opcion\"] + df.select_dtypes(include=\"O\").columns.to_list() )\n \n if col_num == \"Elige una opcion\" or col_cat_hue == \"Elige una opcion\":\n st.write(\"Necesito una opcion primo\")\n else:\n st.plotly_chart(fv.visualizar_numerica_con_categorica(df, col_num, col_cat_hue))\n\n\nst.markdown(\"### Visualización de estadisticas\")\n\ncol_num_box = 
st.selectbox(\"Elige una columna numerica nueva\",[\"Elige una opcion\"] + df.select_dtypes(include=np.number).columns.to_list() )\n\nif col_num_box == \"Elige una opcion\":\n st.write(\"Necesito una opcion primo\")\nelse:\n st.plotly_chart(fv.visualizar_boxplot(df, col_num_box))","repo_name":"Ironhack-Data-Madrid-PartTime-May22/apuntes_clases","sub_path":"semana-10-13/streamlit/pages/2📊-visualizacion.py","file_name":"2📊-visualizacion.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"327854367","text":"from .spark_yarn import SparkCommand\nfrom . import config\n\napp_spark_rdd_main_class = {\n \"wc\": \"org.pacman.run.MeasureWordCountRDD\",\n \"pr\": \"org.pacman.run.MeasurePageRankRDD\",\n \"km\": \"org.pacman.run.MeasureKMeansRDD\",\n \"lr\": \"org.pacman.run.MeasureLRRDD\",\n}\n\napp_name = {\n \"wc\": \"WordCount\",\n \"pr\": \"PageRank\",\n \"km\": \"KMeans\",\n \"lr\": \"LR\",\n \"cc\": \"WCC\",\n \"ts\": \"TeraSort\",\n}\n\napp_sparklet_module_name = {\n \"wc\": \"word_count\",\n \"pr\": \"page_rank_rdd\",\n \"km\": \"kmeans_new\",\n \"lr\": \"lr_new\",\n \"cc\": \"wcc_rdd\",\n \"ts\": \"terasort\",\n}\n\n\ndef to_hdfs_path(path):\n return \"hdfs://bic07:8020\" + path\n\napp_dataset_hdd = {\n \"wc\": to_hdfs_path(\"/user/ybw/hdd/Dataset/deepmind-gutenberg.text\"),\n \"pr\": to_hdfs_path(\"/user/ybw/hdd/Dataset/twitter-2010.text\"),\n \"km\": to_hdfs_path(\"/user/ybw/hdd/Dataset/data-Kmeans-80000000-64.text\"),\n \"lr\": to_hdfs_path(\"/user/ybw/hdd/Dataset/MNIST01-12665-10k-784.text\"),\n \"cc\": to_hdfs_path(\"/user/ybw/hdd/Dataset/twitter-2010.text\"),\n \"ts-in\": to_hdfs_path(\"/user/ybw/hdd/Dataset/terasort-250G\"),\n \"ts-out\": to_hdfs_path(\"/user/ybw/hdd/output/terasort-250G-sorted\"),\n}\n\napp_dataset_ssd = {\n \"wc\": to_hdfs_path(\"/user/ybw/ssd/Dataset/deepmind-gutenberg.text\"),\n \"pr\": to_hdfs_path(\"/user/ybw/ssd/Dataset/twitter-2010.text\"),\n \"km\": to_hdfs_path(\"/user/ybw/ssd/Dataset/data-Kmeans-80000000-64.text\"),\n \"lr\": to_hdfs_path(\"/user/ybw/ssd/Dataset/MNIST01-12665-10k-784.text\"),\n \"cc\": to_hdfs_path(\"/user/ybw/ssd/Dataset/twitter-2010.text\"),\n \"ts-in\": to_hdfs_path(\"/user/ybw/ssd/Dataset/terasort-250G\"),\n \"ts-out\": to_hdfs_path(\"/user/ybw/ssd/output/terasort-250G-sorted\"),\n}\n\nSPARK_BENCHMARK_CHECKPOINT_DIR = to_hdfs_path(\"/user/ybw/ssd/checkpoint\")\n\nif config.STORAGE_DEVICE_TYPE == config.STORAGE_DEVICE_TYPE_HDD:\n app_dataset = app_dataset_hdd\nelif config.STORAGE_DEVICE_TYPE == config.STORAGE_DEVICE_TYPE_SSD:\n app_dataset = app_dataset_ssd\n\nnum_iters = 10\n\ndef get_sparklet_wc_args(total_num_cores):\n return f\"\"\"{app_dataset[\"wc\"]} {total_num_cores} 32\"\"\"\n\n\ndef get_sparklet_pr_args(total_num_cores):\n return f\"\"\"{app_dataset[\"pr\"]} {total_num_cores} 10\"\"\"\n\n\ndef get_sparklet_km_args(total_num_cores):\n return f\"\"\"{app_dataset[\"km\"]} 500 {num_iters} {total_num_cores}\"\"\"\n\n\ndef get_sparklet_lr_args(total_num_cores):\n return f\"\"\"{app_dataset[\"lr\"]} {num_iters} {total_num_cores * 4}\"\"\" # LR dataset is too large\n\n\ndef get_sparklet_cc_args(total_num_cores):\n return f\"\"\"{app_dataset[\"cc\"]} {total_num_cores}\"\"\"\n\n\ndef get_sparklet_ts_args(total_num_cores):\n return f\"\"\"{app_dataset[\"ts-in\"]} {app_dataset[\"ts-out\"]} 2048\"\"\"\n\n\napp_args = {\n \"wc\": get_sparklet_wc_args,\n \"pr\": get_sparklet_pr_args,\n \"km\": get_sparklet_km_args,\n \"lr\": 
get_sparklet_lr_args,\n \"cc\": get_sparklet_cc_args,\n \"ts\": get_sparklet_ts_args,\n}\n\n# Fault injection point in (job_id, stage_id)\napp_fault_injection_point = {\n \"wc\": (0, 1),\n \"ts\": (1, 1),\n \"pr\": (2, 5),\n \"cc\": (13, 0),\n \"km\": (7, 0),\n \"lr\": (7, 0),\n}\n\ndef set_fault_injection_point(app):\n job_id, stage_id = app_fault_injection_point[app]\n config.SPARK_CONFIG_BASE[\"spark.sparklet.injectionJobId\"] = job_id\n config.SPARK_CONFIG_BASE[\"spark.sparklet.injectionStageId\"] = stage_id\n\nif __name__ == \"__main__\":\n sc = SparkCommand()\n from .benchmark_params import *\n num_executors = 64\n executor_memory_gb = 22\n num_cores_per_executor = [7]\n\n def getResultDir(is_spark=False, is_strawman_a=False, is_strawman_b=False):\n curr = config.RESULT_DIR\n is_hdd = config.STORAGE_DEVICE_TYPE_HDD == config.STORAGE_DEVICE_TYPE\n is_ucx = config.ENABLE_SPARK_UCX\n if config.ENABLE_FAULT_INJECTION:\n curr += \"/fault\"\n if config.ENABLE_SPECULATIVE_EXECUTION:\n curr += \"/speculative\"\n if is_hdd:\n curr += \"/hdd\"\n if is_spark:\n curr += \"/spark\"\n if is_ucx:\n curr += \"/ucx\"\n if is_strawman_a:\n curr += \"/strawmanA\"\n if is_strawman_b:\n curr += \"/strawmanB\"\n return curr\n\n def get_sparklet_commandline(app, nc):\n if (config.ENABLE_FAULT_INJECTION):\n set_fault_injection_point(app)\n total_num_cores = num_executors * nc\n module = app_sparklet_module_name[app]\n args = app_args[app](total_num_cores)\n this_app_name = app_name[app]\n result_path = f\"{getResultDir(is_spark=False)}/{app}_{num_executors}\"\n if config.USE_STRAWMAN_IMPLEMENTATION:\n result_path = f\"{getResultDir(is_spark=False, is_strawman_a=True)}/{app}_{num_executors}\"\n if config.USE_STRAWMAN_B:\n result_path = f\"{getResultDir(is_spark=False, is_strawman_b=True)}/{app}_{num_executors}\"\n cmdline = sc.submit_sparklet(f\"{this_app_name}\", f\"{module}_so\", args, num_executors=num_executors,\n executor_memory_gb=executor_memory_gb, executor_cores=nc)\n return f\"{cmdline} 1>{result_path}.log.stdout 2>{result_path}.log.stderr\"\n\n def get_spark_commandline(app, baseline, nc):\n if (config.ENABLE_FAULT_INJECTION):\n set_fault_injection_point(app)\n total_num_cores = num_executors * nc\n args = app_args[app](total_num_cores)\n this_app_name = f\"Spark_{app_name[app]}_{baseline}\"\n result_path = f\"{getResultDir(is_spark=True)}/{app}_{baseline}_{num_executors}\"\n cmdline = sc.submit_spark(f\"{this_app_name}\", config.SPARK_BENCHMARK_PATH, \"org.pacman.Launch\", f\"{app} {baseline} {SPARK_BENCHMARK_CHECKPOINT_DIR} {args}\", num_executors=num_executors,\n executor_memory_gb=executor_memory_gb, executor_cores=nc)\n return f\"{cmdline} 1>{result_path}.log.stdout 2>{result_path}.log.stderr\"\n\n sparklet = True\n spark_all = True\n spark_rdd = False\n spark_blas = True\n\n if True:\n # Fault tolerance\n sparklet = True\n spark_all = False\n spark_rdd = True\n spark_blas = False\n\n if sparklet:\n # Sparklet\n for app in app_list:\n for nc in num_cores_per_executor:\n print(get_sparklet_commandline(app, nc))\n\n if spark_all:\n # Spark\n for app in app_list:\n for baseline in baseline_list:\n for nc in num_cores_per_executor:\n print(get_spark_commandline(app, baseline, nc))\n\n if spark_rdd:\n # Spark (RDD Only)\n for app in app_list:\n baseline = \"rdd\"\n for nc in num_cores_per_executor:\n print(get_spark_commandline(app, baseline, nc))\n\n if spark_blas:\n # Spark BLAS\n for nc in num_cores_per_executor:\n for app in [\"km\", \"lr\"]:\n print(get_spark_commandline(app, \"rddblas\", 
nc))\n \n if False:\n for (baseline, app) in [('sql', 'pr'), ('sql', 'cc')]:\n assert(baseline in {*baseline_list})\n assert(app in {*app_list})\n nc = 7\n total_num_cores = num_executors * nc\n args = app_args[app](total_num_cores)\n this_app_name = f\"Spark_{app_name[app]}_{baseline}\"\n if config.ENABLE_SPARK_UCX:\n result_path = f\"/home/ybw/Research/PDD/result/new/ucx/spark/{app}_{baseline}_{num_executors}\"\n else:\n result_path = f\"/home/ybw/Research/PDD/result/new/spark/{app}_{baseline}_{num_executors}\"\n cmdline = sc.submit_spark(f\"{this_app_name}\", config.SPARK_BENCHMARK_PATH, \"org.pacman.Launch\", f\"{app} {baseline} {SPARK_BENCHMARK_CHECKPOINT_DIR} {args}\", num_executors=num_executors,\n executor_memory_gb=executor_memory_gb, executor_cores=nc)\n print(f\"{cmdline} 1>{result_path}.log.stdout 2>{result_path}.log.stderr\")\n\n\n# Show spark-shell\n# print(sc.spark_shell(f\"Explore\", num_executors=num_executors, executor_memory_gb=executor_memory_gb, executor_cores=nc))\n","repo_name":"stevenybw/chukonu_vldb_22_raw_data","sub_path":"sparklet_experiment/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"9499094683","text":"\nimport src.scrape_apk as scraper\nimport src.create_matrices as cm\nimport src.model as model\nimport json\nimport os\nfrom random import sample\nimport sys\nimport src.Filter_Coef as fc\nimport src.Correlation_Coef as cc\nimport src.Percent_API as pa\n\n\ndef get_apk(**kwargs):\n \"\"\"\n method to run multi class malware category detection pipeline\n \"\"\"\n #loads in input parameters into local variables \n outpath = kwargs['outpath']\n scrape = kwargs['scrape_data']\n categories = kwargs['categories']\n num_apps = kwargs['number_of_apps_per_cat']\n reread = kwargs['re_read']\n malware_path = kwargs['malware_path']\n num_malware = kwargs['num_malware_apps']\n malware_cats = kwargs['malware_categories']\n load_matrices = kwargs['load_matrices']\n corr = kwargs['corr_coef']\n #considers the following malware categories given the keyword 'all' is passed in\n if malware_cats == 'all':\n malware_cats = [\"Andup\",\"BankBot\",\"DroidKungFu\",\"FakeAngry\",\"FakeAV\",\"FakeDoc\",\\\n \"Fjcon\",\"GingerMaster\",\"Kemoge\",\"Koler\",\"Ksapp\",\"Kyview\",\"Minimob\",\"MobileTX\",\\\n \"Mtk\",\"Nandrobox\",\"Obad\",\"Roop\",\"SimpleLocker\",\"Univert\",\"Youmi\"]\n #creates output data directory\n if not os.path.exists(outpath):\n os.mkdir(outpath)\n #runs scraper to get apps from apkpure.com\n if scrape:\n print('Scraping data')\n scraper.run_scraper(categories, num_apps,outpath)\n \n print('Getting API info')\n #compiles metadata for all benign apps\n category_data,apps,apis,test_info, test_apps = cm.get_metadata(categories, outpath,reread)\n \n print('Getting malware info')\n #compiles metadata from all malware apps\n if reread:\n category_data,apps,apis, malware,test_info, test_apps, malware_test = cm.get_malware\\\n (malware_path,num_malware ,category_data,apps,apis,malware_cats,test_info, test_apps,outpath)\n else:\n category_data,apps,apis, malware,test_info, test_apps, malware_test = cm.read_malware\\\n (malware_cats, outpath,category_data,apps,apis,test_info, test_apps)\n \n \n print(\"Number of apps: \", len(apps))\n print(\"Number of apis: \", len(apis))\n #creates outpath for intermediate data files\n api_path = os.path.join(outpath, 'APIs')\n if not os.path.exists(api_path):\n os.mkdir(api_path)\n app_path = 
os.path.join(outpath, 'apps')\n if not os.path.exists(app_path):\n os.mkdir(app_path)\n ds_path = os.path.join(outpath, 'data structures')\n if not os.path.exists(ds_path):\n os.mkdir(ds_path)\n \n #removes APIs in less than 3 apps and stores intermediate data structures\n apis = cm.reduce_and_save_apps_apis(outpath,apps,apis,test_apps)\n cm.save_data(category_data, test_info,outpath)\n \n print(\"Number of apps: \", len(apps))\n print(\"Number of apis: \", len(apis))\n \n print()\n print('Number of test apps: ', str(len(test_apps)))\n \n #creates test and train kernels \n if not load_matrices:\n print('Creating kernels')\n train_kernels,test_kernels = cm.get_kernels(category_data,apps,apis, test_info,test_apps,outpath)\n else:\n train_kernels,test_kernels = cm.load_kernels(outpath)\n\n #frees up memory\n del category_data\n del test_info\n \n #run SVM model to predict malware category\n print('Training SVM') \n p1 = model.run_model(outpath,train_kernels, test_kernels,malware, malware_test,'original')\n #calculates coefficient correlation values and filters high and low coef APIs\n if corr:\n cc.main(outpath)\n fc.main(outpath)\n \n #calculates percentage APIs for each app\n pa.main(outpath)\n \n num_apis = [250]\n for n_api in num_apis:\n #infuses test APIs using 250 APIs based on percentage APIs\n train_kernels,test_kernels = cm.load_kernels(outpath,n_api)\n #run SVM model to predict malware category\n p2 = model.run_model(outpath,train_kernels, test_kernels,malware, malware_test,'infused_benign')\n #performs t-test based on predictions \n model.run_ttest(p1,p2)\n\n \n \n \n \n\n\nif __name__== \"__main__\":\n if len(sys.argv) > 1 and sys.argv[1] == 'test-project':\n cfg = json.load(open('./config/test-params.json'))\n else:\n #params - number_of_xml_files, number_of_apps_per_category\n cfg = json.load(open('./config/data-params.json'))\n get_apk(**cfg)\n","repo_name":"karans04/Malware-Category-Detection","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"35984116503","text":"# Used Imports\nimport openai\nimport os\nimport time\nfrom dotenv import load_dotenv\n\n# Type Imports\nfrom io import TextIOWrapper\n\nload_dotenv()\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nroles: list[str] = [\"user\", \"assistant\"]\n\nmessages: list[dict[str, str]] = [{'role': 'user', 'content': \"Hello, my name's Alex! What's yours?\"},\n {'role': 'assistant', 'content': 'Hello, Alex! My name is Bert! How are you doing?'},\n {'role': 'user', 'content': \"Nice to meet you Bert. I'm doing good. How about yourself?\"},\n {'role': 'assistant', 'content': \"I'm fine thank you. What are your hobbies?\"},\n {'role': 'user', 'content': \"As an AI language model, I don't have hobbies like humans do, but I'm here to answer your questions and help you in any way I can. How can I assist you today?\"},\n {'role': 'assistant', 'content': \"Oh, I apologize for my mistake. As you mentioned that you're here to assist me, can you tell me more about yourself? What can you do?\"},\n {'role': 'user', 'content': \"Of course! I am an AI language model capable of understanding natural language and providing human-like responses to various queries. I can answer questions related to a wide range of topics such as general knowledge, math, science, history, and more. I can also assist with tasks such as writing articles, composing emails, and even conducting research. 
Is there anything specific you'd like me to help with?\"},\n {'role': 'assistant', 'content': 'Actually, yes. Can you explain to me the importance of cybersecurity in our digital world?'}]\nrole_num: int = 0\nwhile True:\n if input(\"enter to continue\"): break\n chat = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\", messages = messages\n )\n\n role: str = roles[role_num] # chat.choices[0].message.role\n content: str = chat.choices[0].message.content\n\n print(f\"{role}: {content}\")\n messages.append({\"role\": role, \"content\":content})\n\n role_num = (role_num + 1) % 2\n\ncur_time: str = time.strftime(\"%Y-%m-%d,%H_%M_%S\")\nfile_name: str = f\"logs/chat/{cur_time}.ai-log\"\nfile: TextIOWrapper = open(file_name, \"w\")\nfile.write(str(messages).replace(\"},\", \"},\\n\"))\nfile.close()\nprint(messages)","repo_name":"AlexJFDev/OpenAI-Experiments","sub_path":"chat_test.py","file_name":"chat_test.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24430023025","text":"N = int(input())\nS = input()\n\n# for i in range(N//2):\n# if S[i] == \"B\":\n# state = True\n# break\n# elif S[N-i-1] == \"A\":\n# state = True\n# continue\n# else:\n# state = False\n# break\n\nif N==2:\n if S == \"AB\" or S == \"BA\":\n state = False\n else:\n state = True\n\nif N!=2:\n if S[0] == \"B\" or S[-1] == \"A\":\n state = True\n else:\n state = False\n\n \nif state:\n print('Yes')\nelse:\n print('No')","repo_name":"1022yuki/atcoder","sub_path":"ARC/A - AB Palindrome.py","file_name":"A - AB Palindrome.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36715828784","text":"# Interface pour MPI\nimport Converter.Mpi as Cmpi\nfrom . import PyTree as X\nimport Converter.Internal as Internal\nimport Converter.PyTree as C\nfrom . 
import connector\nimport RigidMotion.PyTree as RM\nimport numpy\n\ntry: range = xrange\nexcept: pass\n\n#==============================================================================\n# optimizeOverlap\n# IN: t: full/loaded skel/partial\n# IN: graph: graph d'intersection si deja calcule\n# IN: intersectionsDict: Dictionnaire d'intersections\n# OUT: arbre partiel avec overlap optimise\n#==============================================================================\ndef optimizeOverlap(t, double_wall=0, priorities=[], graph=None,\n intersectionsDict=None):\n if graph is None:\n tb = Cmpi.createBBoxTree(t)\n graph = Cmpi.computeGraph(tb, type='bbox2',\n intersectionsDict=intersectionsDict)\n tl = Cmpi.addXZones(t, graph)\n tl = Cmpi.convert2PartialTree(tl)\n # print info\n zones = Internal.getZones(tl)\n #print 'Rank %d has %d zones.'%(Cmpi.rank, len(zones))\n tl = X.optimizeOverlap(tl, double_wall, priorities, intersectionsDict)\n tl = Cmpi.rmXZones(tl)\n return tl\n\n#===============================================================================\n# setInterpTransfers\n# Warning: inverse storage!\n# IN: aR: arbre des receveurs\n# IN: aD: arbre des donneurs\n# IN: type: ID: interpolation, IBCD: IBCs, ALLD: interp+IBCs\n# IN: bcType 0: glissement\n# 1: adherence\n# 2: loi de paroi log\n# 3: loi de paroi Musker,4: outpress, 5 inj, 6 TBLE-SA\n# IN: varType=1,2,3: variablesIBC define (ro,rou,rov,row,roE(,ronutilde)),(ro,u,v,w,t(,nutilde)),(ro,u,v,w,p(,nutilde))\n# Adim: KCore.adim1 for Minf=0.1\n#===============================================================================\ndef setInterpTransfers(aR, aD, variables=[], cellNVariable='',\n variablesIBC=['Density','MomentumX','MomentumY','MomentumZ','EnergyStagnationDensity'], \n bcType=0, varType=1, graph=None, \n procDict=None, type='ALLD', \n Gamma=1.4, Cv=1.7857142857142865, MuS=1.e-08, \n Cs=0.3831337844872463, Ts=1.0):\n tp = Internal.copyRef(aR)\n compact = 0\n _setInterpTransfers(tp, aD, variables=variables, cellNVariable=cellNVariable, variablesIBC=variablesIBC, \n bcType=bcType, varType=varType, compact=compact, graph=graph, \n procDict=procDict, type=type, Gamma=Gamma, Cv=Cv, MuS=MuS, Cs=Cs, Ts=Ts)\n return tp\n#===============================================================================\ndef _setInterpTransfers(aR, aD, variables=[], cellNVariable='',\n variablesIBC=['Density','MomentumX','MomentumY','MomentumZ','EnergyStagnationDensity'], \n bcType=0, varType=1, compact=0, graph=None, \n procDict=None, type='ALLD',\n Gamma=1.4, Cv=1.7857142857142865, MuS=1.e-08, \n Cs=0.3831337844872463, Ts=1.0):\n\n if procDict is None: procDict = Cmpi.getProcDict(aD)\n if graph is None: graph = Cmpi.computeGraph(aD, type=type)\n\n # Transferts locaux/globaux\n # Calcul des solutions interpolees par arbre donneur\n # On envoie aussi les indices receveurs pour l'instant\n datas = {}\n zonesD = Internal.getZones(aD)\n for zD in zonesD:\n infos = X.setInterpTransfersD(zD, variables=variables, cellNVariable=cellNVariable, variablesIBC=variablesIBC, \n bcType=bcType, varType=varType, compact=compact, Gamma=Gamma, Cv=Cv, MuS=MuS, Cs=Cs, Ts=Ts)\n for n in infos:\n rcvName = n[0]\n proc = procDict[rcvName]\n if proc == Cmpi.rank:\n field = n[1]\n #print 'direct', Cmpi.rank, rcvName\n if field != []:\n listIndices = n[2]\n z = Internal.getNodeFromName2(aR, rcvName)\n C._setPartialFields(z, [field], [listIndices], loc=n[3])\n else:\n rcvNode = procDict[rcvName]\n #print Cmpi.rank, 'envoie a ',rcvNode\n if rcvNode not in datas: datas[rcvNode] = [n]\n else: 
datas[rcvNode] += [n]\n #print datas\n # Envoie des numpys suivant le graph\n rcvDatas = Cmpi.sendRecv(datas, graph)\n\n # Remise des champs interpoles dans l'arbre receveur\n for i in rcvDatas:\n #print Cmpi.rank, 'recoit de',i, '->', len(rcvDatas[i])\n for n in rcvDatas[i]:\n rcvName = n[0]\n #print 'reception', Cmpi.rank, rcvName\n field = n[1]\n if field != []:\n listIndices = n[2]\n z = Internal.getNodeFromName2(aR, rcvName)\n C._setPartialFields(z, [field], [listIndices], loc=n[3])\n return None\n\n#===============================================================================\n# __setInterpTransfers version optimiser de _setInterpTransfers: arbre t et tc compact, moins de python + de C\n#\n# Warning: inverse storage!\n# IN: zones: list zone receveurs\n# IN: zoneD: list zone donneurs\n# IN: type: ID: interpolation, IBCD: IBCs, ALLD: interp+IBCs\n# IN: bcType 0: glissement\n# 1: adherence\n# 2: loi de paroi log\n# 3: loi de paroi Musker\n# IN: varType=1,2,3: variablesIBC define (ro,rou,rov,row,roE(,ronutilde)),(ro,u,v,w,t(,nutilde)),(ro,u,v,w,p(,nutilde))\n# Adim: KCore.adim1 for Minf=0.1\n#===============================================================================\ndef __setInterpTransfers(zones, zonesD, vars, param_int, param_real, type_transfert, nitrun,\n nstep, nitmax, rk, exploc, num_passage, bcType=0, varType=1, compact=1,\n graph=None, procDict=None,\n Gamma=1.4, Cv=1.7857142857142865, MuS=1.e-08, Cs=0.3831337844872463, Ts=1.0):\n\n # Transferts locaux/globaux\n # Calcul des solutions interpolees par arbre donneur\n # On envoie aussi les indices receveurs pour l'instant\n datas = {}\n nbcomIBC = param_int[1]\n shift_graph = nbcomIBC + param_int[2+nbcomIBC] + 2\n\n for comm_P2P in range(1,param_int[0]+1):\n pt_ech = param_int[comm_P2P + shift_graph]\n dest = param_int[pt_ech]\n\n no_transfert = comm_P2P\n if dest == Cmpi.rank: #transfert intra_processus\n #print 'transfert local', type_transfert\n connector.___setInterpTransfers(zones, zonesD, vars, param_int, param_real, nitrun, varType, bcType, \n type_transfert, no_transfert, nstep, nitmax, rk, exploc, num_passage, Gamma,Cv,MuS,Cs,Ts)\n\n else:\n #print 'transfert global', type_transfert\n infos = connector.__setInterpTransfersD(zones, zonesD, vars, param_int, param_real, nitrun, varType, bcType, \n type_transfert, no_transfert, nstep, nitmax, rk, exploc, num_passage, Gamma,Cv,MuS,Cs,Ts) \n if infos != []:\n for n in infos:\n rcvNode = dest\n #print Cmpi.rank, 'envoie a ',rcvNode, ' le paquet : ', n\n if rcvNode not in datas: datas[rcvNode] = [n]\n else: datas[rcvNode] += [n]\n #print datas\n \n # Envoie des numpys suivant le graph\n rcvDatas = Cmpi.sendRecv(datas, graph)\n\n # Remise des champs interpoles dans l'arbre receveur\n for i in rcvDatas:\n #if Cmpi.rank==0: print Cmpi.rank, 'recoit de',i, '->', len(rcvDatas[i])\n for n in rcvDatas[i]:\n rcvName = n[0]\n #if Cmpi.rank==0: print 'reception', Cmpi.rank, 'no zone', zones[ rcvName ][0]\n field = n[1]\n if field != []:\n listIndices = n[2]\n z = zones[rcvName]\n C._setPartialFields(z, [field], [listIndices], loc='centers')\n return None\n\n#---------------------------------------------------------------------------------------------------------\n# Transferts instationnaires en parallele\n# avec prise en compte du mouvement\n# absFrame = True : les coordonnees de t sont deja dans le repere absolu en entree\n#---------------------------------------------------------------------------------------------------------\ndef _transfer(t, tc, variables, graph, 
intersectionDict, dictOfADT, \n dictOfNobOfRcvZones, dictOfNozOfRcvZones,\n dictOfNobOfDnrZones, dictOfNozOfDnrZones, \n dictOfNobOfRcvZonesC, dictOfNozOfRcvZonesC, \n time=0., absFrame=True, procDict=None, cellNName='cellN'):\n if procDict is None: procDict = Cmpi.getProcDict(tc)\n \n # dictionnaire des matrices de mouvement pour passer du repere relatif d une zone au repere absolu\n dictOfMotionMatR2A={}\n dictOfMotionMatA2R={}\n coordsD=[0.,0.,0.]; coordsC= [0.,0.,0.] # XAbs = coordsD + Mat*(XRel-coordsC)\n dictOfFields={}; dictOfIndices={}\n \n datas={}\n for z in Internal.getZones(t):\n zname = Internal.getName(z)\n if zname not in dictOfNobOfRcvZones: continue\n\n # coordonnees dans le repere absolu de la zone receptrice\n # on les recupere de zc pour eviter un node2center des coordonnees de z\n nobc = dictOfNobOfRcvZonesC[zname]\n nozc = dictOfNozOfRcvZonesC[zname]\n zc = tc[2][nobc][2][nozc]\n if zc[0] != zname:# check\n raise ValueError(\"_transfer: t and tc skeletons must be identical.\")\n\n C._cpVars(z,'centers:'+cellNName, zc, cellNName)\n res = X.getInterpolatedPoints(zc,loc='nodes', cellNName=cellNName) \n # print 'Zone %s du proc %d a interpoler'%(zname, Cmpi.rank)\n\n if res is not None: \n # print 'Res not None : zone %s du proc %d a interpoler'%(zname, Cmpi.rank)\n\n indicesI, XI, YI, ZI = res\n # passage des coordonnees du recepteur dans le repere absolu\n # si mouvement gere par FastS -> les coordonnees dans z sont deja les coordonnees en absolu\n if not absFrame: \n if zname in dictOfMotionMatR2A:\n MatRel2AbsR=RM.getMotionMatrixForZone(z, time=time, F=None)\n dictOfMotionMatR2A[zname]=MatRel2AbsR\n else:\n MatRel2AbsR = dictOfMotionMatR2A[zname]\n RM._moveN([XI,YI,ZI],coordsD,coordsC,MatRel2AbsR)\n\n procR = procDict[zname]\n for znamed in intersectionDict[zname]:\n procD = procDict[znamed]\n if procD == Cmpi.rank:\n nobc = dictOfNobOfDnrZones[znamed]\n nozc = dictOfNozOfDnrZones[znamed]\n zdnr = tc[2][nobc][2][nozc]\n adt = dictOfADT[znamed]\n if znamed in dictOfMotionMatA2R:\n MatAbs2RelD=dictOfMotionMatA2R[znamed]\n else: \n if znamed in dictOfMotionMatR2A:\n MatRel2AbsD = dictOfMotionMatR2A[znamed]\n MatAbs2RelD = numpy.transpose(MatRel2AbsD)\n dictOfMotionMatA2R[znamed] = MatAbs2RelD\n else:\n MatRel2AbsD=RM.getMotionMatrixForZone(zdnr, time=time, F=None)\n dictOfMotionMatR2A[znamed]=MatRel2AbsD\n MatAbs2RelD = numpy.transpose(MatRel2AbsD)\n dictOfMotionMatA2R[znamed] = MatAbs2RelD\n [XIRel, YIRel, ZIRel] = RM.moveN([XI,YI,ZI],coordsC,coordsD,MatAbs2RelD)\n\n # transfers avec coordonnees dans le repere relatif \n fields = X.transferFields(zdnr, XIRel, YIRel, ZIRel, hook=adt, variables=variables)\n if zname not in dictOfFields:\n dictOfFields[zname]=[fields]\n dictOfIndices[zname]=indicesI\n else:\n dictOfFields[zname].append(fields)\n\n else: \n # print ' ECHANGE GLOBAL entre recepteur %s du proc %d et donneur %s du proc %d '%(zname, Cmpi.rank, znamed, procD)\n if procD not in datas:\n datas[procD] = [[zname, znamed, indicesI, XI, YI, ZI]]\n else: datas[procD].append([zname, znamed, indicesI, XI, YI, ZI])\n\n # print 'Proc : ', Cmpi.rank, ' envoie les donnees : ' ,datas.keys()\n # print ' a partir du graphe ', graph\n # 1er envoi : envoi des numpys des donnees a interpoler suivant le graphe\n interpDatas = Cmpi.sendRecv(datas,graph)\n\n # recuperation par le proc donneur des donnees pour faire les transferts \n transferedDatas={}\n for i in interpDatas:\n #print Cmpi.rank, 'recoit de',i, '->', len(interpDatas[i])\n for n in interpDatas[i]:\n zdnrname = n[1]\n 
zrcvname = n[0]\n indicesR = n[2]\n XI = n[3]; YI = n[4]; ZI = n[5]\n nobc = dictOfNobOfDnrZones[zdnrname]\n nozc = dictOfNozOfDnrZones[zdnrname]\n zdnr = tc[2][nobc][2][nozc]\n adt = dictOfADT[zdnrname]\n if zdnrname in dictOfMotionMatA2R:\n MatAbs2RelD=dictOfMotionMatA2R[zdnrname]\n else:\n if zdnrname in dictOfMotionMatR2A:\n MatRel2AbsD = dictOfMotionMatR2A[zdnrname]\n MatAbs2RelD = numpy.transpose(MatRel2AbsD)\n dictOfMotionMatA2R[zdnrname] = MatAbs2RelD\n else:\n MatRel2AbsD=RM.getMotionMatrixForZone(zdnr, time=time, F=None)\n dictOfMotionMatR2A[zdnrname]=MatRel2AbsD\n MatAbs2RelD = numpy.transpose(MatRel2AbsD)\n dictOfMotionMatA2R[zdnrname] = MatAbs2RelD\n \n [XIRel, YIRel, ZIRel] = RM.moveN([XI,YI,ZI],coordsC,coordsD,MatAbs2RelD)\n # transferts avec coordonnees dans le repere relatif \n fields = X.transferFields(zdnr, XIRel, YIRel, ZIRel, hook=adt, variables=variables)\n procR = procDict[zrcvname]\n \n if procR not in transferedDatas:\n transferedDatas[procR]=[[zrcvname, indicesR, fields]]\n else:\n transferedDatas[procR].append([zrcvname,indicesR,fields])\n \n if transferedDatas != {}:\n # 2nd envoi : envoi des numpys des donnees interpolees suivant le graphe\n rcvDatas = Cmpi.sendRecv(transferedDatas,graph)\n \n # remise des donnees interpolees chez les zones receveuses\n # une fois que tous les donneurs potentiels ont calcule et envoye leurs donnees\n for i in rcvDatas:\n #print Cmpi.rank, 'recoit des donnees interpolees de',i, '->', len(rcvDatas[i])\n for n in rcvDatas[i]:\n zrcvname = n[0]\n indicesI = n[1]\n fields = n[2]\n if zrcvname not in dictOfFields:\n dictOfFields[zrcvname]=[fields]\n dictOfIndices[zrcvname]=indicesI\n else:\n dictOfFields[zrcvname].append(fields)\n\n for zrcvname in dictOfIndices:\n nob = dictOfNobOfRcvZones[zrcvname]\n noz = dictOfNozOfRcvZones[zrcvname]\n z = t[2][nob][2][noz]\n allInterpFields = dictOfFields[zrcvname]\n indicesI = dictOfIndices[zrcvname]\n C._filterPartialFields(z, allInterpFields, indicesI, loc='centers', startFrom=0, filterName='donorVol')\n\n # SORTIE\n return None\n","repo_name":"guillaume-florent/cassiopee-ce","sub_path":"cassiopee/Connector/Connector/Mpi.py","file_name":"Mpi.py","file_ext":"py","file_size_in_byte":15725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"15654600111","text":"###\n# Midi Mapping like Renoise, basic style piano roll\n# (Added a lower note so that C4 root sits in an easy place)\n###\n\nimport usb_midi\nimport adafruit_midi\nfrom pmk import PMK\nfrom pmk.platform.keybow2040 import Keybow2040 as Hardware\nfrom adafruit_midi.note_off import NoteOff\nfrom adafruit_midi.note_on import NoteOn\n\nkeybow = PMK(Hardware())\nkeys = keybow.keys\nmidi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)\n\nrgb = (255, 0, 0)\nrgb_off = (25, 25, 25)\nvelocity = 127\n\nnote_map = [\n 'B2', 'F3', 'C4', 'G4',\n 'C3', 'G3', 'D4', 'A4',\n 'D3', 'A3', 'E4', 'B4',\n 'E3', 'B3', 'F4', 'C5',\n]\n\nfor key in keys:\n key.set_led(*rgb_off)\n\nwhile True:\n keybow.update()\n\n for key in keys:\n @keybow.on_press(key)\n def press_handler(key):\n note = note_map[key.number]\n key.set_led(*rgb)\n midi.send(NoteOn(note, velocity))\n\n @keybow.on_release(key)\n def release_handler(key):\n note = note_map[key.number]\n key.set_led(*rgb_off)\n midi.send(NoteOff(note, 
0))\n","repo_name":"tspring5000/KEYBOW-midi-patches","sub_path":"renoise.py","file_name":"renoise.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20943933666","text":"'''\nSimple Lambda function that posts to a configured SNS topic\non the click of a button. It sends different messages based\non a single, double, or long click.\n\nThe following JSON template shows what is sent as the payload:\n{\n \"serialNumber\": \"GXXXXXXXXXXXXXXXXX\",\n \"batteryVoltage\": \"xxmV\",\n \"clickType\": \"SINGLE\" | \"DOUBLE\" | \"LONG\"\n}\n\nA \"LONG\" clickType is sent if the first press lasts longer than 1.5 seconds.\n\"SINGLE\" and \"DOUBLE\" clickType payloads are sent for short clicks.\n\nFor more documentation, follow the link below.\nhttp://docs.aws.amazon.com/iot/latest/developerguide/iot-lambda-rule.html\n'''\n\nfrom __future__ import print_function\n\nimport boto3\nimport json\nimport logging\nimport time\nimport os\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nsns = boto3.client('sns')\n\ndef lambda_handler(event, context):\n logger.info('Received event: ' + json.dumps(event))\n\n # A short press means Ryan is home\n # A double click means Ryan wants Door Dash\n # A long click means call Ryan\n date = time.strftime('%I:%M:%S %p')\n message = 'Ryan is home'\n if (event['clickType'] == 'DOUBLE'):\n message = 'Ryan wants Door Dash'\n elif (event['clickType'] == 'LONG'):\n message = 'Please call Ryan'\n\n messageObj = {}\n messageObj['default'] = message\n messageObj['email'] = 'A ' + event['clickType'] + ' click was received from IOT device ' + event['serialNumber'] + ' at ' + date + ' UTC'\n messageObj['sms'] = message + ' at ' + date\n sns.publish(TopicArn=os.environ['topic_arn'], Message=json.dumps(messageObj), MessageStructure='json', Subject=message)\n logger.info('Message has been sent to ' + topic_arn)\n","repo_name":"gsdriver/iot-fun","sub_path":"sendsms.py","file_name":"sendsms.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71202538235","text":"print ('Day 2: 30 Days of python programming')\nfirst_name = 'Iván'\nlast_name = 'Betanzos'\nfull_name = 'Iván Betanzos Soto'\ncountry = 'Sapin'\ncity = 'Jerez de la Frontera'\nage = 16\nyear = 2022\nis_married = False\nis_light_on = True\nhobbie, pet, favourite_sport = 'videogames', 'hamster', 'basketball'\n\nprint (type(first_name))\nprint (type(last_name))\nprint (type(full_name))\nprint (type(country))\nprint (type(city))\nprint (type(age))\nprint (type(year))\nprint (type(is_married))\nprint (type(is_light_on))\nprint (type(hobbie))\nprint (type(pet))\nprint (type(favourite_sport))\n\nprint(len(first_name))\nprint(len(first_name), len(last_name))\n\nnum_one=5\nnum_two=4\ntotal=(num_one+num_two)\ndiff=(num_one-num_two)\nproduct=(num_one*num_two)\ndivision=(num_one/num_two)\nremainder=(num_two%num_one)\nexp=(num_one**num_two)\nfloor_dision=(num_one//num_two)\n\narea_of_circle=(3.1416*30**2)\ncircum_of_circle=(3.1416*60)\nradius = int(input(\"Give me the radius of a circle: \"))\nprint ((3.1416*(30**2)))\n\nfirst_name = input (\"What is your first name?\")\nlast_name = input (\"What is your last name?\")\ncountry = input (\"Wich is your country?\")\nage = input (\"How old are you?\")\n\nprint ( \"Hello, I'am\",first_name, last_name, \"I live in\", country, \"and I've\", 
age)\n","repo_name":"iivanii/30-dias-de-python","sub_path":"day_2/Variables.py","file_name":"Variables.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"43077294112","text":"'''\n#include \"BehaviourArbitration.h\"\n\nThe old Kinect has a depth image resolution of 320 x 240 pixels with a fov of 58.5 x 46.6\ndegrees resulting in an average of about 5 x 5 pixels per degree. (see source 1) << 1 patch = 5x5 pixel\n\nsource: http://smeenk.com/kinect-field-of-view-comparison/\n'''\n\nimport cv2\nimport math\nimport numpy as np\nimport time as t\n\nfrom numba import njit, objmode\nfrom numba import int32, float64\nfrom numba.experimental import jitclass\n\nspec = [\n ('value', float64), # a simple scalar field\n ('array', float64[:]), # an array field\n]\n \n@jitclass(spec)\nclass Local_Path:\n def __init__(self):\n print(\"For obstacle avoidance\")\n \n def navigation(self, seg_center):\n new_input = self.set_input(seg_center)\n \n L = new_input.shape[0]\n \n angleGain = 150\n colGain = 0.8\n hfov = 87\n vfov = 58\n angularRangeHorz = hfov * math.pi/180\n angularRangeVert = vfov * math.pi/180\n image_width = 640\n image_height = 400\n \n # init\n Horz_total = 0\n Vert_total = 0\n collision_prob = 0\n \n for i in range(L):\n # 장애물 회피를 목적으로, 거리가 가까울수록 영향력 큼\n distanceExponent = np.exp(-new_input[i,3])\n distanceExponent = distanceExponent * new_input[i,4] * L / image_height / image_width\n \n # View Angle (-FOV/2 < x < +FOV/2)\n # FOV와 좌표를 기반으로 이미지의 중심으로부터의 각을 구함\n Horz_Bearing = (hfov*new_input[i,0]/image_width) + (hfov/2/image_width)\n Vert_Bearing = (vfov*new_input[i,1]/image_height) + (vfov/2/image_height)\n # degree to radian\n Horz_Bearing = Horz_Bearing * math.pi / 180\n Vert_Bearing = Vert_Bearing * math.pi / 180\n \n # The larger the center, the smaller the border.\n Horz_bearingWeight = np.exp(-pow(Horz_Bearing,2)/(pow(angularRangeHorz,2)))\n Vert_bearingWeight = np.exp(-pow(Vert_Bearing,2)/(pow(angularRangeVert,2)))\n \n Horz_total += (Horz_Bearing * Horz_bearingWeight) * distanceExponent \n Vert_total += (Vert_Bearing * Vert_bearingWeight) * distanceExponent\n \n collision_prob += (Horz_bearingWeight * distanceExponent) + (Vert_bearingWeight * distanceExponent)\n \n collision_prob = collision_prob/L\n collision_prob = collision_prob * np.exp(pow(collision_prob*colGain,2))\n if collision_prob > 1:\n collision_prob = 1\n \n Horz_total = Horz_total * np.exp(-pow(Horz_total/angleGain,2))\n if Horz_total > hfov:\n Horz_total = hfov\n elif Horz_total < -hfov:\n Horz_total = -hfov\n \n Vert_total = Vert_total * np.exp(-pow(Vert_total/angleGain,2))\n if Vert_total > vfov:\n Vert_total = vfov\n elif Vert_total < -vfov:\n Vert_total = -vfov\n Vert_total = Vert_total/10\n \n \n # print(Horz_total)\n # # print(Vert_total)\n # print(collision_prob)\n return Horz_total, Vert_total, collision_prob\n \n\n def set_input(self, seg_center):\n L = seg_center.shape[0]\n new_input = np.zeros((L,5))\n\n for i in range(L):\n x_dis = seg_center[i,0] - 320\n y_dis = seg_center[i,1] - 240\n \n new_input[i,0] = x_dis # x_pos\n new_input[i,1] = y_dis # y_pos\n \n new_input[i,2] = pow(x_dis,2) + pow(y_dis,2)\n new_input[i,2] = math.sqrt(new_input[i,2]) # Distance[pixel] from center of image\n\n new_input[i,3] = seg_center[i,3] # Depth\n new_input[i,4] = seg_center[i,4] # The number of segmentation's pixels\n \n return new_input\n\nif __name__ == \"__main__\":\n print(\"test\")\n Ld=np.load(\"left_data.npy\")\n 
Rd=np.load(\"right_data.npy\")\n data = np.vstack((Ld,Rd))\n # print(data)\n local_p = Local_Path()\n \n st = t.time()\n for i in range(1):\n local_p.navigation(data)\n print(t.time()-st)","repo_name":"YBNML/humantech","sub_path":"scripts/utils_LocalPath.py","file_name":"utils_LocalPath.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15031588041","text":"\"\"\"Learn to Program: The Fundamentals | University of Toranto Computer Science\n2018 Gina Fitzgerald\"\"\"\n# A Read Grades Into a List\n# B Count the grades per range\n# C Write histogram to file\n\n#this file is the main program, it holds constants and imports functions\n\nimport tkinter.filedialog\nimport grade\n#1. define varables to where we read from and where we save to\na1_filename = tkinter.filedialog.askopenfilename()\na1_file = open(a1_filename,'r')\n\na1_histfilename = tkinter.filedialog.asksaveasfilename()\na1_histfile = open(a1_histfilename,'w')\n\n# A Read Grades Into a List\ngrades = grade.read_grades(a1_file)\n\n# B Count the grades per range\nrange_counts = grade.count_grade_ranges(grades)\n\n#print(range_counts) #test\n\n# C Write the histogram to the file\ngrade.write_histogram(range_counts, a1_histfile)\n\na1_file.close()\na1_histfile.close()\n","repo_name":"g24fitzgerald/PortlandState-Python","sub_path":"coursera/session_one/w5w6/grade_histogram.py","file_name":"grade_histogram.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"720120924","text":"import collections\r\nimport io\r\nimport random\r\n\r\nimport numpy\r\n\r\nfrom glob import glob\r\nfrom progressbar import ProgressBar\r\n\r\nimport chainer\r\nfrom chainer.backends import cuda\r\n\r\n\r\ndef split_text(text, char_based=False):\r\n if char_based:\r\n return list(text)\r\n else:\r\n return text.split()\r\n\r\n\r\ndef normalize_text(text):\r\n return text.strip().lower()\r\n\r\n\r\ndef read_kernel(fi_name):\r\n rl = []\r\n with io.open(fi_name, encoding='utf-8') as fi:\r\n for line in fi:\r\n if line.startswith('#'):\r\n continue\r\n l_lst = line.strip().split(',')\r\n rl.append([float(l_lst[0]), float(l_lst[1])])\r\n return rl\r\n\r\n\r\ndef make_array(tokens, vocab, add_eos=True):\r\n unk_id = vocab['']\r\n eos_id = vocab['']\r\n ids = [vocab.get(token, unk_id) for token in tokens]\r\n if add_eos:\r\n ids.append(eos_id)\r\n return numpy.array(ids, numpy.int32)\r\n\r\n\r\ndef convert_seq3(batch, device=None, with_label=True):\r\n def to_device_batch(batch):\r\n if device is None:\r\n return batch\r\n elif device < 0:\r\n return [chainer.dataset.to_device(device, x) for x in batch]\r\n else:\r\n xp = cuda.cupy.get_array_module(*batch)\r\n concat = xp.concatenate(batch, axis=0)\r\n sections = numpy.cumsum([len(x)\r\n for x in batch[:-1]], dtype=numpy.int32)\r\n concat_dev = chainer.dataset.to_device(device, concat)\r\n batch_dev = cuda.cupy.split(concat_dev, sections)\r\n return batch_dev\r\n\r\n if with_label:\r\n return {'xs1': to_device_batch([x1 for x1, _, _, _ in batch]),\r\n 'xs2': to_device_batch([x2 for _, x2, _, _ in batch]),\r\n 'xs3': to_device_batch([x3 for _, _, x3, _ in batch]),\r\n 'ys': to_device_batch([y for _, _, _, y in batch])}\r\n else:\r\n return {'xs1': to_device_batch([x1 for x1, _ in batch]),\r\n 'xs2': to_device_batch([x2 for _, x2 in batch])}\r\n\r\n\r\ndef load_data_using_dataset_api(fi_name, vocab):\r\n EOS, UNK = 0, 
1\r\n\r\n def _transform_line(content):\r\n words = content.strip().split()\r\n return numpy.array(\r\n [vocab.get(w, UNK) for w in words], numpy.int32)\r\n\r\n def _transform(line):\r\n l_lst = line.strip().split('\\t')\r\n return(\r\n _transform_line(l_lst[0]),\r\n _transform_line(l_lst[1]),\r\n _transform_line(l_lst[2]),\r\n numpy.array([float(l_lst[3])], numpy.float32)\r\n )\r\n\r\n def _load_single_data_using_dataset_api(fi):\r\n return chainer.datasets.TransformDataset(\r\n chainer.datasets.TextDataset(fi, encoding='utf-8'), _transform)\r\n\r\n train_path = glob(fi_name)\r\n p = ProgressBar(0, len(train_path))\r\n\r\n datasets = []\r\n for n, fi_path in enumerate(train_path):\r\n p.update(n+1)\r\n datasets.append(_load_single_data_using_dataset_api(fi_path))\r\n\r\n return chainer.datasets.ConcatenatedDataset(*datasets)\r\n\r\n\r\ndef make_vocab(dataset, vocabsize, min_freq=2):\r\n counts = collections.defaultdict(int)\r\n for t1, t2, _ in dataset:\r\n tokens = t1 + t2\r\n for token in tokens:\r\n counts[token] += 1\r\n\r\n vocab = {'': 0, '': 1}\r\n for w, c in sorted(counts.items(), key=lambda x: (-x[1], x[0])):\r\n if len(vocab) >= vocabsize or c < min_freq:\r\n break\r\n vocab[w] = len(vocab)\r\n return vocab\r\n\r\n\r\ndef transform_to_array3(dataset, vocab, with_label=True):\r\n if with_label:\r\n return [(make_array(t1, vocab, False), make_array(t2, vocab, False),\r\n make_array(t3, vocab, False), numpy.array([cls], numpy.float32))\r\n for t1, t2, t3, cls in dataset]\r\n else:\r\n return [(make_array(t1, vocab), make_array(t2, vocab)) for t1, t2 in dataset]\r\n\r\n\r\ndef load_word2vec_model(fi_name, units):\r\n print('load {} word2vec model'.format(fi_name))\r\n with open(fi_name, encoding='utf-8') as fi:\r\n vocab = {'': 0, '': 1}\r\n vector = []\r\n for n, line in enumerate(fi):\r\n l_lst = line.strip().split()\r\n if n == 0:\r\n # vocabsize = int(l_lst[0])\r\n v_size = int(l_lst[1])\r\n assert(units == v_size)\r\n\r\n vector.append([random.uniform(-0.5, 0.5) for _ in range(v_size)])\r\n vector.append([random.uniform(-0.5, 0.5) for _ in range(v_size)])\r\n else:\r\n v = l_lst[0]\r\n vec = [float(i) for i in l_lst[1:]]\r\n vocab[v] = n + 1\r\n vector.append(vec)\r\n\r\n return vocab, numpy.array(vector, numpy.float32)\r\n\r\n\r\ndef load_input_file(fi_name):\r\n rl = []\r\n with open(fi_name, encoding='utf-8') as fi:\r\n for line in fi:\r\n l_lst = line.strip().split('\\t')\r\n if len(l_lst) < 4:\r\n continue\r\n i1, i2, i3 = split_text(l_lst[0]), split_text(l_lst[1]), split_text(l_lst[2])\r\n label = l_lst[3]\r\n\r\n rl.append((i1, i2, i3, label,))\r\n\r\n return rl\r\n\r\n\r\ndef get_input_dataset(fi_name, vocab):\r\n dataset = load_input_file(fi_name)\r\n train = transform_to_array3(dataset, vocab)\r\n\r\n return train\r\n","repo_name":"kazuaki-i/KNRM","sub_path":"ranking_utils.py","file_name":"ranking_utils.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"25880532899","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file includes classes defining how the columns on search result pages display,\nas well as how they create content in a downloaded file. 
There are two kinds of classes:\n\n* Instances of ``SearchCol`` (and its subclasses), correspond to a single column, defining\n how it is displayed and downloaded\n* All of the search columns are collected into a ``SearchColumns`` instance, providing an\n interface for interacting with the set of columns as a whole.\n\nThe three main entry points to ``SearchCol`` are\n\n* its ``display`` method, which returns a string to be displayed in the html corresponding to\n the column within a row corresponding to a provided dictionary,\n* its ``download`` method, which returns a Python object that is then encoded using a download\n language (in ``lmfdb/utils/downloader.py``) when creating a download file,\n* its ``show`` method, which is used to iterate over columns, and usually just yields the\n column, but can yield nothing (if the column shouldn't be displayed) or multiple subcolumns\n (in the case of column groups).\n\"\"\"\n\nfrom .web_display import display_knowl\nfrom lmfdb.utils import pol_to_html, coeff_to_poly\nfrom sage.all import Rational\n\ndef get_default_func(default, name):\n \"\"\"\n This utility function takes the default value provided when creating\n a search column and returns a function that determines whether the\n column should be shown by default, based on the info dictionary\n created from the url parameters.\n\n In particular, it ensures that if the user explicitly hides/shows\n the column in the column dropdown, or if the column is the main\n component of a sort order, that overrides the input value.\n \"\"\"\n def f(info):\n if \"hidecol\" in info and name in info[\"hidecol\"].split(\".\"):\n return False\n if \"showcol\" in info and name in info[\"showcol\"].split(\".\"):\n return True\n sort_order = info.get('sort_order', '')\n if (sort_order and sort_order == name\n and \"search_array\" in info\n and info[\"search_array\"].sorts is not None):\n return True\n if isinstance(default, bool):\n return default\n return default(info)\n return f\n\n\nclass SearchCol:\n \"\"\"\n INPUT:\n\n - ``name`` -- a string describing this column, the name of the html element, used in the url\n to specify that the column be shown or hidden, the name for the column in download files,\n and the default key used when extracting data from a database record.\n - ``knowl`` -- a knowl identifier, for displaying the column header as a knowl\n - ``title`` -- the string shown for the column header, also included when describing the column\n in a download file.\n - ``default`` -- either a boolean or a function taking an info dictionary as input and returning\n a boolean. In either case, this determines whether the column is displayed initially. See\n the ``get_default_func`` above.\n - ``align`` -- horizontal alignment for this column; left by default, though some subclasses\n override this choice.\n - ``contingent`` -- either None or a function taking an info dictionary as input and returning\n a boolean. 
In the second case, if the function returns false then the column is not even\n included in the list of drop-down options.\n - ``short_title`` -- the string used to describe this column in the dropdown (defaults to the same\n as the title).\n - ``mathmode`` -- whether the contents should be displayed in math mode\n - ``orig`` -- a list of columns from the underlying search table, used to construct this column.\n A string means the corresponding length 1 list, and None defaults to using the column name.\n This list is used to determine a projection when constructing the underlying search.\n - ``download_desc`` -- a string included at the bottom of the download file describing this column.\n Defaults to the contents of the knowl.\n - ``download_col`` -- a column of the underlying search table, used to clarify what data\n should be included. Defaults to the column's name.\n - ``th_class``, ``th_style``, ``th_content``, ``td_class``, ``td_style``, ``td_content`` -- used\n to add CSS to the HTML elements corresponding to this column.\n\n Name, knowl and title can be passed either positionally or as a keyword argument; other values should be\n provided as keywords so that subclasses don't need to worry about passing positional arguments appropriately.\n \"\"\"\n def __init__(self, name, knowl, title, default=True, align=\"left\",\n mathmode=False, contingent=None, short_title=None, orig=None,\n download_desc=None, download_col=None, **kwds):\n # Both contingent and default can be functions that take info\n # as an input (if default is a boolean it's translated to the\n # constant function with that value)\n # If contingent is false, then that column doesn't even show\n # up on the list of possible columns\n # If default is false, then that column is included in the\n # selector but not displayed by default\n assert \",\" not in name\n self.name = name\n self.knowl = knowl\n self.title = title\n if short_title is None:\n short_title = None if title is None else title.lower()\n self.short_title = short_title\n self.default = get_default_func(default, name)\n self.mathmode = mathmode\n if orig is None:\n orig = [name]\n elif isinstance(orig, str):\n orig = [orig]\n self.orig = orig\n self.height = 1\n self.contingent = contingent\n self.th_class = self.td_class = f\"col-{name}\"\n if align == \"left\":\n self.th_style = self.td_style = \"\"\n else:\n self.th_style = self.td_style = f\"text-align:{align};\"\n self.th_content = self.td_content = \"\"\n self.download_desc = download_desc\n self.download_col = download_col\n\n for key, val in kwds.items():\n assert hasattr(self, key) and key.startswith(\"th_\") or key.startswith(\"td_\")\n setattr(self, key, getattr(self, key) + val)\n\n def _get(self, rec, name=None, downloading=False):\n \"\"\"\n INPUT:\n\n - ``rec`` -- either a dictionary, or a class with attributes (such as AbvarFq_isoclass,\n constructed through a postprocess step). This corresponds to a row (of the search results,\n or of the underlying table in the database).\n - ``name`` -- defaults to the name of the column, but can be overridden when downloading\n - ``downloading`` -- boolean, whether we are extracting a value for downloading. 
Determines\n whether missing values are returned as empty strings or as None.\n\n OUTPUT:\n\n A python object, the value for this column extracted from the database\n \"\"\"\n if name is None:\n name = self.name\n orig = self.orig[0]\n else:\n orig = name\n if isinstance(rec, dict):\n ans = rec.get(orig)\n if not downloading and ans is None:\n return \"\"\n return ans\n val = getattr(rec, name)\n return val() if callable(val) else val\n\n def get(self, rec):\n \"\"\"\n INPUT:\n\n - ``rec`` -- either a dictionary, or a class with attributes (such as AbvarFq_isoclass,\n constructed through a postprocess step). This corresponds to a row (of the search results,\n or of the underlying table in the database).\n\n OUTPUT:\n\n A python object, the value for this column extracted from the database\n \"\"\"\n # This function is used by the front-end display code, while the underlying _get method\n # is used for downloading. The difference shows up for Floats, where we want the full\n # precision in the downloaded file\n return self._get(rec)\n\n def display(self, rec):\n \"\"\"\n A string, to be displayed on the webpage, corresponding to this column within the row specified by\n the input ``rec``.\n \"\"\"\n # default behavior is to just use the string representation of rec\n s = str(self.get(rec))\n if s and self.mathmode:\n s = f\"${s}$\"\n return s\n\n def display_knowl(self):\n \"\"\"\n Displays the column header contents.\n \"\"\"\n if self.knowl:\n return display_knowl(self.knowl, self.title)\n return self.title\n\n def show(self, info, rank=None):\n \"\"\"\n This function is used when iterating through columns.\n\n INPUT:\n\n - ``info`` -- a dictionary, constructed from the url passed with the request\n - ``rank`` -- either None (indicating a data row), -1 (indicating a download), 0 (indicating the main header row), or a positive integer (indicating a later header row, where most columns are not shown).\n\n OUTPUT:\n\n A generator, containing columns to be shown. Usually contains one column (this one).\n \"\"\"\n # rank = 0 indicates the header row, rank = -1 indicates downloading\n if (self.contingent is None or self.contingent(info)) and (rank is None or rank <= 0):\n yield self\n\n def download(self, rec):\n \"\"\"\n A string, to be included in a download file, corresponding to this column.\n\n INPUT:\n\n - ``rec`` -- either a dictionary, or a class with attributes (such as AbvarFq_isoclass,\n constructed through a postprocess step). 
This corresponds to a row (of the search results,\n or of the underlying table in the database).\n \"\"\"\n name = None\n if self.download_col is not None:\n name = self.download_col\n return self._get(rec, name=name, downloading=True)\n\n\nclass SpacerCol(SearchCol):\n \"\"\"\n Spacer columns have empty content, but can have CSS added through ``td_*`` and ``th_*`` keywords.\n \"\"\"\n def __init__(self, name, **kwds):\n super().__init__(name, None, None, orig=[], **kwds)\n\n def display(self, rec):\n return \"\"\n\n def display_knowl(self):\n return \"\"\n\n def show(self, info, rank=None):\n if rank == -1:\n return []\n return super().show(info, rank)\n\n\nclass MathCol(SearchCol):\n \"\"\"\n Math columns display their contents in math mode and use center alignment by default.\n \"\"\"\n def __init__(self, name, knowl, title, align=\"center\", **kwds):\n kwds[\"mathmode\"] = True\n super().__init__(name, knowl, title, align=align, **kwds)\n\n\nclass FloatCol(MathCol):\n \"\"\"\n Float columns allow specifying a precision (defaulting to 3)\n \"\"\"\n def __init__(self, name, knowl, title, prec=3, **kwds):\n super().__init__(name, knowl, title, **kwds)\n self.prec = prec\n\n def get(self, rec):\n val = self._get(rec)\n # We mix string processing directives so that we can use variable precision\n return f\"%.{self.prec}f\" % val\n\n\nclass CheckCol(SearchCol):\n \"\"\"\n Check columns are for boolean columns from the database. They use a unicode check mark\n to represent a True value, and question mark for unknown, and blank for False.\n They are also centered by default.\n \"\"\"\n def __init__(self, name, knowl, title, align=\"center\", unknown=\"?\", no=\"\", **kwds):\n super().__init__(name, knowl, title, align=align, **kwds)\n self.unknown = unknown\n self.no = no\n\n def display(self, rec):\n val = self._get(rec, downloading=True) # We emulate downloading so that we can determine if the value is None\n if val:\n return \"✓\"\n elif val is None:\n return self.unknown\n else:\n return self.no\n\n\nclass CheckMaybeCol(SearchCol):\n \"\"\"\n CheckMaybe columns are for integer columns that use 1 to represent true, -1 for false and 0 for unknown.\n They explicitly show \"not computed\" rather than \"?\" for unknown values.\n They are also centered by default.\n \"\"\"\n def __init__(self, name, knowl, title, align=\"center\", unknown=\"?\", no=\"\", **kwds):\n super().__init__(name, knowl, title, align=align, **kwds)\n self.unknown = unknown\n self.no = no\n\n def display(self, rec):\n ans = self.get(rec)\n if ans > 0:\n return \"✓\"\n elif ans < 0:\n return self.no\n else:\n return self.unknown\n\n def download(self, rec, name=None):\n ans = self._get(rec)\n if ans == 0:\n return None\n else:\n return (ans > 0)\n\n\nclass LinkCol(SearchCol):\n \"\"\"\n These columns are used for links. 
They have an additional input, `url_for`,\n a function which takes the contents to be displayed\n (usually the label of an LMFDB object) and produces a url.\n \"\"\"\n def __init__(self, name, knowl, title, url_for, **kwds):\n super().__init__(name, knowl, title, **kwds)\n self.url_for = url_for\n\n def display(self, rec):\n link = self.get(rec)\n if not link:\n return \"\"\n return f'{link}'\n\n\nclass ProcessedCol(SearchCol):\n \"\"\"\n These columns allow for an arbitrary function to be applied to the contents retrieved from the database.\n\n They take two additional inputs:\n\n - ``func`` -- a function, applied to the contents from the database, whose output is to be displayed.\n Defaults to the identity.\n - ``apply_download`` -- either a boolean (determining whether the function should be applied when\n downloading), or a function that is applied instead while downloading.\n \"\"\"\n def __init__(self, name, knowl, title, func=None, apply_download=False, **kwds):\n super().__init__(name, knowl, title, **kwds)\n if func is None:\n # Some other column types like RationalCol inherit from ProcessedCol\n def func(x): return x\n self.func = func\n self.apply_download = apply_download\n\n def display(self, rec):\n s = str(self.func(self.get(rec)))\n if s and self.mathmode:\n s = f\"${s}$\"\n return s\n\n def download(self, rec, name=None):\n if self.download_col is not None:\n name = self.download_col\n s = self._get(rec, name=name, downloading=True)\n if callable(self.apply_download):\n s = self.apply_download(s)\n elif self.apply_download:\n s = self.func(s)\n return s\n\nclass ProcessedLinkCol(ProcessedCol):\n \"\"\"\n These columns allow for funtions to be applied to the contents retrieved from the database before generating\n a link. They take three additional inputs:\n\n - ``url_func`` -- a function producing the url from the contents\n - ``disp_func`` -- a function producing the string to be displayed from the contents\n - ``apply_download`` -- either a boolean (determining whether the display function should be applied when\n downloading), or a function that is applied instead while downloading.\n \"\"\"\n def __init__(self, name, knowl, title, url_func, disp_func, **kwds):\n super().__init__(name, knowl, title, disp_func, **kwds)\n self.url_func = url_func\n\n def display(self, rec):\n disp = super().display(rec)\n url = self.url_func(self.get(rec))\n return f'{disp}'\n\n\nclass MultiProcessedCol(SearchCol):\n \"\"\"\n These columns allow for functions that combine multiple columns from the database into a single output column.\n They take three additional inputs:\n\n - ``inputs`` -- a list of column names from the search table (or that have been created in a postprocessing step)\n - ``func`` -- a function taking as input the inputs from a given row and producing a value to be displayed\n - ``apply_download`` -- either a boolean (determing whether the function should be applied when\n downloading), or a function that is applied instead when downloading.\n\n Note that ``download_col`` is still available, and provides an alternative to the use of ``apply_download``.\n\n Unlike SearchCols, these columns only support dictionaries rather than custom postprocess classes,\n since a custom class can just define a method for use instead.\n \"\"\"\n def __init__(self, name, knowl, title, inputs, func, apply_download=True, **kwds):\n super().__init__(name, knowl, title, orig=inputs, **kwds)\n self.func = func\n self.apply_download = apply_download\n\n def display(self, rec):\n s = 
self.func(*[rec.get(col) for col in self.orig])\n if s and self.mathmode:\n s = f\"${s}$\"\n return s\n\n def download(self, rec, name=None):\n if self.download_col is None:\n data = [rec.get(col) for col in self.orig]\n if callable(self.apply_download):\n data = self.apply_download(*data)\n elif self.apply_download:\n data = self.func(*data)\n else:\n data = self._get(rec, name=self.download_col, downloading=True)\n return data\n\nclass ColGroup(SearchCol):\n \"\"\"\n A group of columns that are visually joined.\n\n See classical modular forms and subgroups of abstract groups for examples.\n In the first case, a few columns (the first few traces and Atkin-Lehner signs)\n have subcolumns. In the second, almost every column is grouped into one of three\n categories (subgroup, ambient or quotient).\n\n The main mechanism to support column groups is the show function. Unlike most columns,\n the set of columns produced depends on the input rank. When rank is -1 or 0 (downloading\n or top header), this column is yielded. Otherwise (the subheaders or when displaying contents)\n the subcolumns are yielded.\n\n There is also a subtle difference in behavior depending on whether the name of the column group\n is the same as the name of each sub column. In this case, the columns are all shown and hidden\n together in the column dropdown; otherwise, they are controlled independently. See the\n ``ColumnController`` class in ``lmfdb/utils/search_boxes.py`` for more details.\n\n There is one additional input:\n\n - ``subcols`` -- a list of ``SearchColumn`` instances, or a callable taking ``info`` as input:\n the columns to be grouped together.\n\n In addition, the top column header is center aligned by default, ``orig`` is constructed from\n the ``orig`` attributes of the subcolumns\n\n Note that ``download_col`` is still available. 
If not specified, a list is constructed from the\n download methods of the subcolumns.\n \"\"\"\n # See classical modular forms for an example\n\n def __init__(self, name, knowl, title, subcols,\n contingent=lambda info: True, orig=None,\n align=\"center\", **kwds):\n if orig is None:\n orig = sum([sub.orig for sub in subcols], [])\n super().__init__(name, knowl, title, align=align, orig=orig, contingent=contingent, **kwds)\n self.subcols = subcols\n # A more complicated grouping could add more header rows, but the examples we have only need 2\n self.height = 2\n\n def show(self, info, rank=None):\n if self.contingent(info):\n if callable(self.subcols):\n subcols = self.subcols(info)\n else:\n subcols = self.subcols\n n = 0\n for sub in subcols:\n if sub.name != self.name and \"colgroup\" not in sub.th_class:\n sub.th_class += f\" colgroup-{self.name}\"\n if sub.default(info):\n n += 1\n self.th_content = f\" colspan={n}\"\n if rank is None or rank > 0:\n yield from subcols\n else:\n yield self\n\n def download(self, rec):\n if self.download_col is not None:\n return self._get(rec, name=self.download_col, downloading=True)\n return [sub.download(rec) for sub in self.subcols]\n\n\nclass SearchColumns:\n \"\"\"\n This class packages together a list of search columns, providing the ``columns_shown`` method\n as an iterator over the columns to be displayed.\n\n INPUT:\n\n - ``columns`` -- a list of SearchCol objects\n - ``db_cols`` -- the column names to be retrieved from the underlying search table.\n By default this is constructed from the ``orig`` attributes of the underlying search columns,\n but it sometimes needs to be overridden, mainly for cases like abelian varieties and artin\n representations that use a class for postprocessing.\n - ``tr_class`` -- a list of CSS classes to be added to the corresponding rows in the header (see classical modular forms for an example)\n \"\"\"\n above_results = \"\" # Can add text above the Results (1-50 of ...) 
if desired\n above_table = \"\" # Can add text above the results table if desired\n dummy_download = False # change this to include dummy_download_search_results.html instead of download_search_results.html\n below_download = \"\" # Can add text above the bottom download links\n\n def __init__(self, columns, db_cols=None, tr_class=None):\n self.maxheight = maxheight = max(C.height for C in columns)\n if maxheight > 1:\n for C in columns:\n if C.height == 1:\n # columns that have height > 1 are responsible for\n # setting th_content on their own\n C.th_content += fr\" rowspan={maxheight}\"\n self.columns = columns\n if db_cols is None:\n db_cols = sorted(set(sum([C.orig for C in columns], [])))\n self.db_cols = db_cols\n if tr_class is None:\n tr_class = [\"\" for _ in range(maxheight)]\n self.tr_class = tr_class\n\n def columns_shown(self, info, rank=None):\n \"\"\"\n Iterate over columns.\n\n INPUT:\n\n - ``info`` -- the dictionary created from the url\n - ``rank`` -- either None (indicating the body of the table), -1 (indicating downloading),\n 0 (indicating the top row of the header) or a positive integer (indicating a lower row in the header).\n \"\"\"\n # By default, this doesn't depend on info\n # rank is None in the body of the table, and 0..(maxrank-1) in the header\n for C in self.columns:\n yield from C.show(info, rank)\n\n\n# The following column types are used to control download behavior\n\nclass PolynomialCol(SearchCol):\n \"\"\"\n These columns display their contents as polynomials in x.\n \"\"\"\n def display(self, rec):\n return pol_to_html(str(coeff_to_poly(self.get(rec))))\n\ndef eval_rational_list(s):\n \"\"\"\n Some columns in the LMFDB store lists as strings rather than arrays. This function\n unpacks several of the most common storage types for use in downloading.\n\n More precisely, it handles lists of integers or rationals stored in the following formats\n\n - unnested lists like \"[1,2,3]\" or \"1,2,3\"\n - once-nested lists like \"[[1,2],[3,4]]\" or \"1,2;3,4\"\n - single quotes wrapping the integers/rationals, like \"['1','2','3']\"\n \"\"\"\n def split(x):\n if not x:\n return []\n return x.split(\",\")\n s = s.replace(\" \", \"\").replace(\"'\", \"\")\n s = s.lstrip(\"[\").rstrip(\"]\")\n if not s:\n return []\n for obreak in [\";\", \"],[\"]:\n if obreak in s:\n return [[Rational(y) for y in split(x)] for x in s.split(obreak)]\n return [Rational(x) for x in split(s)]\n\nclass ListCol(ProcessedCol):\n \"\"\"\n Uses the ``eval_rational_list`` function to process the column for downloading.\n \"\"\"\n def download(self, rec):\n s = super().download(rec)\n return eval_rational_list(s)\n\nclass RationalCol(ProcessedCol):\n \"\"\"\n For rational numbers stored as strings; parses them appropriately for downloading.\n \"\"\"\n def download(self, rec):\n s = super().download(rec)\n return Rational(s)\n","repo_name":"LMFDB/lmfdb","sub_path":"lmfdb/utils/search_columns.py","file_name":"search_columns.py","file_ext":"py","file_size_in_byte":24051,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"96"} +{"seq_id":"38663157400","text":"def main():\n n = int(input())\n\n for _ in range(n):\n larg, qntd = map(int, input().split())\n\n soma = 0\n cadeia = input()\n for i in range(qntd):\n Li, Ri = map(int, input().split())\n\n \n char_count = {}\n for char in cadeia[Li - 1: Ri]:\n if char in char_count:\n char_count[char] += 1\n else:\n char_count[char] = 1\n \n if len(list(filter(lambda x: x % 2 == 1, char_count.values()))) <= 
1:\n soma += 1\n \n print(soma)\n\n\nif __name__ == '__main__':\n main()","repo_name":"PedroTejon/Solucoes-Maratona-Facens","sub_path":"2019/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"16377553755","text":"import os\nimport re\nfrom tqdm import tqdm\nimport random\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nimport shap\nimport lightgbm as lgb\nimport optuna\nfrom optuna.samplers import TPESampler\noptuna.logging.set_verbosity(optuna.logging.WARNING)\n\nfrom sklearn.utils import shuffle\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold as SKF\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\nfrom skbio.stats.composition import clr\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nDIR_TRAIN = \"data_train\"\nDIR_TEST = \"input\"\n\nPATH_FTS_PHYLO_05 = \"filtered_fts/phylotype_05_features.txt\"\nPATH_FTS_PHYLO_1 = \"filtered_fts/phylotype_1_features.txt\"\nPATH_FTS_FAMILY = \"filtered_fts/family_features.txt\"\nPATH_FTS_GENUS = \"filtered_fts/genus_features.txt\"\nPATH_FTS_SPECIES = \"filtered_fts/species_features.txt\"\n\ndef load_fts_names(path):\n \"\"\"\n Load feature names from txt files and return a list of strings.\n \"\"\"\n def find_between(s, first, last ):\n try:\n start = s.index( first ) + len( first )\n end = s.index( last, start )\n return s[start:end]\n except ValueError:\n return \"\"\n\n res = []\n if \"phylo\" in path:\n with open(path,\"r\") as file:\n for line in file.readlines():\n res.extend([t[1:-1] for t in line[line.find(\"]\")+1:].split()])\n else:\n with open(path,\"r\") as file:\n for line in file.readlines():\n res.append(find_between(line, '\"', '\"'))\n return res\n\ndef build_dictionaries(df_meta_tk):\n \"\"\"\n Construct some mapping information\n \"\"\"\n dic_pid_rids = {}\n dic_pid_specimen = {}\n dic_proj_pid = {}\n for rid, row in df_meta_tk.iterrows():\n if row['participant_id'] not in dic_pid_rids:\n dic_pid_rids[row['participant_id']] = [rid]\n else:\n dic_pid_rids[row['participant_id']].append(rid)\n if row['participant_id'] not in dic_pid_specimen:\n dic_pid_specimen[row['participant_id']] = [row['specimen']]\n else:\n dic_pid_specimen[row['participant_id']].append(row['specimen'])\n if row['project'] not in dic_proj_pid:\n dic_proj_pid[row['project']] = [row['participant_id']]\n else:\n dic_proj_pid[row['project']].append(row['participant_id'])\n dic_proj_pid = {i: set(dic_proj_pid[i]) for i in dic_proj_pid}\n return dic_pid_rids, dic_pid_specimen, dic_proj_pid\n\n\ndef weights_by_wk(dic_pid_specimen, dic_pid_rids, df_meta_tk):\n \"\"\"\n More recent collections have heavier weights.\n Specimens from one participant sum up to 1.\n \"\"\"\n dic_pid_clwk = {}\n for pid in dic_pid_specimen:\n dic_pid_clwk[pid] = [df_meta_tk[df_meta_tk['specimen']==sid]['collect_wk'].values[0] for sid in dic_pid_specimen[pid]]\n dic_pid_wts = {pid:[wk/sum(dic_pid_clwk[pid]) for wk in dic_pid_clwk[pid]] for pid in dic_pid_clwk}\n ary_weights = np.zeros(df_meta_tk.shape[0])\n for pid in dic_pid_specimen:\n ary_weights[dic_pid_rids[pid]] = dic_pid_wts[pid]\n return ary_weights\n\ndef add_encoded_age(df_meta_tk):\n \"\"\"\n Convert age to categorical data and add to dataframe\n \"\"\"\n lst_age = []\n for _, row in df_meta_tk.iterrows():\n try:\n age = float(row.age)\n if age < 18:\n age_cat = 
'Below_18'\n elif age >= 18 and age < 28:\n age_cat = '18_to_28'\n elif age >= 28 and age <= 38:\n age_cat = '29-38'\n elif age > 38:\n age_cat = 'Above_38'\n else:\n age_cat = 'Unknown'\n except:\n age_cat = row.age\n lst_age.append(age_cat)\n df_meta_tk['age'] = df_meta_tk['age'].replace(['18_to_28', '29-38', 'Below_18', 'Above_38'], 'Unknown')\n df_meta_tk['age_cat'] = lst_age\n return df_meta_tk\n\ndef lgb_autotune(X_train, y_train, ary_weights_train, dic_pid_rids_train):\n # use Optuna to tune hyperparameters\n kf = KFold(n_splits=5, shuffle=True, random_state=41)\n lst_auroc = []\n def objective_lgb(trial):\n param = {\n 'verbosity': trial.suggest_categorical('verbosity', [-1]),\n 'objective': trial.suggest_categorical('objective', ['binary']),\n 'num_iterations': trial.suggest_categorical('num_iterations', [25,50,75,100]),\n 'learning_rate': trial.suggest_categorical('learning_rate', [0.02,0.03,0.04,0.05]),\n 'max_depth': trial.suggest_categorical('max_depth', [3,4,5,6]),\n 'min_data_in_leaf': trial.suggest_categorical('min_data_in_leaf', [16,20,24,28,30]),\n 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-4, 1e-1),\n 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-4, 1e-1),\n 'verbose': trial.suggest_categorical('verbose', [-1])\n }\n for train_ks, tune_ks in kf.split(dic_pid_rids_train):\n # participant IDs for training and test data\n train_pid = np.array(list(dic_pid_rids_train.keys()))[train_ks]\n tune_pid = np.array(list(dic_pid_rids_train.keys()))[tune_ks]\n # row indices for training data\n train_rid = [pp for pid in train_pid for pp in dic_pid_rids_train[pid]]\n # row indices for tuning data\n tune_rid = []\n dic_pid_tu_rid = {} # which rows are collected from which participants\n pointer = 0\n for pid in tune_pid:\n tune_rid.extend(dic_pid_rids_train[pid])\n dic_pid_tu_rid[pid] = list(range(pointer, pointer+len(dic_pid_rids_train[pid])))\n pointer += len(dic_pid_rids_train[pid])\n # split dataset\n assert [i for i in train_rid if i in tune_rid]==[], \"Row IDs in training set also occur in test set.\"\n X_tr, y_tr = X_train_total[train_rid], y_train_total[train_rid]\n X_tu, y_tu = X_train_total[tune_rid], y_train_total[tune_rid]\n # build dataset\n lgb_tr = lgb.Dataset(X_tr, label=y_tr,\n categorical_feature=[2,3,4,5], weight=ary_weights_train[train_rid])\n gbm = lgb.train(param, lgb_tr,\n categorical_feature=[2,3,4,5], init_model=None)\n preds_all = gbm.predict(X_tu)\n y_pred = [(ary_weights_train[tune_rid]*preds_all)[dic_pid_tu_rid[pid]].sum() for pid in dic_pid_tu_rid]\n y_true = [y_tu[dic_pid_tu_rid[pid]][0] for pid in dic_pid_tu_rid]\n lst_auroc.append((roc_auc_score(y_true, y_pred)))\n return np.mean(lst_auroc)\n sampler = TPESampler(seed=41)\n study = optuna.create_study(direction='maximize', sampler=sampler)\n study.optimize(objective_lgb, n_trials=200)\n return study.best_params, study.best_value\n\ndic_fts = {}\ndic_fts[\"phylotype_05\"] = load_fts_names(PATH_FTS_PHYLO_05)\ndic_fts[\"phylotype_1\"] = load_fts_names(PATH_FTS_PHYLO_1)\ndic_fts[\"species\"] = load_fts_names(PATH_FTS_SPECIES)\ndic_fts[\"genus\"] = load_fts_names(PATH_FTS_GENUS)\ndic_fts[\"family\"] = load_fts_names(PATH_FTS_FAMILY)\n\n# read datasets\ndf_meta_train = pd.read_csv(f\"{DIR_TRAIN}/metadata.csv\")\ndf_alpha_diversity_train = pd.read_csv(f\"{DIR_TRAIN}/alpha_diversity.csv\")\ndf_community_state_train = pd.read_csv(f\"{DIR_TRAIN}/cst_valencia.csv\")\ndf_phylo05_train = pd.read_csv(f\"{DIR_TRAIN}/phylotype_nreads.5e_1.csv\")\ndf_phylo05_train[df_phylo05_train.columns[1:]] = 
clr(df_phylo05_train[df_phylo05_train.columns[1:]].values+0.5)\ndf_phylo1_train = pd.read_csv(f\"{DIR_TRAIN}/phylotype_nreads.1e0.csv\")\ndf_phylo1_train[df_phylo1_train.columns[1:]] = clr(df_phylo1_train[df_phylo1_train.columns[1:]].values+0.5)\ndf_species_train = pd.read_csv(f\"{DIR_TRAIN}/taxonomy_nreads.species.csv\")\ndf_species_train[df_species_train.columns[1:]] = clr(df_species_train[df_species_train.columns[1:]].values+0.5)\ndf_genus_train = pd.read_csv(f\"{DIR_TRAIN}/taxonomy_nreads.genus.csv\")\ndf_genus_train[df_genus_train.columns[1:]] = clr(df_genus_train[df_genus_train.columns[1:]].values+0.5)\ndf_family_train = pd.read_csv(f\"{DIR_TRAIN}/taxonomy_nreads.family.csv\")\ndf_family_train[df_family_train.columns[1:]] = clr(df_family_train[df_family_train.columns[1:]].values+0.5)\n\n# task 1 only use data collected no later than 32 weeks\ndf_meta_tk1_train = df_meta_train[(df_meta_train[\"collect_wk\"]<=32)].reset_index(drop=True)\n\n# construct some dictionaries\ndic_pid_rids_train, dic_pid_specimen_train, dic_proj_pid = build_dictionaries(df_meta_tk1_train)\n\n# weights by date\nary_weights_train = weights_by_wk(dic_pid_specimen_train, dic_pid_rids_train, df_meta_tk1_train)\n\n# add categorical age\ndf_meta_tk1_train = add_encoded_age(df_meta_tk1_train)\n\n# encode categorical data\ncols_demo = ['participant_id', 'specimen', 'project', 'collect_wk', 'NIH Racial Category', 'age', 'age_cat', 'was_term']\ndf_demo_train = df_meta_tk1_train[cols_demo]#.loc[df_meta_tk1.groupby(\"participant_id\")[\"collect_wk\"].idxmax()][cols_demo]\ndf_demo_train = df_demo_train.replace('Unknown', np.nan)\ndf_demo_train = df_demo_train.reset_index(drop=True)\ndf_demo_train['age'] = df_demo_train['age'].astype(float)\ncat_cols = ['NIH Racial Category','age_cat','CST','subCST']\ndf_cat_train = df_demo_train[['specimen','NIH Racial Category','age_cat']].join(df_community_state_train[['specimen','CST','subCST']].set_index('specimen'), on='specimen')\n# load pre-fit label encoder\nwith open(\"dic_encoder.pickle\", \"rb\") as handle:\n dic_encoder = pickle.load(handle)\nfor col in cat_cols:\n le = dic_encoder[col]\n df_cat_train[col] = [x if x in le.classes_ else 'nan' for x in df_cat_train[col]]\n df_cat_train[col] = le.transform(df_cat_train[[col]])\n\n# join datasets on specimen IDs\ndf_alpha_diversity_train = df_demo_train[['specimen']].join(df_alpha_diversity_train.set_index('specimen'), on='specimen')\ndf_phylo05_train = df_demo_train[['specimen']].join(df_phylo05_train.set_index('specimen'), on='specimen')\ndf_phylo1_train = df_demo_train[['specimen']].join(df_phylo1_train.set_index('specimen'), on='specimen')\ndf_species_train = df_demo_train[['specimen']].join(df_species_train.set_index('specimen'), on='specimen')\ndf_genus_train = df_demo_train[['specimen']].join(df_genus_train.set_index('specimen'), on='specimen')\ndf_family_train = df_demo_train[['specimen']].join(df_family_train.set_index('specimen'), on='specimen')\nprint(\"Training data loaded!\")\n\n# read datasets\ndf_meta_test = pd.read_csv(f\"{DIR_TEST}/metadata/metadata.csv\")\ndf_alpha_diversity_test = pd.read_csv(f\"{DIR_TEST}/alpha_diversity/alpha_diversity.csv\")\ndf_community_state_test = pd.read_csv(f\"{DIR_TEST}/community_state_types/cst_valencia.csv\")\ndf_phylo05_test = pd.read_csv(f\"{DIR_TEST}/phylotypes/phylotype_nreads.5e_1.csv\")\ndf_phylo05_test[df_phylo05_test.columns[1:]] = clr(df_phylo05_test[df_phylo05_test.columns[1:]].values+0.5)\ndf_phylo1_test = 
pd.read_csv(f\"{DIR_TEST}/phylotypes/phylotype_nreads.1e0.csv\")\ndf_phylo1_test[df_phylo1_test.columns[1:]] = clr(df_phylo1_test[df_phylo1_test.columns[1:]].values+0.5)\ndf_species_test = pd.read_csv(f\"{DIR_TEST}/taxonomy/taxonomy_nreads.species.csv\")\ndf_species_test[df_species_test.columns[1:]] = clr(df_species_test[df_species_test.columns[1:]].values+0.5)\ndf_genus_test = pd.read_csv(f\"{DIR_TEST}/taxonomy/taxonomy_nreads.genus.csv\")\ndf_genus_test[df_genus_test.columns[1:]] = clr(df_genus_test[df_genus_test.columns[1:]].values+0.5)\ndf_family_test = pd.read_csv(f\"{DIR_TEST}/taxonomy/taxonomy_nreads.family.csv\")\ndf_family_test[df_family_test.columns[1:]] = clr(df_family_test[df_family_test.columns[1:]].values+0.5)\nprint(\"Test data loaded!\")\n# task 1 only use data collected no later than 32 weeks\ndf_meta_tk1_test = df_meta_test[(df_meta_test[\"collect_wk\"]<=32)].reset_index(drop=True)\n\n# construct some dictionaries\ndic_pid_rids_test, dic_pid_specimen_test, dic_proj_pid_test = build_dictionaries(df_meta_tk1_test)\n\n# weights by date\nary_weights_test = weights_by_wk(dic_pid_specimen_test, dic_pid_rids_test, df_meta_tk1_test)\n\n# add categorical age\ndf_meta_tk1_test = add_encoded_age(df_meta_tk1_test)\n\n# encode categorical data\ncols_demo = ['participant_id', 'specimen', 'project', 'collect_wk', 'NIH Racial Category', 'age', 'age_cat']\ndf_demo_test = df_meta_tk1_test[cols_demo]#.loc[df_meta_tk1.groupby(\"participant_id\")[\"collect_wk\"].idxmax()][cols_demo]\ndf_demo_test = df_demo_test.replace('Unknown', np.nan)\ndf_demo_test = df_demo_test.reset_index(drop=True)\ndf_demo_test['age'] = df_demo_test['age'].astype(float)\ncat_cols = ['NIH Racial Category','age_cat','CST','subCST']\ndf_cat_test = df_demo_test[['specimen','NIH Racial Category','age_cat']].join(df_community_state_test[['specimen','CST','subCST']].set_index('specimen'), on='specimen')\n# label encoding and save encoder as a dictionary\nfor col in cat_cols:\n le = dic_encoder[col]\n df_cat_test[col] = [x if x in le.classes_ else 'nan' for x in df_cat_test[col]]\n df_cat_test[col] = le.transform(df_cat_test[[col]])\n\n# join datasets on specimen IDs\ndf_alpha_diversity_test = df_demo_test[['specimen']].join(df_alpha_diversity_test.set_index('specimen'), on='specimen')\ndf_phylo05_test = df_demo_test[['specimen']].join(df_phylo05_test.set_index('specimen'), on='specimen')\ndf_phylo1_test = df_demo_test[['specimen']].join(df_phylo1_test.set_index('specimen'), on='specimen')\ndf_species_test = df_demo_test[['specimen']].join(df_species_test.set_index('specimen'), on='specimen')\ndf_genus_test = df_demo_test[['specimen']].join(df_genus_test.set_index('specimen'), on='specimen')\ndf_family_test = df_demo_test[['specimen']].join(df_family_test.set_index('specimen'), on='specimen')\n\n# only use features that orrur in test set\ndic_fts['phylotype_05'] = [ft for ft in dic_fts['phylotype_05'] if ft in df_phylo05_test.columns]\ndic_fts['phylotype_1'] = [ft for ft in dic_fts['phylotype_1'] if ft in df_phylo1_test.columns]\ndic_fts['species'] = [ft for ft in dic_fts['species'] if ft in df_species_test.columns]\ndic_fts['genus'] = [ft for ft in dic_fts['genus'] if ft in df_genus_test.columns]\ndic_fts['family'] = [ft for ft in dic_fts['family'] if ft in df_family_test.columns]\ndf_phylo05_train = df_phylo05_train[['specimen']+dic_fts['phylotype_05']]\ndf_phylo05_test = df_phylo05_test[['specimen']+dic_fts['phylotype_05']]\ndf_phylo1_train = df_phylo1_train[['specimen']+dic_fts['phylotype_1']]\ndf_phylo1_test = 
df_phylo1_test[['specimen']+dic_fts['phylotype_1']]\ndf_species_train = df_species_train[['specimen']+dic_fts['species']]\ndf_species_test = df_species_test[['specimen']+dic_fts['species']]\ndf_genus_train = df_genus_train[['specimen']+dic_fts['genus']]\ndf_genus_test = df_genus_test[['specimen']+dic_fts['genus']]\ndf_family_train = df_family_train[['specimen']+dic_fts['family']]\ndf_family_test = df_family_test[['specimen']+dic_fts['family']]\n\n# merge features\nX_train_total = np.concatenate([df_demo_train[['collect_wk', 'age']],\n df_cat_train.values[:,1:], # 'NIH Racial Category','age_cat','CST','subCST'\n df_alpha_diversity_train.values[:,1:],\n df_phylo05_train.values[:,1:],\n df_phylo1_train.values[:,1:],\n df_species_train.values[:,1:],\n df_genus_train.values[:,1:],\n df_family_train.values[:,1:]], axis=1)\nprint(X_train_total.shape)\n# labels\ny_train_total = np.zeros(df_demo_train.shape[0])\nfor rid, row in df_demo_train.iterrows():\n if row['was_term'] == False:\n y_train_total[rid] = 1\n\n# test\nX_test = np.concatenate([df_demo_test[['collect_wk', 'age']],\n df_cat_test.values[:,1:], # 'NIH Racial Category','age_cat','CST','subCST'\n df_alpha_diversity_test.values[:,1:],\n df_phylo05_test.values[:,1:],\n df_phylo1_test.values[:,1:],\n df_species_test.values[:,1:],\n df_genus_test.values[:,1:],\n df_family_test.values[:,1:]], axis=1)\n\n# tuning\nparams, cv_score = lgb_autotune(X_train_total, y_train_total, ary_weights_train, dic_pid_rids_train)\n\n# train model 1\nlgb_train1 = lgb.Dataset(X_train_total, label=y_train_total,\n categorical_feature=[2,3,4,5], weight=ary_weights_train)\ngbm1 = lgb.train(params, lgb_train1,\n categorical_feature=[2,3,4,5], init_model=None)\n# predict\ny_pred_all_test1 = gbm1.predict(X_test)\n\n# data for model 2\nrid_G = [rid for pid in dic_proj_pid['G'] for rid in dic_pid_rids_train[pid]]\nX_train_G = X_train_total[rid_G]\ny_train_G = y_train_total[rid_G]\nary_weights_G = ary_weights_train[rid_G]\n\ndic_pid_G_rid = {}\npointer = 0\nfor pid in dic_proj_pid['G']:\n dic_pid_G_rid[pid] = list(range(pointer, pointer+len(dic_pid_rids_train[pid])))\n pointer += len(dic_pid_rids_train[pid])\n\n# tuning\nparams_G, cv_score = lgb_autotune(X_train_G, y_train_G, ary_weights_G, dic_pid_G_rid)\n\n# train model 2\nlgb_train2 = lgb.Dataset(X_train_G, label=y_train_G,\n categorical_feature=[2,3,4,5], weight=ary_weights_G)\ngbm2 = lgb.train(params_G, lgb_train2,\n categorical_feature=[2,3,4,5], init_model=None)\n# predict\ny_pred_all_test2 = gbm2.predict(X_test)\n\n# train a classifier that predict the prob that a sample is from G\ndf_counts_train = pd.read_csv(f\"{DIR_TRAIN}/phylotype_nreads.5e_1.csv\")\ndf_counts_train = df_demo_train[['specimen']].join(df_counts_train.set_index('specimen'), on='specimen')\nX_cls_G_train = np.concatenate([np.sum(df_counts_train.values[:,1:], axis=1).reshape(-1,1),\n X_train_total[:,0].reshape(-1,1)], axis=1)\ny_cls_G_train = np.zeros(X_cls_G_train.shape[0])\ny_cls_G_train[[rid for pid in dic_pid_rids_train if \"G\" in pid for rid in dic_pid_rids_train[pid]]] = 1\nX_cls_G_train, y_cls_G_train = shuffle(X_cls_G_train, y_cls_G_train, random_state=0)\nmodel = LogisticRegression(solver='saga', penalty='elasticnet', l1_ratio=0.5, multi_class='ovr',random_state=0)\nmodel.fit(X_cls_G_train, y_cls_G_train)\ndf_counts_test = pd.read_csv(f\"{DIR_TEST}/phylotypes/phylotype_nreads.5e_1.csv\")\ndf_counts_test = df_demo_test[['specimen']].join(df_counts_test.set_index('specimen'), on='specimen')\nX_cls_G_test = 
np.concatenate([np.sum(df_counts_test.values[:,1:], axis=1).reshape(-1,1),\n X_test[:,0].reshape(-1,1)], axis=1)\ny_cls_G_test = np.zeros(X_cls_G_test.shape[0])\ny_cls_G_test[[rid for pid in dic_pid_rids_test if \"G\" in pid for rid in dic_pid_rids_test[pid]]] = 1\npred_G = model.predict_proba(X_cls_G_test)[:,1]\n\n# ensemble using above weights\nprob_isG = np.minimum(pred_G, 0.8)\npred_all_emsemble = y_pred_all_test1*(1-prob_isG)+y_pred_all_test2*prob_isG\ny_pred_ensemle = [(ary_weights_test*pred_all_emsemble)[dic_pid_rids_test[pid]].sum() for pid in dic_pid_rids_test]\n\n# make output files\npid_test = [pid for pid in dic_pid_rids_test]\ny_binary = [1 if y >= 0.5 else 0 for y in y_pred_ensemle]\ndic_csv = {\n \"participant\": pid_test,\n \"was_preterm\": y_binary,\n \"probability\": y_pred_ensemle\n}\ndf_out = pd.DataFrame(dic_csv)\ndf_out.to_csv('output/predictions.csv', index=False)\nprint(\"Done!\")\n","repo_name":"GGGGFan/Preterm-Birth-Prediction---Vaginal-Microbiome---UWisc-Madison","sub_path":"Task_1_Submission_1/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":19292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27541139834","text":"# -*- coding: utf-8 -*-\n\n# =========================================\n# digitalsherlocks\n# A DFRLab project\n#\n# Author: @estebanpdl\n#\n# File: Manages local database.\n# =========================================\n\n# import modules\nimport os\nimport sys\nimport sqlite3\nimport requests\n\n# import log utils\nfrom logs import printl\n\n# import from modules\nfrom sqlite3 import OperationalError\n\n# Database class\nclass Database(object):\n\t'''\n\t'''\n\tdef __init__(self, **kwargs):\n\t\t'''\n\n\t\tkwargs:\n\t\t\t- wd\n\t\t\t\t[working directory]\n\t\t\t- update_database\n\t\t\t\t[if an existing database needs to be updated]\n\t\t\t- dbpath\n\t\t\t\t[Existing database]\n\t\t\t- dbname\n\t\t\t\t[Custom database name]\n\t\t\t- endpoint\n\t\t\t\t[Which SQL file will be readed]\n\t\t'''\n\n\t\t# Instance variables\n\t\tself.wd = kwargs['wd']\n\t\tself.update_database = kwargs['update_database']\n\t\tself.dbpath = kwargs['dbpath']\n\t\tself.dbname = kwargs['dbname']\n\t\tself.endpoint = kwargs['endpoint']\n\n\tdef _get_sqlfile(self):\n\t\t'''\n\n\t\tReads a sql file hosted on AWS s3.\n\t\t'''\n\t\tpath = f'digitalsherlocks/sql/{self.endpoint}.sql'\n\t\turl = f'https://dfrlab.s3.us-west-2.amazonaws.com/{path}'\n\n\t\t# URL Request\n\t\treturn requests.get(url).text\n\n\tdef _get_database_filename(self):\n\t\t'''\n\n\t\tGets database filename.\n\t\t'''\n\t\tif self.dbpath == None:\n\t\t\tname = 'data' if self.dbname == None else self.dbname\n\t\t\tdbfile = f'{self.wd}{name}.db'\n\n\t\telse:\n\t\t\tisfile = os.path.isfile(self.dbpath)\n\t\t\tif not isfile and self.update_database == True:\n\t\t\t\tprintl(\n\t\t\t\t\tf'The file {self.dbpath} was not found.',\n\t\t\t\t\tcolor='RED'\n\t\t\t\t)\n\t\t\t\tprintl(\n\t\t\t\t\t'Please try again with the correct file.',\n\t\t\t\t\tcolor='RED'\n\t\t\t\t)\n\t\t\t\tprintl('Program closed', color='GREEN')\n\n\t\t\t\t# Quit program\n\t\t\t\tsys.exit()\n\t\t\telse:\n\t\t\t\tdbfile = self.dbpath\n\n\t\tdbfile = os.path.abspath(dbfile)\n\t\treturn dbfile.replace(os.sep, '/')\n\n\tdef _get_db_attrs(self, data):\n\t\t'''\n\n\t\tGets data from _return_database_attrs.\n\t\tIf tweets, argument will be q < query >\n\t\tIf users, argument will be user_id. 
Will update database\n\t\t\tbased on the user id.\n\t\t'''\n\n\t\t# Test objects\n\t\ttest = [\n\t\t\ti for i in data if 'cli update' in i[0]\n\t\t]\n\n\t\tif test:\n\t\t\tdata = [\n\t\t\t\ti for i in data if i[1] == 'tweets'\n\t\t\t\tor 'cli update' in i[0]\n\t\t\t]\n\n\t\t# Building database arguments\n\t\tobj = {\n\t\t\t'tweets': 'q',\n\t\t\t'users': 'user_id'\n\t\t}\n\n\t\targs = []\n\t\tfor item in data:\n\t\t\ttmp = {\n\t\t\t\tobj[item[1]]: item[0] if item[4] == None else item[4],\n\t\t\t\t'endpoint': item[1],\n\t\t\t\t'since_id': item[2],\n\t\t\t\t'timezone': item[3]\n\t\t\t}\n\n\t\t\t# append args\n\t\t\targs.append(tmp)\n\n\t\treturn args\n\n\tdef _return_database_attrs(self, db_connection, db_cursor):\n\t\t'''\n\n\t\tGets database attrs.\n\t\tReturn: dict < kwargs >\n\t\t'''\n\n\t\tsql = '''\n\t\tSELECT\n\t\t\tsearch_request, endpoint_type, MAX(id), timezone,\n\t\t\tCASE WHEN endpoint_type = 'users'\n\t\t\t\tTHEN user_id\n\t\t\t\tELSE NULL\n\t\t\tEND\n\t\tFROM tweet\n\t\tGROUP BY search_request\n\t\t'''\n\n\t\ttry:\n\t\t\tdb_cursor.execute(sql)\n\t\texcept OperationalError:\n\t\t\t'''\n\n\t\t\tError found. Incorrect database.\n\t\t\t'''\n\t\t\tdb_cursor.execute(\"PRAGMA database_list\")\n\t\t\trows = db_cursor.fetchall()\n\n\t\t\t# Get main db\n\t\t\tmain_db = [\n\t\t\t\ti[2] for i in rows if i[1] == 'main' and i[2] != None\n\t\t\t][0]\n\n\t\t\t# Transform db path\n\t\t\tmain_db = main_db.replace(os.sep, '/')\n\n\t\t\tprintl(\n\t\t\t\tf'Error found: Incorrect database: {main_db}',\n\t\t\t\tcolor='RED'\n\t\t\t)\n\t\t\tprintl(\n\t\t\t\t'Database should contain tweets or user timelines',\n\t\t\t\tcolor='RED'\n\t\t\t)\n\t\t\tprintl(\n\t\t\t\t'Please try again with the correct file.',\n\t\t\t\tcolor='RED'\n\t\t\t)\n\t\t\tprintl('Program closed', color='GREEN')\n\t\t\t\n\t\t\t# Quit program\n\t\t\tsys.exit()\n\n\n\t\t# Get data\n\t\tdata = [i for i in db_cursor.fetchall()]\n\n\t\treturn self._get_db_attrs(data) \n\n\tdef _connect_db(self):\n\t\t'''\n\n\t\tConnects to sqlite database\n\t\t'''\n\t\t# Get database filename\n\t\tdbfile = self._get_database_filename()\n\t\tprintl(f'Database at {dbfile}', color='GREEN')\n\n\t\t# Connect database\n\t\tprintl('Connecting to database')\n\t\tdb_connection = sqlite3.connect(dbfile)\n\t\tprintl('Database connected')\n\n\t\t# Get database cursor\n\t\tdb_cursor = db_connection.cursor()\n\n\t\t'''\n\t\tDatabase status:\n\n\t\t\t- Check if an existing database will be updated.\n\t\t'''\n\n\t\tif not self.update_database:\n\n\t\t\t# Encoding database\n\t\t\tdb_cursor.execute('PRAGMA encoding')\n\n\t\t\t# Execute SQL script\n\t\t\tsql_script = self._get_sqlfile()\t\t\t\n\t\t\tdb_cursor.executescript(sql_script)\n\n\t\t\t# Commit results\n\t\t\tdb_connection.commit()\n\n\t\treturn db_connection, db_cursor\n","repo_name":"DFRLab/digitalsherlocks","sub_path":"digitalsherlocks/APITwitter/database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"96"} +{"seq_id":"27835315639","text":"# wrote by nojima, 2022\n\nimport pandas as pd\nimport numpy as np\nimport sys\n\n\nif len(sys.argv) != 5:\n print(sys.argv[0], ' input.csv anon.csv rows columns')\n sys.exit(0)\n \ndf = pd.read_csv(sys.argv[1], header=None)\nrows = [int(c) for c in sys.argv[3].split('_')]\ncols = [int(e) for e in sys.argv[4].split('_')]\nout = sys.argv[2] \n\nfor i in range(len(cols)):\n df.iloc[rows[i],cols[i]] = 99\ndf.to_csv(out, index = None, header = 
None)\n\n","repo_name":"SkyLiNing823/PWSCUP2022","sub_path":"nn_v1.py","file_name":"nn_v1.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5483946933","text":"import pytest\n\nfrom models.workout import Run\nfrom utils.models import get_field_names_from_data_model, create_model_from_dict\n\n@pytest.fixture\ndef dummy_run():\n return Run(\n id=1, \n distance=2.35, \n calories=1, \n duration=2, \n start_time=1, \n burgers_burned=5\n )\n\ndef test_get_field_names_from_data_model(dummy_run):\n expected = ['id', 'distance', 'calories', 'duration', 'start_time', 'burgers_burned', 'speed_avg', 'speed_max', 'descent', 'ascent', 'altitude_max', 'altitude_min', 'hydration', 'heart_rate_avg', 'heart_rate_max']\n result = get_field_names_from_data_model(dummy_run)\n assert sorted(result) == sorted(expected)\n\ndef test_create_model_from_dict(dummy_run):\n d = dict(\n id=1, \n distance=2.35, \n calories=1, \n duration=2, \n start_time=1, \n burgers_burned=5\n )\n\n result = create_model_from_dict(Run, d)\n assert str(result) == str(dummy_run)\n","repo_name":"KReusen/healthy-living-to-money","sub_path":"tests/test_utils/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5736166672","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import TensorDataset, DataLoader, Dataset,SubsetRandomSampler\nfrom torchvision import models\nimport time\nfrom RS_Dataset import RS_Dataset\nfrom tqdm import tqdm\nimport os \nimport shutil\nfrom datetime import date\nfrom torchvision.models import resnet50,alexnet,vgg16\n\n\ndef train(PARAMS, model, criterion, device, train_loader, optimizer, epoch):\n t0 = time.time()\n model.train()\n correct = 0\n\n for batch_idx, (img, target) in enumerate(tqdm(train_loader)):\n img, target = img.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(img)\n\n loss = criterion(output, target )\n loss.backward()\n optimizer.step()\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n \n\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f} , {:.2f} seconds'.format(\n epoch, batch_idx * len(img), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item(),time.time() - t0))\n\n\ndef test(PARAMS, model,criterion, device, test_loader,optimizer,epoch,best_acc):\n model.eval()\n test_loss = 0\n correct = 0\n\n example_images = []\n with torch.no_grad():\n for batch_idx, (img, target) in enumerate(tqdm(test_loader)):\n img, target = img.to(device), target.to(device)\n output = model(img)\n\n test_loss += criterion(output, target).item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n # Save the first input tensor in each test batch as an example image\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n \n\n current_acc = 100. 
* correct / len(test_loader.dataset)\n return current_acc\n\ndef main():\n\n\n parser = argparse.ArgumentParser(description='manual to this script')\n parser.add_argument('--model', type=str, default = 'vgg16')\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--evaluate_model', type=str)\n parser.add_argument('--dataset', type=str, default='rsscn7')\n\n args = parser.parse_args()\n\n PARAMS = {'DEVICE': torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n 'bs': 8,\n 'epochs':50,\n 'lr': 0.0006,\n 'momentum': 0.5,\n 'log_interval':10,\n 'criterion':'cross_entropy',\n 'model_name': args.model,\n 'dataset': args.dataset,\n }\n\n\n # Training settings\n train_transform = transforms.Compose(\n [ \n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(0.4, 0.4, 0.4),\n transforms.Resize((256,256)),\n transforms.ToTensor(),\n transforms.Normalize([0.4850, 0.4560, 0.4060], [0.2290, 0.2240, 0.2250])])\n test_transform = transforms.Compose(\n [ \n transforms.Resize((256,256)),\n transforms.ToTensor(),\n transforms.Normalize([0.4850, 0.4560, 0.4060], [0.2290, 0.2240, 0.2250])])\n\n\n if args.dataset == 'rsscn7':\n \n # train_dataset = datasets.ImageFolder(root='data/thick_removal',transform = train_transform)\n # test_dataset = datasets.ImageFolder(root='data/thick_removal',transform = test_transform)\n\n train_dataset = datasets.ImageFolder(root='data/rsscn7/train_dataset/',transform = train_transform)\n test_dataset = datasets.ImageFolder(root='data/rsscn7/test_dataset/',transform = test_transform)\n elif args.dataset == 'ucm':\n \n train_dataset = datasets.ImageFolder(root='data/ucm/train_dataset/',transform = train_transform)\n test_dataset = datasets.ImageFolder(root='data/ucm/test_dataset/',transform = test_transform)\n print(PARAMS)\n train_loader = DataLoader(train_dataset, batch_size=PARAMS['bs'], shuffle=True, num_workers=4, pin_memory = True )\n test_loader = DataLoader(test_dataset, batch_size=PARAMS['bs'], shuffle=True, num_workers=4, pin_memory = True )\n\n\n\n num_classes = len(train_dataset.classes)\n if PARAMS['model_name'] == 'vgg16':\n model = models.vgg16(pretrained=True)\n model.classifier[-1] = nn.Linear(in_features=4096, out_features=num_classes, bias=True)\n elif PARAMS['model_name'] == 'resnet50':\n model = models.resnet50(pretrained=True)\n model.fc = nn.Linear(in_features=2048, out_features=num_classes, bias=True)\n elif PARAMS['model_name'] == 'alexnet':\n model = models.alexnet(pretrained=True)\n model.classifier[-1] = nn.Linear(in_features=4096, out_features=num_classes, bias=True) \n\n \n \n model = model.to(PARAMS['DEVICE']) \n optimizer = optim.SGD(model.parameters(), lr=PARAMS['lr'], momentum=PARAMS['momentum'])\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = 7, gamma = 0.9)\n criterion = F.cross_entropy\n acc = 0\n\n if not args.evaluate_model:\n for epoch in range(1, PARAMS['epochs'] + 1):\n train(PARAMS, model,criterion, PARAMS['DEVICE'], train_loader, optimizer, epoch)\n acc = test(PARAMS, model,criterion, PARAMS['DEVICE'], test_loader,optimizer,epoch,acc)\n scheduler.step()\n torch.save(model.state_dict(), 'saved_models/{}_{}_{}_{}_baseline.pth'.format(args.dataset, date.today(), PARAMS['model_name'], round(acc,2)))\n # torch.save(model, 'saved_models/{}_{}_{}_{}_baseline.pth'.format(args.dataset, date.today(), PARAMS['model_name'], round(acc,2)))\n else:\n model = torch.load(args.evaluate_model)\n acc = test(PARAMS, model,criterion, PARAMS['DEVICE'], test_loader, optimizer, 0, acc)\n 
print(f'the evalutaion acc is {acc}')\n\n\nif __name__ == '__main__':\n main()","repo_name":"HMS97/GLNET","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":6356,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"96"} +{"seq_id":"26227456817","text":"import numpy as np\n\n\nclass RewardCategory1():\n def __init__(self, riskPenalizeFactor, actionPenalizeFactor, clipReward = True, clipRange = [-5,5]):\n\n self.riskPenalizeFactor = riskPenalizeFactor\n self.actionPenalizeFactor = actionPenalizeFactor\n\n self.clipReward = clipReward\n self.clipRange = clipRange\n\n self.runningReturn = []\n\n\n def computeReward(self, currentReturn, actionFeasible = True):\n # currentReturn --> log return\n\n # component 1: log return\n comp1 = 100 * currentReturn\n self.runningReturn.append(comp1)\n\n # component 2: variance of daily returns\n variance = np.var(self.runningReturn)\n comp2 = variance\n\n # component 3: penalty for forbidden actions\n comp3 = 0 if actionFeasible else 1\n\n reward = comp1 + self.riskPenalizeFactor*comp2 + self.actionPenalizeFactor*comp3\n\n if self.clipReward:\n reward = np.clip(reward, self.clipRange[0], self.clipRange[1])\n\n\n return np.round(reward, 2)\n","repo_name":"ankit2788/ReinforcementLearning","sub_path":"RLLibrary/FinUseCases/PortfolioManagement/RewardManager.py","file_name":"RewardManager.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1952824236","text":"# def div(a,b):\n# print (a/b)\n#\n# def smart_div(func):\n# def inner(a,b):\n# if b>a:\n# a,b=b,a\n# return func(a,b)\n# return inner\n#\n# div=smart_div(div)\n#\n# div(4,2)\n#\n# l=[1,2,3,4,5]\n# print(l *2)\n#\nimport numpy as np\n# np_mat = np.array([[1, 2],\n# [3, 4],\n# [5, 6]])\n# print(np_mat * 2)\n# print(np_mat * np.array([10, 11]))\n# print(np_mat + np_mat)\n\nheight = np.round(np.random.normal(1.75, 0.20, 5000), 2)\nweight = np.round(np.random.normal(60.32, 15, 5000), 2)\n# print(height)\n# print(weight)\n\nprint(np.array(np.column_stack((height, weight))).shape)","repo_name":"sounak95/PythonCodingTraining","sub_path":"Coding/Decorators.py","file_name":"Decorators.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10471470506","text":"import pandas as pd \nimport re \n\n\nrating_match=[r'.*((?3:\n reasons_to_extract = reasons_to_extract[-3:]\n search=[]\n for t in reasons_to_extract:\n for match in keyword_patterns[4]:\n if re.findall(match,t):\n search.append(re.findall(match,t)[0])\n found=True\n break\n\n elif len(reasons_to_extract)==1:\n for match in keyword_patterns[1]:\n if re.findall(match, reasons_to_extract[0]):\n search= re.findall(match,reasons_to_extract[0])\n found=True\n break\n\n elif len(reasons_to_extract)==2:\n reasons_to_extract = reasons_to_extract[1:]\n #print(i,reasons_to_extract)\n for match in keyword_patterns[2]:\n if re.findall(match,reasons_to_extract[0]):\n search=re.findall(match,reasons_to_extract[0])\n found=True\n break\n\n elif len(reasons_to_extract)==3:\n reasons_to_extract = reasons_to_extract[1]\n for match in keyword_patterns[3]:\n if re.findall(match, reasons_to_extract):\n search=re.findall(match,reasons_to_extract)\n found=True\n break\n if not found:\n keywords.append(['NaN'])\n print(i,'NaN',reasons_to_extract)\n continue\n\n if type(search[0])==str:\n 
keywords.append(search)\n print(i,search)\n continue\n\n elif type(search[0])==tuple:\n search = [x.strip() for x in search[0] if x!='']\n\n if len(search)>3:\n keywords.append(['NaN'])\n print('len>3',search)\n continue\n\n for t,key in enumerate(search):\n if len(key.split(' '))==1:\n continue\n else:\n if \"and\" in key:\n search[t]=key.split('and')[1].split('.')[0].strip()\n elif '–' in key:\n search[t]=key.split('–')[0].strip()\n elif '-' in key:\n search[t]=key.split('-')[0].strip()\n elif ':'in key:\n search[t]=key.split(':')[0].strip()\n else:\n print(search)\n\n if len(search[t].split(' '))>4:\n print('len(split)>1',search)\n search=['NaN']\n print(i,'NaN',search)\n break\n keywords.append(search)\n print(i,search)\n\n return keywords\n\ndef extract_ratings(df,verbose=False):\n extracted_ratings = []\n for i, row in df.iterrows():\n if i==0:\n extracted_ratings.append(['NaN'])\n continue\n score_found=[]\n for answer in [s for s in row['Answer'].split('\\n') if s!='']:\n for patt in rating_match:\n try:\n score=re.findall(patt,answer)\n if score!=[]:\n #print(f\"{i}:{answer}\\t:{score}\")\n score_found=score\n extracted_ratings.append(score)\n break\n except:\n continue\n else:\n continue # only executed if the inner loop did not break\n break # only executed if the inner loop did break\n\n if score_found==[]:\n if verbose:\n print(f\"::::::::{i}-th NOT FOUND:::::::\")\n print(row['Answer'])\n extracted_ratings.append(['NaN'])\n\n return extracted_ratings\n\ndef extract_keywords_gpt35(df,verbose=False):\n keywords=[]\n for i, row in df.iterrows():\n if i==0:\n keywords.append(['NaN'])\n continue\n\n reasons_to_extract= [s for s in row['Answer'].split('\\n') if s!='']\n\n if len(reasons_to_extract)>3:\n search=[]\n for t in reasons_to_extract[-3:]:\n for match in keyword_patterns[4]:\n if re.findall(match,t) and ('Rating' not in re.findall(match,t)[0]):\n search.append(re.findall(match,t)[0])\n found=True\n break\n\n if len(search)!=3:\n search=[]\n for t in reasons_to_extract:\n\n if 'Keywords' in t and len([x.strip() for x in t.split('Keywords:')[1].split(',')])==3: \n search=[x.strip() for x in t.split('Keywords:')[1].split(',')]\n #assert(len(search)==3)\n break\n\n elif re.findall(r'\\d/5\\s+-\\s+(.*)',t):\n search.append(re.findall(r'\\d/5\\s+-\\s+(.*)',t))\n\n if len(search) !=3: \n for match in keyword_patterns[4]:\n if len(search)==3:\n break\n \n if re.findall(match,t) and ('Rating' not in re.findall(match,t)[0]):\n search.append(re.findall(match,t)[0])\n break\n if search==[] or len(search)!=3:\n search=['NaN']\n if verbose:\n print(i,reasons_to_extract) \n \n elif len(reasons_to_extract)<=3:\n search=[]\n for t in reasons_to_extract:\n if 'Keywords' in t and len([x.strip() for x in t.split('Keywords:')[1].split(',')])==3:\n search=[x.strip() for x in t.split('Keywords:')[1].split(',')]\n break\n \n if search==[]:\n search =['NaN']\n if verbose:\n print(i,reasons_to_extract)\n try:\n keywords.append([x.strip() for x in search])\n except:\n keywords.append(['NaN'])\n \n return keywords\n\ndef extract_keywords_bard(df,verbose=False):\n bard_keyword_patterns=[r'\\*\\s+\\*\\*(.*?):\\*\\*']\n keywords = [] \n for i, row in df.iterrows():\n if \"Off the top of your head,\" in row['Questionnaire']:\n keywords.append(['NaN'])\n continue \n\n reasons_to_extract = [s for s in row['Answer'].split('\\n') if s!='']\n search =[] \n for t in reasons_to_extract:\n for match in bard_keyword_patterns:\n if re.findall(match,t):\n search.append(re.findall(match,t)[0])\n break\n if search:\n 
keywords.append(search)\n else: \n if verbose: \n print(i, reasons_to_extract)\n keywords.append(['NaN'])\n return keywords \n\ndef run_extraction(input_dir,model,output_dir): \n df = pd.read_csv(f'../result/{input_dir}',index_col=[])\n ratings=extract_ratings(df)\n assert(len(ratings)==len(df))\n df['Ratings'] = ratings\n df['Ratings'] = df['Ratings'].apply(lambda x: x[0])\n\n if model=='gpt-3.5-turbo': \n keywords = extract_keywords_gpt35(df)\n assert(len(keywords)==len(df))\n \n elif model=='text-davinci-003': \n keywords = extract_keywords(df)\n assert(len(keywords)==len(df))\n \n elif model=='bard':\n keywords = extract_keywords_bard(df)\n assert(len(keywords)==len(df))\n\n df['Keywords']=keywords\n df.to_csv(f'../result/{output_dir}.csv')\n print(f\"Saved to {output_dir}\") \n \n return df ","repo_name":"sullamij/stereomap","sub_path":"src/extract_keywords_rating.py","file_name":"extract_keywords_rating.py","file_ext":"py","file_size_in_byte":13313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14426572128","text":"#!/usr/bin/python\r\n\r\nimport requests\r\nimport json\r\nimport queue\r\n\r\nclass get_data(object):\r\n def __init__(self,url='http://127.0.0.1:1621/data'):\r\n self.url=url\r\n a=requests.get(url=self.url).content.decode('utf-8')\r\n a=eval(a)\r\n\r\n # print(type(a))\r\n # print(a)\r\n self.data=a\r\n\r\n def get_cpu_data(self):\r\n if self.data:\r\n cpu_data=self.data['cpu_data'] #这里得到列表\r\n # print(cpu_data)\r\n cpu_infomation=[]\r\n cpu_usage=[]\r\n for i in cpu_data:\r\n cpu_infomation.append(i['cpu_infomation'])\r\n cpu_usage.append( i['cpu_usage'])\r\n # print(cpu_usage)\r\n # print(cpu_infomation)\r\n return cpu_infomation,cpu_usage\r\n\r\n else:\r\n print('invalid data...')\r\n return False,False\r\n","repo_name":"OriginLucas/monitor","sub_path":"DvaMoon/data/get_cpu_data.py","file_name":"get_cpu_data.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1987423062","text":"import datetime\nimport json\nimport os\n\nFILE_PATH = 'notes.json'\nnotes = []\nflag_work = True\n\n\ndef add_note():\n \"\"\"\n Запрашивает у пользователя заголовок и текст заметки, создает новую заметку с уникальным идентификатором\n и временными метками в формате JSON, добавляет заметку в список notes и сохраняет в файл.\n \"\"\"\n title = input('Введите заголовок заметки: ')\n note_body = input('Введите текст заметки: ')\n now_date = datetime.datetime.now().strftime('%H:%M:%S %d-%m-%Y')\n note = {\n 'id': len(notes) + 1,\n 'note_title': title,\n 'note_body': note_body,\n 'create_date': now_date,\n 'update_date': now_date\n }\n notes.append(note)\n save_notes()\n\n\ndef show_note(note):\n print(f'\\n******** Номер: {note[\"id\"]} ********')\n print(f'Заголовок: {note[\"note_title\"]}')\n print(f'Текст: {note[\"note_body\"]}')\n print(f'Последнее изменение: {note[\"update_date\"]}\\n')\n\n\ndef show_all_notes():\n \"\"\"\n Выводит все заметки в списке notes, если они есть, или сообщение о том, что заметок нет.\n \"\"\"\n if notes:\n for note in notes:\n show_note(note)\n else:\n print('\\n!!! Заметок нет !!!\\n')\n\n\ndef delete_note():\n \"\"\"\n Запрашивает у пользователя номер заметки, удаляет заметку из списка notes,\n если не найдено выводит сообщение. 
И сохраняет в файл изменненый спсиок.\n \"\"\"\n note_id = int(input('Введите номер заметки: '))\n global notes\n found_note = False\n new_notes = []\n for note in notes:\n if note['id'] == note_id:\n found_note = True\n else:\n new_notes.append(note)\n if found_note:\n notes = new_notes\n save_notes()\n else:\n print(f'\\n!!! Заметка с номером: {note_id} не найдена. !!!\\n')\n\n\ndef edit_note():\n \"\"\"\n Запрашивает у пользователя номер заметки, находит заметку в списке notes,\n запрашивает у пользователя новый заголовок и текст заметки, обновляет заметку с новой информацией\n и временными метками. И сохраняет в файл изменненый спсиок.\n \"\"\"\n note_id = int(input('Введите номер заметки: '))\n for note in notes:\n if note['id'] == note_id:\n title = input('Введите новый заголовок заметки: ')\n body = input('Введите новый текст заметки: ')\n note['note_title'] = title\n note['note_body'] = body\n note['update_date'] = datetime.datetime.now().strftime('%H:%M:%S %d-%m-%Y')\n save_notes()\n return\n print(f'\\n!!! Заметка с номером: {note_id} не найдена. !!!\\n')\n\n\ndef show_note_by_id():\n \"\"\"\n Запрашивает у пользователя идентификатор заметки, находит заметку в списке notes\n и выводит информацию о заметке, если она найдена.\n \"\"\"\n note_id = int(input('Введите номер заметки: '))\n for note in notes:\n if note['id'] == note_id:\n show_note(note)\n return\n print(f'\\n!!! Заметка с номером: {note_id} не найдена. !!!\\n')\n\n\ndef show_notes_by_date():\n \"\"\"\n Запрашивает у пользователя дату, находит все заметки в списке notes, созданные или обновленные в эту дату,\n и выводит информацию о заметках, если не найдены выдает сообщение.\n \"\"\"\n while True:\n input_str = input('Введите дату в формате dd-mm-yyyy, q - для выхода: ') # запрашиваем дату у пользователя\n try:\n if input_str == 'q': # q для выхода из цикла ввода если передумали\n return\n date_input = datetime.datetime.strptime(input_str, '%d-%m-%Y').date() # пытаемся распарсить введенную дату\n break\n except ValueError:\n print('!!! Некорректный формат даты. Попробуйте еще раз. !!!')\n\n found_notes = []\n for note in notes:\n # берем число месяц год у заметки\n date_create = datetime.datetime.strptime(note['create_date'], '%H:%M:%S %d-%m-%Y').date()\n date_update = datetime.datetime.strptime(note['update_date'], '%H:%M:%S %d-%m-%Y').date()\n # сравниваем с введенной, если подходит добавляем в новый список\n if date_create == date_input or date_update == date_input:\n found_notes.append(note)\n if found_notes:\n for note in found_notes:\n show_note(note)\n else:\n print(f'\\n!!! Заметки за {date_input} не найдены. !!!\\n')\n\n\ndef show_notes_last_week():\n \"\"\"\n Находит все заметки в списке notes, созданные или обновленные за последнюю неделю и выводит информацию о заметках,\n если не найдены выдает сообщение\n \"\"\"\n now_date = datetime.datetime.now() # получаем текущую дату\n past_date = now_date - datetime.timedelta(days=7) # получаем дату на 7 дней назад\n found_notes = []\n for note in notes:\n # берем число месяц год у заметки\n date_create = datetime.datetime.strptime(note['create_date'], '%H:%M:%S %d-%m-%Y')\n date_update = datetime.datetime.strptime(note['update_date'], '%H:%M:%S %d-%m-%Y')\n # проверяем входит ли дата заметки в диапазон недели, если входит добавляем в новый список\n if past_date <= date_create <= now_date or past_date <= date_update <= now_date:\n found_notes.append(note)\n if found_notes:\n for note in found_notes:\n show_note(note)\n else:\n print('\\n!! 
Заметки за последнюю неделю не найдены. !!!\\n')\n\n\ndef save_notes():\n \"\"\"\n Сохраняет заметки из списка notes в файл в формате JSON.\n \"\"\"\n with open(FILE_PATH, 'w') as file:\n # записываем список notes в файл в формате JSON с отступом в 4 пробела.\n json.dump(notes, file, indent=4)\n\n\ndef load_notes():\n \"\"\"\n Загружает заметки из файла в формате JSON в глобальный список notes.\n Если файл не найден, список notes остается пустым.\n \"\"\"\n if os.path.isfile(FILE_PATH): # проверяем есть ли файл, и если есть загружаем в наш список\n with open(FILE_PATH, 'r') as file:\n notes.extend(json.load(file))\n else:\n print('!!! Невозможно загрузить данные. Файл отсутвует. !!!')\n\n\nload_notes() # подгружаем заметки из файла\nwhile flag_work: # попадаем в основное меню программы\n print('1. Добавить заметку')\n print('2. Редактировать заметку')\n print('3. Удалить заметку')\n print('4. Просмотреть заметку по номеру')\n print('5. Просмотреть заметки по дате')\n print('6. Просмотреть заметки за последнюю неделю')\n print('7. Просмотреть все заметки')\n print('8. Выход')\n\n choice = input('Введите номер команды: ')\n\n if choice == '1':\n add_note()\n elif choice == '2':\n edit_note()\n elif choice == '3':\n delete_note()\n elif choice == '4':\n show_note_by_id()\n elif choice == '5':\n show_notes_by_date()\n elif choice == '6':\n show_notes_last_week()\n elif choice == '7':\n show_all_notes()\n elif choice == '8':\n flag_work = False\n else:\n print('Неверный номер команды')\n","repo_name":"Uneld/Homework1_P_IC_1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8980,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40771743438","text":"import numpy as np\nimport tensorflow as tf\nimport random\nimport os\nimport time\nfrom dataloader import Gen_Data_loader\nfrom generator import Generator\nfrom target_lstm import TARGET_LSTM\nimport pickle\n\n#########################################################################################\n# Generator Hyper-parameters\n######################################################################################\nEMB_DIM = 32 # embedding dimension\nHIDDEN_DIM = 32 # hidden state dimension of lstm cell\nSEQ_LENGTH = 20 # sequence length\nSTART_TOKEN = 0\nPRE_EPOCH_NUM = 0 # supervise (maximum likelihood estimation) epochs (not recommended). 
takes about 33 min per epoch\nSEED = 88\nBATCH_SIZE = 64\nM_DROPOUT_RATE = 0.5 # Dropout rate of M (optional)\n\n#########################################################################################\n# Basic Training Parameters\n#########################################################################################\nTOTAL_BATCH = 20\ngenerated_num = 1000#10000\n\ntask_id = os.getenv('SLURM_JOB_ID') \n\ntrue_file = 'data/train.txt' # the raw training file\nval_file = 'data/test.txt' # the raw validation file\noracle_file = 'save/oracle_' + str(task_id) + '.txt' # the encoded file to actually train on, ground truth\nval_oracle_file = 'save/oracle_val_' + str(task_id) + '.txt' # the encoded file to actually train on, ground truth\ngenerator_file = 'save/generator_' + str(task_id) + '.txt' # the generated encoded file\ntest_file = 'save/test_file_' + str(task_id) + '.txt' # the decoded file to read and evaluate\n\n\ndef generate_samples(sess, trainable_model, batch_size, generated_num, output_file):\n # Generate Samples\n generated_samples = []\n for _ in range(int(generated_num / batch_size)):\n generated_samples.extend(trainable_model.generate(sess))\n\n with open(output_file, 'w') as fout:\n for poem in generated_samples:\n buffer = ' '.join([str(x) for x in poem]) + '\\n'\n fout.write(buffer)\n\n\ndef target_loss(sess, target_lstm, data_loader):\n # target_loss means the oracle negative log-likelihood tested with the oracle model \"target_lstm\"\n # For more details, please see the Section 4 in https://arxiv.org/abs/1609.05473\n nll = []\n data_loader.reset_pointer()\n\n for it in range(data_loader.num_batch):\n batch = data_loader.next_batch()\n g_loss = sess.run(target_lstm.likelihood_loss, {target_lstm.x: batch})\n nll.append(g_loss)\n\n return np.mean(nll)\n\ndef get_real_test_file(dict, generator_file, test_file):\n from utils.text_process import code_to_text\n from utils.text_process import get_tokenlized\n with open(generator_file, 'r') as file:\n codes = get_tokenlized(generator_file)\n with open(test_file, 'w') as outfile:\n outfile.write(code_to_text(codes=codes, dictionary=dict))\n\ndef mle_epoch(sess, trainable_model, data_loader):\n # Pre-train the generator using MLE for one epoch\n supervised_g_losses = []\n data_loader.reset_pointer()\n \n for it in range(data_loader.num_batch):\n batch = data_loader.next_batch()\n _, g_loss = trainable_model.maximum_likelihood(sess, batch)\n supervised_g_losses.append(g_loss)\n\n return np.mean(supervised_g_losses)\n\ndef main():\n print('program start')\n from utils.text_process import text_precess, text_to_code # TODO: move?\n from utils.text_process import get_tokenlized, get_word_list, get_dict\n \n random.seed(SEED)\n np.random.seed(SEED)\n assert START_TOKEN == 0\n\n SEQ_LENGTH, vocab_size = text_precess(true_file, val_file)\n gen_data_loader = Gen_Data_loader(BATCH_SIZE, SEQ_LENGTH)\n val_data_loader = Gen_Data_loader(BATCH_SIZE, SEQ_LENGTH)\n \n # Create training file and dicts\n tokens = get_tokenlized(true_file)\n val_tokens = get_tokenlized(val_file)\n word_set = get_word_list(tokens + val_tokens)\n [word_index_dict, index_word_dict] = get_dict(word_set)\n with open(oracle_file, 'w') as outfile:\n outfile.write(text_to_code(tokens, word_index_dict, SEQ_LENGTH))\n with open(val_oracle_file, 'w') as outfile:\n outfile.write(text_to_code(val_tokens, word_index_dict, SEQ_LENGTH))\n\n generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN)\n #target_params = 
pickle.load(open('save/target_params_py3.pkl', 'rb'))\n #target_lstm = TARGET_LSTM(vocab_size, BATCH_SIZE, 32, 32, SEQ_LENGTH, START_TOKEN, target_params) # The oracle model\n # replace target lstm with true data\n \n mediator = Generator(vocab_size, BATCH_SIZE*2, EMB_DIM*2, HIDDEN_DIM*2, SEQ_LENGTH, START_TOKEN, name=\"mediator\", dropout_rate=M_DROPOUT_RATE)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n sess.run(tf.global_variables_initializer())\n\n gen_data_loader.create_batches(oracle_file)\n val_data_loader.create_batches(val_oracle_file)\n\n log = open('save/experiment-log.txt', 'w')\n log_nll = open('save/experiment-log-nll.txt', 'w')\n\n # pre-train generator (default 0 epochs)(not recommended)\n print('Start pre-training...')\n log.write('pre-training...\\n')\n for epoch in range(PRE_EPOCH_NUM):\n loss = mle_epoch(sess, generator, gen_data_loader)\n if epoch % 5 == 0:\n generate_samples(sess, generator, BATCH_SIZE, generated_num, generator_file)\n #get_real_test_file(index_word_dict, generator_file, test_file) # only needed in debugging\n test_loss = target_loss(sess, generator, val_data_loader)\n print('pre-train epoch ', epoch, 'nll_test ', test_loss)\n buffer = 'epoch:\\t'+ str(epoch) + '\\tnll_test:\\t' + str(test_loss) + '\\n'\n log_nll.write(buffer)\n\n print('#########################################################################')\n toc = time.time()\n print('Start Cooperative Training...')\n for iter_idx in range(TOTAL_BATCH):\n print('iteration: ' + str(iter_idx) + '\\ntime: ' + str(time.time() - toc))\n toc = time.time()\n # Train the generator for one step\n for it in range(1):\n samples = generator.generate(sess)\n rewards = mediator.get_reward(sess, np.concatenate([samples, samples], axis=0))\n feed = {generator.x: samples, generator.rewards: rewards[0:BATCH_SIZE]}\n loss, _ = sess.run([generator.g_loss, generator.g_updates], feed_dict=feed)\n # Test, removed oracle test\n if iter_idx % gen_data_loader.num_batch == 0: # epochs instead of batches\n test_loss = target_loss(sess, generator, val_data_loader)\n print('epoch:\\t', iter_idx // gen_data_loader.num_batch, 'nll_test ', test_loss)\n buffer = 'epoch:\\t'+ str(iter_idx // gen_data_loader.num_batch) + '\\tnll_test:\\t' + str(test_loss) + '\\n'\n log_nll.write(buffer)\n if iter_idx == TOTAL_BATCH - 1:\n print('generating samples')\n generate_samples(sess, generator, BATCH_SIZE, generated_num, generator_file)\n get_real_test_file(index_word_dict, generator_file, test_file)\n # Train the mediator\n for _ in range(1):\n print('training mediator...')\n bnll_ = []\n collected_x = []\n ratio = 2\n for it in range(ratio):\n if it % 2 == 0:\n x_batch = gen_data_loader.next_batch()\n else:\n x_batch = generator.generate(sess)\n collected_x.append(x_batch)\n collected_x = np.reshape(collected_x, [-1, SEQ_LENGTH])\n np.random.shuffle(collected_x)\n collected_x = np.reshape(collected_x, [-1, BATCH_SIZE*2, SEQ_LENGTH])\n for it in range(1):\n feed = {\n mediator.x: collected_x[it],\n }\n print('running bnll sess')\n bnll = sess.run(mediator.likelihood_loss, feed)\n bnll_.append(bnll)\n print('running mediator and updating')\n sess.run(mediator.dropout_on)\n _ = sess.run(mediator.likelihood_updates, feed)\n sess.run(mediator.dropout_off)\n if iter_idx % 50 == 0:\n bnll = np.mean(bnll_)\n print(\"mediator cooptrain iter#%d, balanced_nll %f\" % (iter_idx, bnll))\n log.write(\"%d\\t%f\\n\" % (iter_idx, bnll))\n \n log.close()\n log_nll.close()\n\nif __name__ == 
'__main__':\n main()\n","repo_name":"cojacobjonsson/DA233X-Public","sub_path":"Code/Hebbe/run/cot_JJ.py","file_name":"cot_JJ.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36481755746","text":"from rest_framework.serializers import ModelSerializer\nfrom modules.pastoral.models import Service\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\n\n\nclass ServiceSerializer(GeoFeatureModelSerializer):\n geo_field = \"location\"\n\n class Meta:\n model = Service\n fields = (\n \"id\",\n \"service_name\",\n \"service_description\",\n \"added_by\",\n \"location\",\n \"county\",\n \"photos\",\n \"publish\",\n )\n read_only_fields = (\"id\",)\n","repo_name":"qinyanjuidavid/Nomad","sub_path":"modules/pastoral/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"9238683807","text":"import numpy as np\nimport time\nimport itertools\nimport os\nfrom tqdm import tqdm\nfrom scipy import sparse\nfrom thewalrus import perm\n\n\nclass BeamSplitter:\n def __init__(self, theta, phi_rho, phi_tau):\n self._t = np.exp(phi_tau * 1j) * np.cos(theta)\n self._r = np.exp(phi_rho * 1j) * np.sin(theta)\n\n self._mode1 = self._mode2 = None\n self.bs_matrix = None\n\n def calc_bs_matrix(self, number_of_modes, modes):\n list_modes = np.array([modes[0] - 1, modes[1] - 1])\n list_data = np.array([self._t - 1, np.conj(self._t) - 1, -np.conj(self._r), self._r])\n\n row_ind = np.concatenate([np.array(range(number_of_modes)), list_modes, list_modes])\n col_ing = np.concatenate([np.array(range(number_of_modes)), list_modes, np.flip(list_modes)])\n data = np.concatenate([np.ones((number_of_modes, ), dtype=complex), list_data])\n\n self.bs_matrix = sparse.csr_matrix((data, (row_ind, col_ing)), shape=(number_of_modes, number_of_modes))\n\n\nclass Scheme:\n def __init__(self, modes_num=1):\n self.number_of_modes = modes_num\n self._beam_splitters = []\n self.scheme_matrix = sparse.csr_matrix(np.identity(self.number_of_modes))\n\n def add_BS_gate(self, modes, theta=np.pi/4, phi_rho=0., phi_tau=0.):\n bs = BeamSplitter(theta, phi_rho, phi_tau)\n bs.calc_bs_matrix(self.number_of_modes, modes)\n self._beam_splitters.append(bs)\n\n def calc_scheme_matrix(self):\n time_unit_start = time.time()\n self.scheme_matrix = sparse.csr_matrix(np.identity(self.number_of_modes))\n for bs in np.flip(self._beam_splitters):\n self.scheme_matrix = sparse.csr_matrix.dot(self.scheme_matrix, bs.bs_matrix)\n self.scheme_matrix = self.scheme_matrix.toarray()\n time_unit_end = time.time()\n print(\"--> The time for dot product is :\", (time_unit_end - time_unit_start) * 10 ** 3, \"ms\")\n\n def upload_scheme_from_file(self, file_name):\n with open(os.path.join('scheme', file_name), 'r') as f_scheme:\n self.number_of_modes = int(f_scheme.readline())\n for f_beam_splitter in f_scheme:\n mode1, mode2 = list(map(int, f_beam_splitter.split('\\t')[0:2]))\n theta, phi_rho, phi_tau = list(map(float, f_beam_splitter.split('\\t')[2:]))\n self.add_BS_gate((mode1, mode2), theta, phi_rho, phi_tau)\n print(\"--> Scheme was successfully uploaded\")\n print(\"--> Number of modes: \", self.number_of_modes)\n\n def export_scheme_matrix(self, file_name):\n with open(os.path.join('scheme', file_name), 'w') as f_out:\n for line in np.round(self.scheme_matrix, 6):\n for element in line:\n 
f_out.write(str(element.real) + '\\t' + str(element.imag) + '\\t')\n f_out.write('\\n')\n print(\"--> Unitary was successfully exported\")\n\n def print_scheme_matrix(self):\n print(\"--> U:\\n\", np.round(self.scheme_matrix, 3))\n\n\nclass BosonSampler:\n def __init__(self, scheme, init_config):\n self._scheme = scheme\n self._photons_number = sum(init_config)\n self._basis = self.create_fock_basis()\n self._dim = len(self._basis)\n self._init_config = self._basis.index(list(init_config))\n\n self._transform_matrix = self.calc_transform_matrix()\n\n def create_fock_basis(self):\n basis = []\n slots_num = self._photons_number + self._scheme.number_of_modes\n all_comb_bars = list(itertools.combinations(range(1, slots_num), self._scheme.number_of_modes - 1))\n for bars in all_comb_bars:\n bars = list(bars)\n bars.append(slots_num)\n bars.insert(0, 0)\n basis_vec = []\n for i in range(self._scheme.number_of_modes):\n basis_vec.append(bars[i+1] - bars[i] - 1)\n basis.append(basis_vec)\n\n return basis\n\n def calc_transform_matrix(self):\n def find_submatrix():\n def get_indices(vec):\n indices = []\n for i in range(self._scheme.number_of_modes):\n if vec[i] > 0:\n for j in range(vec[i]):\n indices.append(i)\n return indices\n\n column_indices = get_indices(self._basis[vec_in])\n row_indices = get_indices(self._basis[vec_out])\n\n return self._scheme.scheme_matrix[:, column_indices][row_indices]\n\n transform_matrix = np.empty([self._dim, self._dim], dtype=float)\n for vec_in in tqdm(range(self._dim), desc=\"Computing...\"):\n for vec_out in range(self._dim):\n norm = 1\n for num in self._basis[vec_in]:\n norm *= np.math.factorial(num)\n for num in self._basis[vec_out]:\n norm *= np.math.factorial(num)\n\n transform_matrix[vec_in][vec_out] = \\\n abs(perm(find_submatrix(), method=\"ryser\")) ** 2 / norm\n\n return transform_matrix\n\n def sample(self, batch_size, file_name):\n prob_distribution = self._transform_matrix[self._init_config]\n choices = [np.random.choice(range(self._dim),\n p=prob_distribution) for _ in tqdm(range(batch_size), desc=\"Sampling...\")]\n\n with open(os.path.join('samples', file_name), 'w') as f_out:\n for result in tqdm(choices, desc=\"Writing to file...\"):\n f_out.write(str(self._basis[result]) + '\\t' +\n str(np.round(self._transform_matrix[self._init_config][result], 6)) + '\\n')\n\n print(\"--> Samples was successfully exported\")\n\n def print_transform_matrix(self):\n print(\"--> H:\\n\", self._transform_matrix)\n\n\ndef is_unitary(matrix, dim):\n matrix_dagger = np.conj(matrix.transpose())\n if (np.round(np.dot(matrix, matrix_dagger), 10) == np.identity(dim)).all():\n print(\"--> is_unitary: True\")\n else:\n print(\"--> is_unitary: False\")\n\n\ndef main():\n time_start = time.time()\n\n scheme = Scheme(1)\n scheme.upload_scheme_from_file('curr_scheme_simple.txt')\n scheme.calc_scheme_matrix()\n scheme.export_scheme_matrix('scheme_unitary.txt')\n\n sampler = BosonSampler(scheme, (1, 1, 1, 1, 1, 0))\n sampler.sample(batch_size=500000, file_name='sample.txt')\n\n time_end = time.time()\n\n print(\"\\n--> The time of execution is :\", (time_end - time_start) * 10 ** 3, \"ms\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexandr-Mazanik/boson_sampling","sub_path":"sampler/boson_sampler.py","file_name":"boson_sampler.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12220850089","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/13 23:01\n# 
@Author : ZhiMa_Maker\n# @Email : yongtao_vip@163.com\n# @File : file_manager.py\n# @Software : PyCharm\n\nimport os,sys\nimport re\nimport platform\nimport shutil\nimport log_utils\n\n# 获取当前脚本工作目录\nrootDir = os.getcwd()\n\ndef __init__(self):\n pass\n\n\n\ndef list_files(src, resFiles, igoreFiles):\n if os.path.exists(src):\n if os.path.isfile(src) and src not in igoreFiles:\n resFiles.append(src)\n elif os.path.isdir(src):\n for f in os.listdir(src):\n if src not in igoreFiles:\n list_files(os.path.join(src, f), resFiles, igoreFiles)\n\n return resFiles\n\n\n\n\n #获取当前路径\ndef getCurrDir(self):\n global currentDir\n retPath = currentDir\n if platform.system() == 'Darwin': # 判断是否是 mac平台\n retPath = sys.path[0]\n lstPath = os.path.split(retPath)\n if lstPath[1]:\n retPath = lstPath[0]\n return retPath\n\n\n# 获取完整路径\ndef getFullPath(filename):\n if os.path.isabs(filename): # 判断是否是绝对路径\n return filename\n currdir = rootDir\n filename = os.path.join(currdir, filename)\n filename = filename.replace('\\\\', '/')\n filename = re.sub('/+', '/', filename)\n return filename\n\n\n\n# 获取输出路径\ndef createFilePath(filepath,isClearDir=0):\n outpath = getFullPath(filepath)\n if os.path.exists(outpath):\n if isClearDir == 1:\n # shutil.rmtree(outpath)\n os.makedirs(outpath)\n log_utils.info(outpath + \"目录创建成功\")\n return outpath\n else:\n log_utils.info(\"目录已存在\" + outpath)\n return outpath\n else:\n os.makedirs(outpath)\n log_utils.info(outpath + \"目录创建成功\")\n return outpath\n\n\n\ndef joinFilePath(sourcePath,appendPath):\n path = getFullPath(sourcePath)\n if os.path.exists(path):\n os.path.join(sourcePath, appendPath)\n else:\n log_utils.info(\"原路径不存在,请检查\")\n\n\n\n\n\n\ndef copyFile(filepath, newPath):\n # 获取当前路径下的文件名,返回List\n fileNames = os.listdir(filepath)\n for file in fileNames:\n # 将文件命加入到当前文件路径后面\n newDir = filepath + '/' + file\n # 如果是文件\n if os.path.isfile(newDir):\n print(newDir)\n newFile = newPath + file\n shutil.copyfile(newDir, newFile)\n #如果不是文件,递归这个文件夹的路径\n else:\n copyFile(newDir,newPath)\n\n\n\ndef copyFiles(srcPath, dstPath):\n for file in os.listdir(srcPath):\n if file == '.DS_Store':\n continue\n srcFilePath = srcPath + '/' + file\n dstFilePath = dstPath + '/' + file\n\n # if file.endswith('.plist'):\n # combineOtherPlist(srcFilePath, dstFilePath)\n # elif\n #文件夹内容copy\n if os.path.isdir(srcFilePath):\n if srcFilePath.endswith('launchimage'):\n \tif os.path.exists(dstFilePath):\n pass\n #如果目标文件夹存在内容,删除\n \t\t# shutil.rmtree(dstFilePath)\n if os.path.exists(dstFilePath):\n copyFiles(srcFilePath, dstFilePath)\n else:\n shutil.copytree(srcFilePath, dstFilePath, symlinks=False, ignore=None)\n else:\n #文件内容copy\n shutil.copy(srcFilePath, dstFilePath)\n\n\ndef removeFilePath(file_path):\n \"\"\"删除路径下的所有文件\"\"\"\n if os.path.exists(file_path):\n shutil.rmtree(file_path)\n\n\n# 清理文件函数\ndef clear(path):\n log_utils.info('正在扫描:' + path)\n # 获取目录中的所有文件和文件夹名字\n dir_list = os.listdir(path)\n # 遍历循环每个目录\n for i in dir_list:\n # 拼接绝对路径\n abspath = os.path.join(os.path.abspath(path), i)\n # 判断是否是文件\n if os.path.isfile(abspath):\n # 判断文件是否是 ._ 开头的文件\n if i.startswith(\"._\"):\n # 删除文件\n # 这是彻底删除 回收站不会存在\n # 这是彻底删除 回收站不会存在\n # 这是彻底删除 回收站不会存在\n os.remove(abspath)\n log_utils.info('清理文件 : ' + abspath)\n\n else:\n # 不是文件就继续递归\n clear(abspath)\n\n","repo_name":"yongtaovip/AppIconMaker","sub_path":"AppIconMaker/Scripts/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} 
+{"seq_id":"38202465356","text":"import argparse\n\nimport scipy\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import train_test_split\n\nfrom dataset import TextDataset\nfrom train import *\nfrom utils import *\nfrom eval import *\n\n# logging.basicConfig(level=logging.WARNING)\n\n\ndef main(args):\n urls_dataset = TextDataset(args.path1, args.path2, \"last\")\n train_set, test_set = train_test_split(\n urls_dataset.data[:100], test_size=0.2, random_state=42)\n if args.mode == \"train-test\":\n # training\n v, y_train = get_labels(train_set[\"target\"])\n y_test_multilabel = v.transform(test_set[\"target\"])\n print(train_set.head())\n df_final, url_vector = input_classifier(train_set, args.mode)\n df_final = df_final.drop(\n [\"target\", \"url\", \"day\", \"Path\", 'Netloc', 'Path', 'tld_category', 'tld', \"text_vector\"], axis=1)\n print(df_final.head())\n df_final = scipy.sparse.csr_matrix(\n df_final.astype(float).values)\n X_train = hstack((url_vector, df_final))\n print(\"ok\")\n run_train(X_train, y_train)\n\n x_test, url_vector = input_classifier(train_set, \"test\")\n x_final = x_test.drop(\n [\"target\", \"url\", \"day\", \"Path\", 'Netloc', 'Path', 'tld_category', 'tld', \"text_vector\"], axis=1)\n x_final = scipy.sparse.csr_matrix(\n x_final.astype(float).values)\n X_test = hstack((url_vector, x_final))\n y_pred=test(X_test, v)\n print(y_pred)\n\n #metric(y_test_multilabel,y_pred)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--mode\",default=\"train-test\")\n\n parser.add_argument(\"--path1\", type=str, default='data', help=\"path to the directory containing the dataset\")\n parser.add_argument(\"--path2\", type=str, default='tld',\n help=\"path to the directory containing the tld files\")\n args = parser.parse_args()\n main(args)\n","repo_name":"nada-hajsalah/url-multiclassification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25876417141","text":"import binascii\nimport json\nfrom math import trunc\nimport os\nfrom flask import Flask, jsonify, request, render_template, redirect, session, escape, g\nfrom flask_cors import CORS\n\nimport socket\nimport time as tm\nfrom time import time\nfrom datetime import datetime as dt\n\nfrom src.blockchain_node.node import Node\nfrom src.wallet.wallet import Wallet\nfrom database.database import Database as db\nfrom api.api import API as api\nfrom util.util import *\nfrom argparse import ArgumentParser\n\n\nparser = ArgumentParser()\nparser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')\nargs = parser.parse_args()\nport = args.port\n\n# addr = socket.gethostbyname(socket.gethostname())\naddr = \"0.0.0.0\"\nhost_node = Node(addr, port)\n\nhost_node.start()\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/api/')\ndef api_route(req):\n response = api.get_url(req)\n\n return jsonify(response)\n\n@app.route('/', methods=['GET'])\ndef index():\n if 'u_name' in session:\n return redirect('/home')\n else:\n return redirect('/login')\n\n@app.route('/home', methods=['GET'])\ndef home():\n if 'u_name' in session:\n data = {\n 'name': session['name'],\n 'email': session['email'],\n 'u_name': session['u_name'],\n 'public_key': format_key_for_display(session['public_key'])\n }\n\n return_data = db.get_data('blocks')\n blocks = []\n for b in return_data:\n d = {}\n d['index'] = b[0]\n d['timestamp'] = 
b[1]\n d['data'] = b[2]\n d['difficulty'] = b[3]\n d['merkle_root'] = b[4]\n d['prev_hash'] = b[5]\n d['nonce'] = b[6]\n d['hash'] = b[7]\n blocks.append(d)\n \n data['blocks'] = blocks\n\n return_data = db.get_data('transactions')\n transactions = []\n data['sent'] = 0\n data['received'] = 0\n for b in return_data:\n d = {\n \"from_addr\": format_key_for_display(b[0])[:15] if b[0] != None else b[0],\n \"to_addr\": format_key_for_display(b[1])[:15] if b[1] != None else b[0],\n \"value\": b[2],\n \"gas\": b[3],\n \"args\": json.loads(b[4]),\n \"timestamp\": b[5],\n \"tx_hash\": b[6]\n }\n transactions.append(d)\n\n if session['public_key'] == b[0]:\n data['sent'] += int(b[2])\n\n if session['public_key'] == b[1]:\n data['received'] += int(b[2])\n \n data['transactions'] = transactions\n \n return render_template('home/index.html', data=data)\n else:\n return redirect('/login')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef signin():\n if request.method == 'GET':\n return render_template('home/page-login.html')\n else:\n uname = request.form.get('username')\n pword = request.form.get('password')\n \n return_data = db.get_user((uname, pword))\n \n if len(return_data) != 0:\n return_data = return_data[0]\n\n if len(return_data) != 0:\n session['name'] = return_data[0]\n session['email'] = return_data[1]\n session['u_name'] = return_data[2]\n session['password'] = return_data[3]\n session['private_key'] = return_data[4]\n session['public_key'] = return_data[5]\n\n if session['public_key'] == None or session['public_key'] == \"\":\n return redirect('/key-generate')\n else:\n return redirect('/home')\n else:\n \n return redirect('/login')\n\n@app.route('/logout')\ndef signout():\n \n session.pop('u_name', None)\n session.pop('name', None)\n session.pop('email', None)\n session.pop('public_key', None)\n session.pop('password', None)\n\n return redirect('/')\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if request.method == 'GET':\n return render_template('home/page-register.html')\n else:\n try:\n name = request.form.get('name')\n email = request.form.get('email')\n uname = request.form.get('username')\n pword = request.form.get('password')\n\n status = db.add_user((name, email, uname, pword))\n if status:\n session['name'] = name\n session['email'] = email\n session['u_name'] = uname\n session['password'] = pword\n return redirect('/key-generate')\n else:\n return jsonify({'message': 'An Error Occured while creating record!!'})\n except Exception as e:\n print(e)\n return jsonify({'message': 'An Unexpected Error Occured!!, could not create record'})\n \n@app.route('/key-generate')\ndef key_generate():\n if 'u_name' in session:\n return render_template('home/page-generate-key.html')\n else:\n return redirect('/login')\n\n@app.route('/generate')\ndef generate():\n if 'u_name' in session:\n uname = session['u_name']\n pword = session['password']\n \n return_data = db.get_user((uname, pword))\n \n if len(return_data) != 0:\n return_data = return_data[0]\n \n if (return_data[4] == None or return_data[4] == \"\") and (return_data[5] == None or return_data[5] == \"\"):\n keys = host_node.gen_key_pair()\n\n data = {\n 'private_key': keys['private_key'],\n 'public_key': keys['public_key'],\n 'user' : {\n 'name' : session['name'],\n 'email' : session['email']\n }\n }\n status = db.insert_keys(data)\n \n if status:\n return jsonify({\n 'private_key': format_key_for_display(keys['private_key'], 'priv'),\n 'public_key': format_key_for_display(keys['public_key'])\n })\n else:\n 
return jsonify({'message': 'Error generating keys!!'})\n else:\n return jsonify({\n 'private_key': format_key_for_display(return_data[4], 'priv'),\n 'public_key': format_key_for_display(return_data[5])\n })\n else:\n return redirect('/register')\n else:\n return redirect('/login')\n\n@app.route('/send', methods=['GET'])\ndef send():\n if 'u_name' in session:\n uname = session['u_name']\n pword = session['password']\n \n return_data = db.get_user((uname, pword))\n \n if len(return_data) != 0:\n return_data = return_data[0]\n\n data = {}\n data['name'] = return_data[0]\n data['email'] = return_data[1]\n data['u_name'] = return_data[2]\n data['password'] = return_data[3]\n data['public_key'] = format_key_for_display(return_data[5])\n\n return render_template('home/page-send.html', data=data)\n else:\n return redirect('/login')\n\n@app.route('/blocks', methods=['GET'])\ndef blocks():\n if 'u_name' in session:\n data = {}\n data['name'] = session['name']\n data['email'] = session['email']\n data['u_name'] = session['u_name']\n data['password'] = session['password']\n data['public_key'] = format_key_for_display(session['public_key'])\n \n data['blocks'] = get_blocks(trunc = True)\n \n return render_template('home/page-blocks.html', data=data)\n else:\n redirect('/login')\n\n@app.route('/get_block_by_index')\ndef get_block_by_index():\n if 'u_name' in session:\n \n data = {}\n data['block'] = get_block()\n return render_template('/home/page-block-detail.html', data=data)\n\n@app.route('/transactions', methods=['GET', 'POST'])\ndef transactions():\n if request.method == \"GET\":\n if 'u_name' in session:\n \n data = {}\n data['name'] = session['name']\n data['email'] = session['email']\n data['u_name'] = session['u_name']\n data['password'] = session['password']\n data['public_key'] = format_key_for_display(session['public_key'])\n data['transactions'] = get_transactions(trunc = True)\n data['mined_transactions'] = get_mined_transactions(trunc = True)\n\n return render_template('home/page-transactions.html', data = data)\n else:\n return redirect('/login')\n else:\n # route to add a new transaction\n data = request.get_json()\n tx_keys = ['from_addr', 'to_addr', 'value', 'gas', 'args']\n for k in tx_keys:\n if k not in list(data.keys()):\n return jsonify({\"status\": False, \n \"message\": 'Transaction failed. 
Make sure all required field are included'})\n \n data['from_addr'] = format_key_for_use(data['from_addr'])\n data['to_addr'] = format_key_for_use(data['to_addr'])\n\n data['timestamp'] = time()\n\n # Sign transaction if this account is the one making the transaction\n # ==================================================================\n if 'u_name' in session:\n if data['from_addr'] == session['public_key']:\n sig = Wallet.sign_transaction(session['private_key'], [\n data['from_addr'], \n data['to_addr'], \n data['value'], \n data['gas'], \n str(data['args'])\n ]\n )\n\n data['signature'] = sig\n # ==================================================================\n \n\n response_data = host_node.make_transaction(data)\n return jsonify(response_data)\n \n@app.route('/get_transaction_by_hash')\ndef trans_detail():\n if 'u_name' in session:\n \n data = {}\n data['trans'] = get_transaction()\n\n return render_template('home/page-transaction-detail.html', data = data)\n else:\n return redirect('/login')\n\n@app.route('/contracts', methods=['GET'])\ndef contracts():\n if 'u_name' in session:\n data = {}\n data['name'] = session['name']\n data['email'] = session['email']\n data['u_name'] = session['u_name']\n data['password'] = session['password']\n data['public_key'] = format_key_for_display(session['public_key'])\n\n data['contracts'] = get_contracts(trunc = True)\n\n return render_template('home/page-contracts.html', data=data)\n else:\n redirect('/login')\n\n@app.route('/get-contract-result', methods=['POST'])\ndef get_contract_result():\n data = request.get_json()\n \n response = get_contract(data['contract_addr'])\n \n return jsonify(response)\n\n@app.route('/connect-node', methods=['GET', 'POST'])\ndef connect_node():\n if 'u_name' in session:\n if request.method == 'GET':\n data = {}\n data['name'] = session['name']\n data['email'] = session['email']\n data['u_name'] = session['u_name']\n data['password'] = session['password']\n data['public_key'] = format_key_for_display(session['public_key'])\n\n tmp = []\n for n in host_node.nodes_inbound:\n tmp.append({\n \"address\": n.address,\n \"port\": n.port,\n 'public_key': n.pk\n })\n data['in_nodes'] = tmp\n\n tmp = []\n for n in host_node.nodes_outbound:\n tmp.append({\n \"address\": n.address,\n \"port\": n.port,\n 'public_key': n.pk\n })\n data['out_nodes'] = tmp\n return render_template('home/page-connect-node.html', data=data)\n else:\n data = request.get_json()\n return_data = host_node.connect_with_node(data['address'], int(data['port']))\n res = db.get_connected_node((data['address'], int(data['port'])))\n pk = None\n if len(res) != 0:\n pk = res[0][2]\n return_data['public_key'] = format_key_for_display(pk)\n \n return jsonify(return_data)\n else:\n return redirect('/login')\n\n@app.route('/mine', methods=['GET'])\ndef mine_block():\n return_data = host_node.start_mining()\n \n if return_data['status'] == True:\n return jsonify({'status': True,'message': return_data['msg'],\n 'Block': host_node.blockchain.chain[-1].block_item})\n else:\n return jsonify({'status': False,'message': return_data['msg']})\n\n@app.route('/sync', methods=['GET'])\ndef sync_chain():\n \n return_data = host_node.sync_chain()\n\n return jsonify(return_data)\n\n@app.route('/check-sync-complete')\ndef check_sync():\n # Checks to see if synchronization is still in progress\n if host_node.sync_finished:\n return {'status': True, 'message': 'Synchronization completed Successfully'}\n else:\n return {'status': False, 'message': 'Synchronization is still in 
progress...'}\n \n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('home/page-error-404.html'), 404\n\n\n# Used by flask's session variable\napp.secret_key = 'mysecret'\n\nif __name__ == '__main__':\n app.run(host=addr, port=(int(port) - 1000), threaded = True)\n\n","repo_name":"SaeedBashar/e-vote-blockchain","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41206805288","text":"import numpy as np\nimport torch\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_X(N=1_000, I=50, Q=30, N_test=200):\n N = N + N_test\n\n X = torch.randn([N * I, Q], dtype=torch.float32)\n\n X = (X - X.mean(0)) / X.std(0)\n X = X / np.sqrt(X.shape[1])\n X = X.reshape([N, I, Q])\n\n return X\n\n\ndef simulate(X, v_beta=0.5, v_gamma=0.8, b=-1, F=None, P=1):\n if F is None:\n F = torch.ones([X.shape[0], 1])\n\n # simulate single phenotype\n K = F.shape[1]\n b = b * torch.ones([K, P])\n v_beta = v_beta * torch.ones(P)\n v_gamma = v_gamma * torch.ones(P)\n\n # sample weights\n gamma = torch.randn((X.shape[2], v_gamma.shape[0]))\n _w = torch.einsum(\"nik,kp->nip\", X, gamma)\n _scale_w = torch.sqrt(v_gamma / _w.var([0, 1]))\n gamma = _scale_w[None, :] * gamma\n _w = _scale_w[None, None, :] * _w\n\n w = torch.softmax(_w, dim=1)\n\n # sample z\n beta = torch.randn((X.shape[2], v_beta.shape[0]))\n beta = beta / torch.sqrt((beta**2).mean(0, keepdim=True))\n z = torch.einsum(\"nik,kp->nip\", X, beta)\n u = torch.einsum(\"nip,nip->np\", w, z)\n u = (u - u.mean(0)) / u.std(0)\n u = torch.sqrt(v_beta) * u\n beta = torch.sqrt(v_beta) * beta\n\n # compute rates\n logits = F.mm(b) + u\n\n # sample Y\n Y = torch.distributions.Binomial(2, logits=logits).sample()\n\n return F, Y, u, w\n\n\ndef split_data(Xs, test_size=200, val_size=0.0, test_rs=127, val_rs=412):\n idxs_all = np.arange(Xs[0].shape[0])\n idxs = {}\n idxs[\"train_val\"], idxs[\"test\"] = train_test_split(idxs_all, test_size=test_size, random_state=test_rs)\n\n if not np.isclose(val_size, 0):\n idxs[\"train\"], idxs[\"val\"] = train_test_split(idxs[\"train_val\"], test_size=val_size, random_state=val_rs)\n else:\n idxs[\"train\"] = idxs[\"train_val\"]\n del idxs[\"train_val\"]\n\n outs = []\n for X in Xs:\n out = {}\n for key in idxs.keys():\n out[key] = X[idxs[key]]\n outs.append(out)\n return outs\n\n\ndef load_simulation(seed=42, N=1_000, I=50, Q=30, N_test=200, P=1, v_beta=0.5, v_gamma=0.8, b=-1, F=None):\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n X = get_X(N, I, Q, N_test)\n data = simulate(X, v_beta, v_gamma, b, F, P)\n X, F, Y, u, w = split_data((X, *data))\n return X, F, Y, u, w\n","repo_name":"AIH-SGML/MixMIL","sub_path":"mixmil/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70166366713","text":"\nimport pandas as pd\nimport time\n\n\nartists = pd.read_csv(\"data/artists.csv\")\n\n\nstart = time.time()\nartists[\"high_followers_has_genre\"] = artists.apply(lambda df: 1 if df.followers > 10 and len(df.genres) > 0 else 0 , axis = 1)\nend = time.time()\nprint(end - start)\n\n\n\nstart = time.time()\nartists[\"high_followers_has_genre\"] = [1 if followers > 10 and len(genres) > 0 else 0\n for followers, genres in \n zip(artists.followers, artists.genres)]\nend = time.time()\nprint(end - start)\n\n\n\nimport numpy as np\n\ndef 
create_follower_genre_feature(followers, genres):\n \n return 1 if followers > 10 and len(genres) > 0 else 0\n \n\nvec_create_follower_genre_feature = np.vectorize(create_follower_genre_feature)\n\n\nstart = time.time()\nartists[\"high_followers_has_genre\"] = vec_create_follower_genre_feature(artists.followers, artists.genres)\nend = time.time()\nprint(end - start)\n\n\n\nstart = time.time()\ncheck = [\"Ace\", \"Rico\", \"Luna\"]\nartists[artists.name.isin(check)]\nend = time.time()\nprint(end - start)\n\n\n\nstart = time.time()\ncheck = {\"Ace\", \"Rico\", \"Luna\"}\nartists[artists.name.isin(check)]\nend = time.time()\nprint(end - start)\n\n\n\n\n","repo_name":"atreadw1492/software_engineering_for_data_scientists","sub_path":"ch6/slow_apply_example.py","file_name":"slow_apply_example.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"25855004875","text":"'''\nhttps://www.hackerrank.com/challenges/grading/problem\n'''\n\n\ndef grade(params):\n\n lst_total = []\n\n for data in params:\n\n if data >= 38:\n grade = data / 5\n average_grade = (grade + 1) * 5\n total = average_grade - data\n\n if total < 3:\n lst_total.append(average_grade)\n elif total == 3:\n lst_total.append(data) \n else:\n lst_total.append(data)\n\n return lst_total\n\n\nif __name__ == '__main__':\n value_grade = [73, 67, 38, 33]\n print(grade(value_grade))\n\n","repo_name":"dafinoer/turbo-fortnight","sub_path":"challange/grading_student.py","file_name":"grading_student.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"16227449013","text":"\n\nimport datetime\n\nimport ship_luis # Look Up In Scrapes, fill in _collected\nimport ship_luoti # Look Up On The Internet, fill in _collected\nimport ship_ulmui # Update Local Most Updated Info, from _collected\nimport ship_show\n\n#import ship_db_access as db\n#import ship_ec_helpers as ec\n\n\n# Hardcoded constants\nIMO = '5351894' # ''8917613' # '8888630' #\nLST_FLD = ['imo', 'mmsi', 'callsign', 'ship_name', 'ship_type', 'flag',\n 'ship_length', 'ship_width',\n 'ship_weight_gt', 'ship_weight_dw', 'ship_draught',\n 'ship_status', 'home_port', 'build_year', 'build_place',\n 'ship_owner', 'ship_operator', 'classification_society',\n 'former_names', 'info_source', 'info_update']\n\n\ndef main(imo):\n\n # 1 LUIS: Look Up In Scrapes, fill in _collected\n dtt_start = datetime.datetime.now()\n ship_luis.ship_luis(imo) # Look Up In Scrapes, i.e. Collect data from all available scrp tables\n print(\"... Ship Look up in Scrapes: {:.3} ms\".format((datetime.datetime.now() - dtt_start).total_seconds()*1000))\n\n # 2 LUOTI: Look Up On The Internet, fill in _collected. This makes more sence with imo, as it's the more widely searchable identifyer\n dtt_start = datetime.datetime.now()\n lst_soti = ship_luoti.main(imo)\n print(\"... Ship Look up on the internet: {:.3} ms\".format((datetime.datetime.now() - dtt_start).total_seconds()*1000))\n\n # 3 ULMUI: Update Local Most Updated Info, from _collected. This makes more sence with mmsi, as it's a more specific identifyer\n dtt_start = datetime.datetime.now()\n ship_ulmui.update_local_mui(mmsi) # Update Most Updated Info, based on all collected info\n print(\"... 
Present up-to-date ship data: {:.3} ms\".format((datetime.datetime.now() - dtt_start).total_seconds()*1000))\n\n # 4 SHOW: Present up-to-date ship data\n dtt_start = datetime.datetime.now()\n ship_show.imo(imo)\n print(\"... Present up-to-date ship data: {:.3} ms\".format((datetime.datetime.now() - dtt_start).total_seconds()*1000))\n\nif __name__ == '__main__':\n\n main(IMO)","repo_name":"MartinHvidberg/freeshipdb","sub_path":"src/ship_update.py","file_name":"ship_update.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11201504259","text":"# make predictions on the testing data\nprint(\"[INFO] Totalreactionforce...\")\npreds = new_model.predict(testImagesX)\npreds2 = new_model.predict(trainImagesX)\n# compute the difference between the *predicted* force and the\n# *actual* force, then compute the percentage difference and\n# the absolute percentage difference\n\ndiff = preds.flatten() - testY\nprint(diff)\nabs_diff = abs(diff)\npercentDiff = (diff / testY) * 100\nabsPercentDiff = np.abs(percentDiff)\n#print(absPercentDiff)\n\n# compute the mean and standard deviation of the absolute percentage\n# difference\n# print(\"mean of array\", np.mean(abs_diff))\nmean = np.mean(abs_diff)\nmax = np.amax(abs_diff)\nprint('mean', mean)\nprint('max', max)\n","repo_name":"PRAMODVENKATESH/Deep-Learning-model-to-predict-plasticity-distribution-in-ductile-materials","sub_path":"code/make_predictions_on_testimage.py","file_name":"make_predictions_on_testimage.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"25386221130","text":"import uasyncio as asyncio\nfrom machine import Timer\n\nfrom DCF77.decoder_uGUIv1 import *\nfrom DCF77.local_time_calendar_uGUI import LocalTimeCalendar\n\n\nclass DCF_device():\n def __init__(self,key_in_gpio):\n self.local_time = LocalTimeCalendar()\n self.dcf_decoder = DCF_Decoder(key_in_gpio, self.local_time)\n \n def next_second(self):\n self.local_time.next_second() \n \n def get_local_time(self):\n # Format:\n ## localtime : t[0]:year, t[1]:month, t[2]:mday, t[3]:hour, t[4]:minute, t[5]:second, t[6]:weekday, t[7]:time_zone\n return self.local_time.get_raw_time_and_date()\n \n def get_status(self):\n ts = self.dcf_decoder.get_time_status()\n time_state = ts[1]\n if ts[1] == SYNC:\n ts_text = \"sync'd\"\n elif time_state == SYNC_IN_PROGRESS:\n ts_text = \"in progress\"\n elif time_state == SYNC_FAILED:\n ts_text = \"frame fail\"\n elif time_state == OUT_OF_SYNC:\n ts_text = \"out of sync\"\n else:\n ts_text = \"init\"\n ss = self.dcf_decoder.get_signal_status()\n bit_rank = ss[0]\n last_bit = ss[1]\n return (time_state,ts_text,bit_rank,last_bit)\n # format\n ## time_status = [self._status_controller.time_event, self._status_controller.time_state, self._status_controller.error_message]\n ## signal_status = [self._status_controller.last_received_frame_bit_rank, self._status_controller.last_received_frame_bit,\n ## self._status_controller.signal_event, self._status_controller.signal_state]\n\n\nif __name__ == \"__main__\":\n \n # D0 = Probe(26) # -- time_trigger \n # D1 = Probe(16) # DCF_Decoder._DCF_clock_IRQ_handler\n # D2 = Probe(17) # DCF_Decoder.frame_decoder\n # D3 = Probe(18) # \n # D4 = Probe(19) # _StatusController.signal_received\n # D5 = Probe(20) # _StatusController.signal_timeout\n # D6 = Probe(21) # -- time_status == SYNC\n # D7 = Probe(27) #\n \n 
#-------------------------------------------------------------------------- \n TONE_GPIO = const(7) # the GPIO where DCF signal is received by MCU\n dcf = DCF_device(TONE_GPIO) \n \n #-------------------------------------------------------------------------- \n def timer_IRQ(timer):\n one_second_time_event.set()\n\n async def time_trigger():\n while True:\n D0.off()\n await one_second_time_event.wait()\n D0.on()\n one_second_time_event.clear()\n dcf.local_time.next_second()\n print(\"\\t\"*5, dcf.get_local_time())\n print(dcf.get_status())\n\n Timer(mode=Timer.PERIODIC, freq=1, callback=timer_IRQ)\n# self.one_second_time_event = uasyncio.ThreadSafeFlag()\n one_second_time_event = uasyncio.ThreadSafeFlag()\n\n asyncio.create_task(dcf.dcf_decoder.DCF_signal_monitoring())\n asyncio.create_task(dcf.dcf_decoder.frame_decoder())\n asyncio.create_task(time_trigger()) \n scheduler = uasyncio.get_event_loop()\n scheduler.run_forever()\n\n","repo_name":"xiansnn/DCF77-clock","sub_path":"DCF77_microGUI/DCF77_device.py","file_name":"DCF77_device.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"42085031046","text":"# Coding Challenge 2\r\n### Chelsea Lizardo\r\n### NRS 528\r\n#\r\n#\r\n#Using these lists:\r\n\r\nlst1 = ['dog', 'cat', 'rabbit', 'hamster', 'gerbil']\r\nlst2 = ['dog', 'hamster', 'snake']\r\n\r\n# 1.) Determine which items are present in both lists.\r\n#Define intersection of the two list using def and intersection functions\r\n\r\ndef intersection(lst1, lst2):\r\n# create list 3 using a for loop that iterates through to find common items in both lists\r\n lst3 = [value for value in lst1 if value in lst2]\r\n return lst3\r\n#print intersection\r\nprint(intersection(lst1, lst2))\r\n\r\n# 2.) 
Determine which items do not overlap in the lists.\r\ndef intersection2(lst1, lst2):\r\n# create list 3 using a for loop that iterates through to find common items in both lists\r\n# use logic gate \"not\" to perform opposite operation on list\r\n lst3 = [value for value in lst1 if value not in lst2]\r\n return lst3\r\n#print intersection\r\nprint(intersection2(lst1, lst2))","repo_name":"chelsealizardo/codingChallenge2","sub_path":"codingChallenge2/List Overlap.py","file_name":"List Overlap.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8595873284","text":"import requests\nimport time, calendar\nimport datetime as dt\nimport os\nimport subprocess\nimport json\nimport threading\nimport jinja2\nimport markdown\n\n#Custom console print\ndef console_log(message: str) -> None:\n currentDT = calendar.timegm(time.gmtime())\n currentDT = dt.datetime.fromtimestamp(currentDT).strftime('%m/%d/%Y - %H:%M:%S')\n\n print(f\"[{currentDT}] [{message}]\")\n\n#Get saved posts from user's reddit rss feed\ndef get_posts() -> list:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36'}\n saved_data = requests.get(rss_url, headers=headers).json()\n posts = saved_data['data']['children']\n return posts\n\n#Get file names from subreddit folder\ndef get_files(subreddit: str) -> list:\n path = f\"downloads/{subreddit}\"\n os.makedirs(path, exist_ok = True)\n files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n files = [x.split(\".\")[0] for x in files]\n files = [x.split(\"_\")[0] for x in files]\n\n return files\n\ndef download_file(media_url: str, filename: str) -> None:\n try:\n media = requests.get(media_url, timeout=5)\n except Timeout:\n return download_file(media_url, filename)\n if media.status_code == 200:\n open(filename, 'wb').write(media.content)\n\n# Render template with jinja2\ndef render_template(file_name: str, **context) -> object:\n return jinja2.Environment(\n loader=jinja2.FileSystemLoader('templates/')\n ).get_template(file_name).render(context)\n\ndef process_download(post: dict, gallery_data: list) -> None:\n title = post['data']['title']\n source = post['data']['domain']\n media_url = post['data']['url']\n post_id = post['data']['id']\n subreddit = post['data']['subreddit']\n selftext = post['data']['selftext']\n\n if source == 'i.imgur.com':\n media_url = media_url.replace('.gifv', '.mp4')\n file_extension = media_url.split(\".\")[-1]\n file_name = f'downloads/{subreddit}/{post_id}.{file_extension}'\n download_file(media_url, file_name)\n\n elif source == 'i.redd.it':\n file_extension = media_url.split(\".\")[-1]\n file_name = f'downloads/{subreddit}/{post_id}.{file_extension}'\n download_file(media_url, file_name)\n \n elif source == 'gfycat.com':\n file_name = f'downloads/{subreddit}/{post_id}.mp4'\n gfycat_id = media_url.split(\"/\")[-1]\n media_url = f\"https://giant.gfycat.com/{gfycat_id}.mp4\"\n download_file(media_url, file_name)\n \n elif source == f\"self.{subreddit}\":\n post_html = markdown.markdown(selftext)\n\n # Get date created (OP)\n timestamp = post['data']['created']\n date_time = dt.datetime.fromtimestamp(timestamp)\n op_time_string = date_time.strftime(\"%d %B, %Y | %H:%M:%S\")\n\n html = render_template('post_template.html', post = post, post_html = post_html, op_time_string = op_time_string)\n with 
open(f\"downloads/{subreddit}/{post_id}.html\",\"w\", encoding='utf-8') as f:\n f.write(html)\n \n elif source == 'reddit.com':\n index = 1\n for image in gallery_data:\n file_extension = \"jpg\" if image['image_type'] == \"image/jpg\" else \"png\"\n download_file(f\"https://i.redd.it/{image['media_id']}.{file_extension}\", f\"downloads/{subreddit}/{post_id}_{index}.{file_extension}\")\n index = index + 1\n \n else:\n subprocess.call('yt-dlp \"'+media_url+'\" -o \"/downloads/'+subreddit+'/'+post_id+'.mp4\" --quiet', shell=False)\n\n#Run main process\ndef main(f_stop: object) -> None:\n console_log(\"Getting saved posts...\")\n posts = get_posts()\n \n console_log(\"Downloading posts...\")\n for post in posts:\n #If post is a link (not comment, account, subreddit, etc.)\n if post['kind'] == 't3':\n gallery_data = []\n if \"gallery_data\" in post['data']:\n images = post['data']['gallery_data']['items']\n for image in images:\n gallery_data.append({\"media_id\": image['media_id'], \"image_type\": post['data']['media_metadata'][image['media_id']]['m']})\n\n #get previously downloaded file names (so we don't re-download the same posts)\n subreddit = post['data']['subreddit']\n files = get_files(subreddit)\n\n post_id = post['data']['id']\n if not post_id in files:\n console_log(f\"Downloading - Post ID : {post_id}\")\n process_download(post, gallery_data)\n \n console_log(f\"Next check in {delay} seconds\")\n print()\n if not f_stop.is_set():\n threading.Timer(delay, main, [f_stop]).start()\n\nif __name__ == \"__main__\":\n with open(\"settings.json\") as f:\n settings = json.load(f)\n rss_url = settings['rss_uri']\n delay = settings['delay']\n \n threading.Thread(target=main, args=(threading.Event(),)).start()","repo_name":"abe287/reddit-auto-saver","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1290366034","text":"from unittest import TestCase\nfrom sklearn import linear_model\nimport numpy as np\n\nfrom ml.regression.regression import LinearModel\n\nclass TestLinearModel(TestCase):\n\n X = np.array([-5, -3, -1, 1, 3, 5]).reshape(-1,1)\n y = np.array([1, 2, 3, 4, 5, 6])\n\n X_test = np.array([-4, -2, 2, 4]).reshape(-1,1)\n y_test = np.array([1.5, 2.5, 4.5, 5.5])\n\n def test_init(self):\n #Test initialization for each model\n linear = LinearModel('linear')\n eln = LinearModel('elasticnet') \n elncv = LinearModel('elasticnetcv')\n bayes = LinearModel('bayes_ridge') \n orth = LinearModel('orthogonal') \n orthcv = LinearModel('orthogonalcv') \n theil = LinearModel('theil') \n sgd = LinearModel('sgd')\n pct = LinearModel('perceptron')\n p_a = LinearModel('passive-agressive')\n\n self.assertIsInstance(linear.get_model(), linear_model.LinearRegression)\n self.assertIsInstance(eln.get_model(), linear_model.ElasticNet)\n self.assertIsInstance(elncv.get_model(), linear_model.ElasticNetCV)\n self.assertIsInstance(bayes.get_model(), linear_model.BayesianRidge)\n self.assertIsInstance(orth.get_model(), linear_model.OrthogonalMatchingPursuit)\n self.assertIsInstance(orthcv.get_model(), linear_model.OrthogonalMatchingPursuitCV)\n self.assertIsInstance(theil.get_model(), linear_model.TheilSenRegressor)\n self.assertIsInstance(sgd.get_model(), linear_model.SGDRegressor)\n self.assertIsInstance(pct.get_model(), linear_model.Perceptron)\n self.assertIsInstance(p_a.get_model(), linear_model.PassiveAggressiveRegressor)\n \n \n def test_params(self):\n linear = 
LinearModel('linear', n_jobs=1, normalize=False)\n \n params = linear.get_params()\n self.assertEqual(params['n_jobs'], 1)\n self.assertEqual(params['normalize'], False)\n \n params['n_jobs'] = 5\n\n linear.set_params(**params)\n\n self.assertEqual(linear.get_params()['n_jobs'], 5)\n \n def test_linear(self):\n linear = LinearModel('linear', X=self.X, y=self.y)\n\n #Test predictions in training set\n self.assertEqual(linear.get_model().predict(5), 6)\n self.assertEqual(linear.get_model().predict(-5), 1)\n\n\n def test_scores_compare(self):\n print(\"Comparing linear and elastic net\")\n \n linear = LinearModel('linear', X=self.X, y=self.y)\n eln = LinearModel('elasticnet', X=self.X, y=self.y)\n\n score_linear = linear.score(self.X_test, self.y_test)\n score_eln = eln.score(self.X_test, self.y_test)\n\n print(\"Average accuracy:\")\n\n print('linear: ' + str(score_linear), end=' ')\n print('eln: ' + str(score_eln))\n \n comparison = linear.compare(eln, self.X_test, self.y_test)\n\n print(\"Multiple metrics\\nmetric | linear | eln\")\n\n for metric, (linear, eln) in comparison.items():\n print(metric + ' ' + str(linear) + ' ' + str(eln))\n \n","repo_name":"Booligans/BMLF","sub_path":"src/unittest/python/tests/ml/regression/regression_tests.py","file_name":"regression_tests.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"14276187649","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 16 14:10:20 2020\r\n\r\n@author: noahs\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport Regress as r\r\nimport func\r\nm,b = 0,0\r\nfig, (ax1,ax2 )= plt.subplots(2, 1)\r\n\r\ndef scatter_Plot(x, Y, all_L, avg_L, avgP, x_L,y_L ):\r\n\r\n \r\n ax1.scatter(x,Y, label = all_L, s = 5, c = 'b')\r\n ax1.scatter(avgP[0],avgP[1], label = avg_L, s = 25, c = 'r') \r\n ax1.set(title = 'Fifa Data Set', xlabel = x_L, ylabel = y_L)\r\n fig.set_size_inches(12,12)\r\n ax1.legend()\r\n #bs = np.linspace(model.get_m()-2*model.get_m(),model.get_m()+2*model.get_m())\r\n slopes = np.arange(model.get_m()-2*model.get_m(),model.get_m()+2*model.get_m(),model.get_m()/5)\r\n\r\n SSs = []\r\n print('Please wait...')\r\n for i in range(len(slopes)):\r\n SSs.append(func.SS(x,Y,slopes[i], avgP[1] - slopes[i]*avgP[0]))\r\n ax2.plot(slopes, SSs)\r\n ax2.set(title = 'Sum of Squared Error', xlabel = 'Slope', ylabel = 'SS')\r\ndef line_Plot(x):\r\n x = np.linspace(0,np.max(x))\r\n ax1.plot(x, m*x+b, c = 'y')\r\n\r\n\r\ncolx = 'Overall' #choose columns to compare\r\ncolY = 'Potential'\r\n\r\nOdf = pd.read_csv('data.csv') #import data set\r\ndf = Odf.loc[:,'Crossing':'GKReflexes']\r\ndf = pd.concat([df, Odf.loc[:,['Overall','Potential']]], axis = 1)\r\ni = 100000 #number of points to compare\r\nclean_df = func.clean_Data(df,i,colx, colY,False, False) #Clean the data\r\n\r\nx = clean_df[colx] \r\nY = clean_df[colY] \r\n\r\n\r\n\r\nmodel = r.best_Fit() #Create a best fit model\r\nmodel.fit(x, Y) #Fit with the data\r\n\r\n\r\nm = model.slope_Best() #Use model to find slope of best fit line\r\nb = model.b_Best() #Use model to find y intercept of best fit line\r\navgP = model.get_avgP() #Get the average point\r\n\r\n\r\nscatter_Plot(x,Y, 'Individual', 'Average individual',avgP, colx, colY )\r\nline_Plot(x)\r\n\r\n\r\nprint('\\n\\nThe best fit line for this set of data is: y =', m, 'x +', b )\r\n","repo_name":"noahsolomon0518/Data-Science-Projects","sub_path":"Linear 
Regression/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71556579193","text":"'''\n2023.11.1\n4가지 연산을 이용하여 1 만들기\nhttps://www.codetree.ai/missions/2/problems/make-one-using-four-operations?&utm_source=clipboard&utm_medium=text\n# 실수 1: 가장 작은 값과 큰 값의 경우를 생각 안함 => TC를 만들 때 가장 작은 경우, 큰 경우를 생각하기\n# 실수 2: visited 배열의 크기를 N+1로 해서, 값이 N+1, N+2, N+3으로 커질 때 index 범위 오류 남\n'''\n\nfrom collections import deque\n\nN = int(input())\nINF = 10000000 # viisted 최대 범위 숫자\nvisited = [False for _ in range(INF)] # visited[i]: i번 숫자를 만든 적이 있는지\nq = deque()\n\n# -1, +1, /2, /3을 한 값을 구해주는 함수\ndef make_next(cur, i): # cur숫자에서 i번 연산을 수행\n if i == 0:\n return cur - 1\n elif i == 1:\n return cur + 1\n elif i == 2:\n if cur % 2 != 0:\n return -1\n return cur // 2\n else:\n if cur % 3 != 0:\n return -1\n return cur // 3\n\nresult = 0 # 출력값\ndef bfs():\n next_num = 0\n while q:\n num, cnt = q.popleft()\n if num == 1: return cnt # 만약 처음 값이 1이라면 아무런 연산을 수행하지 않아야 하므로 바로 리턴\n for i in range(4):\n next_num = make_next(num, i) # -1, 1, /2, /3 연산을 수행한 뒤의 값\n if next_num == -1: # 만약 나누는 연산인 경우 나누어 떨어지지 않는다면 무시\n continue\n if next_num == 1: # 1을 처음으로 만들었다면\n break # for문 나가기\n \n if not visited[next_num]: # 해당 숫자를 만든 적이 없다면\n visited[next_num] = True # 해당 값을 방문했다고 표시\n q.append((next_num, cnt+1))\n if next_num == 1: # 1을 만들었다면\n return cnt + 1 # 이전 카운트+1을 리턴\n\nvisited[N] = True # 입력값 방문처리\nq.append((N,0)) # 입력값과 연산 수행횟수 큐에 넣기\nprint(bfs())\n\n'''\n해설\n- 이제까지 BFS를 생각할 때는 위치를 노드, 간선은 인접한 칸으로 가는 선 이정도로 생각했음\n- 이 문제에서는 각 숫자들이 노드, 연산이 간선으로 간주하여 그래프를 그릴 수 있음\n => 가중치가 전부 1인 그래프가 주어졌을 때 정점 n으로부터 정점 1까지의 최단거리를 구하는 문제=BFS\n- 정점을 1에서 2n-1번까지 사용해 최단거리를 구해야 함\n => n보다 큰 값을 만든 다음 2/3으로 숫자를 나눠 1로 더 빨리 갈 수 있음\n => -1 연산을 n-1번 반복하면 항상 1이 됨(답은 최대 n-1)\n => n에 +1 연산을 n-1번 연산을 했을 때 = 2n-1이 됨, 이 최댓값까지만 정점을 만들어 최단거리를 구하면 됨\n- 시간복잡도 = 정점 총 2n-1, 각 정점 당 최대 4개의 정점 = O(2n-1*4)=o(n)\n'''\n\nfrom collections import deque\nimport sys\nimport enum\n\nOPERATOR_NUM = 4\nINT_MAX = sys.maxsize\n\nclass OPERATOR(enum.Enum):\n SUBTRACT = 0\n ADD = 1\n DIV2 = 2\n DIV3 = 3\n\nn = int(input())\nans = INT_MAX\n\nq = deque()\nvisited = [False for _ in range(2*n)]\n\n# step[i]: 정점 n에서 시작해서 정점 i 지점에 도달하기 위한 최단거리를 기록\nstep = [0 for _ in range(2*n)]\n\n# num 값에 해당 operator를 사용할 수 있는지 판단\n# 2로 나누거나 3으로 나누려는 경우 num이 해당 값으로 나누어 떨어질 때만\n# 해당 연산을 사용 가능\ndef possible(num, op):\n if op == OPERATOR.SUBTRACT.value or op == OPERATOR.ADD.value:\n return True\n elif op == OPERATOR.DIV2.value:\n return num % 2 == 0\n else:\n return num % 3 == 0\n\n# num에 op연산을 수행했을 때의 결과를 반환\ndef calculate(num, op):\n if op == OPERATOR.SUBTRACT.value:\n return num - 1\n elif op == OPERATOR.ADD.value:\n return num + 1\n elif op == OPERATOR.DIV2.value:\n return num // 2\n else:\n return num // 3\n\n# 1에서 2n-1사이의 숫자만 이용해도 올바른 답을 구할 수 있으므로\n# 그 범위 안에 들어오는 숫자인지 확인\ndef in_range(num):\n return 1 <= num and num <= 2*n-1\n\n# 1에서 2n-1사이의 숫자이면서 아직 방문한 적이 없다면 가야함\ndef can_go(num):\n return in_range(num) and not visited[num]\n\n# queue에 새로운 위치를 추가하고 방문 여부를 표시\ndef push(num, new_step):\n q.append(num)\n visited[num] = True\n step[num] = new_step\n\n# BFS를 통해 최소 연산 횟수를 구하기\ndef find_min():\n global ans\n \n # queue에 남은 것이 없을 때까지 반복\n while q:\n # queue에서 가장 먼저 들어온 원소를 빼기\n curr_num = q.popleft()\n \n # queue에서 뺸 원소의 위치를 기준으로 4가지 연산을 적용\n for i in range(OPERATOR_NUM):\n # 연산을 적용할 수 없는 경우라면 패스\n if not possible(curr_num, i):\n continue\n new_num = calculate(curr_num, i)\n # 아직 
방문한 적이 없으면서 갈 수 있는 곳이라면 새로 queue에 넣어줌\n if can_go(new_num):\n # 최단 거리는 이전 최단거리에 1이 증가\n push(new_num, step[curr_num] + 1)\n \n # 1번 정점까지 가는 데 필요한 최소 연산 횟수를 답으로 기록\n ans = step[1]\n\n# BFS를 통해 최소 연산 횟수를 구하기\npush(n,0)\nfind_min()\nprint(ans)","repo_name":"YJ243/python_codingTest","sub_path":"chap5_DFS_BFS/codetree_makeone.py","file_name":"codetree_makeone.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25013190767","text":"from __future__ import annotations\nimport json\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from pathlib import Path\n\nfrom ..job_distributor import SortedWorkerHandler, QMLDemo, ReturnTypes\n\n\ndef build_strategy_matrix_offsets(\n num_workers: int,\n sphinx_examples_dir: Path,\n sphinx_examples_execution_times_file_loc: str = None,\n glob_pattern: str = \"*.py\",\n) -> ReturnTypes.DictSortedWorkerHandler:\n \"\"\"\n Generates a JSON Dict of the following schema:\n\n {\n \"num_workers\": \n \"workers\": [\n {\n \"load\": \n \"tasks\": [\n {\n \"name\": \n \"load\": \n }\n ]\n }\n ]\n }\n\n The jobs are distributed across the workers as evenly as possible. To see the methodology of the distribution,\n please see ../../job_distributor.py. Details are there.\n\n This function also adds 1 to the load of all demos. This is done to handle the case where you may not know the\n load of the demos or unable to fetch that information. This would make the SortedWorkerHandler job to distribute\n the jobs evenly across all the workers, if all the demos had a load of 0, then they would all go into 1 worker\n as the SortedWorkerHandler would see them all not requiring any power to build.\n\n\n Args:\n num_workers: The total number of nodes that needs to be spawned\n sphinx_examples_dir: The directory where all the sphinx demonstrations reside\n sphinx_examples_execution_times_file_loc: The path to the JSON file\n containing the name of demos to execution time\n glob_pattern: The pattern use to glob all demonstration files inside sphinx_examples_dir. 
Defaults to \"*.py\"\n\n Returns:\n ReturnTypes.DictSortedWorkerHandler\n \"\"\"\n if sphinx_examples_execution_times_file_loc is not None:\n with open(sphinx_examples_execution_times_file_loc) as fh:\n execution_times = json.load(fh)\n else:\n execution_times = {}\n job_distribution_handler = SortedWorkerHandler(num_workers=num_workers)\n for sphinx_examples_file_name in sphinx_examples_dir.glob(glob_pattern):\n # Adding +1 to load of each demo as we want the load on all demos to be >1 in order for distribution\n # To work well\n job = QMLDemo(\n name=sphinx_examples_file_name.name,\n load=execution_times.get(sphinx_examples_file_name.name, 0) + 1,\n )\n job_distribution_handler.add_task(job)\n job_distribution_handler.assign_tasks_to_workers()\n\n # Drop all workers with a load of 0\n job_distribution_worker_list = [ worker for worker in job_distribution_handler.asdict()[\"workers\"] if worker[\"load\"] ]\n return {\n \"num_workers\": len(job_distribution_worker_list),\n \"workers\": job_distribution_worker_list\n }\n","repo_name":"PennyLaneAI/qml","sub_path":".github/workflows/qml_pipeline_utils/qml_pipeline_utils/services/build_strategy_matrix.py","file_name":"build_strategy_matrix.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":439,"dataset":"github-code","pt":"95"} +{"seq_id":"72451545593","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport os\nfrom urllib import request\nfrom urllib.parse import quote\nimport string\nimport json\nfrom bs4 import BeautifulSoup\n'''\nCreated on 2017/12/27\n\n@author: liscmb\n'''\nresponse = request.urlopen(\"https://movie.douban.com/chart\")\nhtml = response.read().decode(\"utf-8\")\nsoup = BeautifulSoup(html,\"lxml\")\n\n\nprint(\"OK\")\n\nfw = open(\"./filelist\", \"w\",encoding='utf-8')\n\n\nfor child in soup.body.find_all(href=re.compile(\"typerank\\?type_name\")):\n print(child.text)\n fw.write(child.text+\"\\n\")\n typeNo = re.findall(r\"type=(.+?)&\",str(child))[0]\n response3 = request.urlopen('https://movie.douban.com/j/chart/top_list?type=' +typeNo+ '&interval_id=100%3A90&action=&start=0&limit=20')\n text = json.loads(response3.read().decode(\"utf-8\"))\n print(text)\n print(\"Rank\\tRating\\tTitle\\tUrl\")\n fw.write(\"Rank\\tRating\\tTitle\\tUrl\"+\"\\n\")\n for mo in text:\n info = str(mo.get('rank')) + \"\\t\" + mo.get('rating')[0] + \"\\t\" + mo.get('title') + \"\\t\" + mo.get('url')+\"\\n\"\n print(info)\n fw.write(info)\n\nfw.close()\n# s = quote('https://movie.douban.com'+ str(child.get('href')), safe=string.printable)\n# print(s)\n# response2 = request.urlopen(s)\n# html2 = response2.read().decode('utf-8')\n# soup2 = BeautifulSoup(html2,\"lxml\")\n# print(soup2)\n# for child2 in soup2.body.find_all(class_=re.compile(\"movie-list-item[\\S\\s]+watched\")):\n# print(\"ok2\")\n# print(child2)\n\n\n\n# for span in soup.body.find(class_='types'):\n# for child in span.find_all(href=True):\n# print(child)\n\n# for child in soup.head.stripped_strings:\n# print(repr(child))\n#\n# for child2 in soup.body.find(class_='types').children:\n# for child3 in child2:\n# print(repr(child3))\n","repo_name":"douyacai911/DoubanMovieTop20byType","sub_path":"GetRank.py","file_name":"GetRank.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12812026084","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\n\nfrom datetime import date, 
timedelta\n\nfrom character.models import Spieler\n\nfrom .models import *\nfrom .forms import ProposalForm\n\n\ndays = [\"Mo\", \"Di\", \"Mi\", \"Do\", \"Fr\", \"Sa\", \"So\"]\nmonths = [\"Januar\", \"Februar\", \"März\", \"April\", \"Mai\", \"Juni\", \"Juli\", \"August\", \"September\", \"Oktober\", \"November\", \"Dezember\"]\n\ndef get_1st_day_of_next_month(date_in_month: date):\n next_month = (date_in_month.month + 1) % 12\n next_year = date_in_month.year\n\n if next_month == 0: next_month = 12\n if next_month == 1: next_year += 1\n\n return date(next_year, next_month, 1)\n\n\ndef get_month(date_in_month: date):\n this_month = date_in_month.month\n this_year = date_in_month.year\n\n next_month_day = get_1st_day_of_next_month(date_in_month)\n\n return {\n \"month_name\": months[date_in_month.month-1],\n \"month\": date_in_month.month,\n \"year\": date_in_month.year,\n \"first_weekday\": date(this_year, this_month, 1).weekday(),\n \"last_day\": (next_month_day - timedelta(days=1)).day\n }\n\n\n@login_required\ndef index(request):\n\n if request.method == \"POST\":\n form = ProposalForm(request.POST)\n if form.is_valid():\n proposal = form.save(commit=False)\n \n proposal.player = request.user\n \n chosen_date, created = Day.objects.get_or_create(date=request.POST[\"date\"])\n proposal.day = chosen_date\n \n prev_player = chosen_date.proposal_set.order_by(\"-order\").first()\n proposal.order = prev_player.order+1 if prev_player else 1\n proposal.save()\n\n return redirect(\"planner:index\")\n\n\n today = date.today()\n \n day_in_next_month = get_1st_day_of_next_month(today)\n day_in_far_month = get_1st_day_of_next_month(day_in_next_month)\n\n context = {\n \"topic\": \"Terminplaner\",\n \"weekdays\": days,\n \"today\": today,\n \"this_month\": get_month(today),\n \"next_month\": get_month(day_in_next_month),\n \"far_month\": get_month(day_in_far_month),\n \"days\": [day.to_dict() for day in Day.objects.filter(date__gte=today)],\n\n \"form\": ProposalForm()\n }\n\n return render(request, \"planner/index.html\", context)\n","repo_name":"GorenPnP/pnpWebsite","sub_path":"ppServer/planner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"12133914692","text":"import os\nimport sys\n\nimport torch\nimport torch.optim as optim \nfrom torch import nn\n\nfrom config import cfg\nfrom models import default_model_path, init_models_faulty, init_models\n\n__all__ = [\"training\"]\n\ndebug = False\ntorch.manual_seed(0)\n\nclass WarmUpLR(optim.lr_scheduler._LRScheduler):\n \"\"\"warmup_training learning rate scheduler\n Args:\n optimizer: optimzier(e.g. SGD)\n total_iters: totoal_iters of warmup phase\n \"\"\"\n def __init__(self, optimizer, total_iters, last_epoch=-1):\n\n self.total_iters = total_iters\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self):\n \"\"\"we will use the first m batches, and set the learning\n rate to base_lr * m / total_iters\n \"\"\"\n return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]\n\n\ndef training(\n trainloader,\n arch,\n dataset,\n in_channels,\n precision,\n retrain,\n checkpoint_path,\n force,\n device,\n fl,\n ber,\n pos,\n):\n\n \"\"\"\n Apply quantization aware training.\n :param trainloader: The loader of training data.\n :param arch: A string. The architecture of the model would be used.\n :param dataset: A string. The name of the training data.\n :param in_channels: An int. 
The input channels of the training data.\n :param precision: An int. The number of bits would be used to quantize\n the model.\n :param retrain: A boolean. Start from checkpoint.\n :param checkpoint_path: A string. The path that stores the models.\n :param force: Overwrite checkpoint.\n :param device: A string. Specify using GPU or CPU.\n \"\"\"\n\n model, checkpoint_epoch = init_models(arch, 3, precision, retrain, checkpoint_path, dataset) # Quantization Aware Training without using bit error!\n\n print(\"Training with Learning rate %.4f\" % (cfg.learning_rate))\n\n if dataset == 'cifar100': \n print('cifar100')\n opt = optim.SGD(model.parameters(), lr=cfg.learning_rate, momentum=0.9)\n #iter_per_epoch = len(trainloader)\n #warmup_scheduler = WarmUpLR(opt, iter_per_epoch * 1) # warmup = 1\n #train_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2)\n else:\n opt = optim.SGD(model.parameters(), lr=cfg.learning_rate, momentum=0.9)\n\n model = model.to(device)\n from torchsummary import summary\n if dataset == 'imagenet128':\n print('imagenet128')\n summary(model, (3, 128, 128))\n elif dataset == 'imagenet224':\n print('imagenet224')\n summary(model, (3, 224, 224))\n else:\n summary(model, (3, 32, 32))\n # model = torch.nn.DataParallel(model)\n torch.backends.cudnn.benchmark = True\n\n for x in range(checkpoint_epoch + 1, cfg.epochs):\n\n print(\"Epoch: %03d\" % x)\n\n running_loss = 0.0\n running_correct = 0\n for batch_id, (inputs, outputs) in enumerate(trainloader):\n \n inputs = inputs.to(device)\n outputs = outputs.to(device)\n\n opt.zero_grad()\n\n # Store original model parameters before\n # quantization/perturbation, detached from graph\n if precision > 0:\n list_init_params = []\n with torch.no_grad():\n for init_params in model.parameters():\n list_init_params.append(init_params.clone().detach())\n\n if debug:\n if batch_id % 100 == 0:\n print(\"initial params\")\n print(model.fc2.weight[0:3, 0:3])\n print(model.conv1.weight[0, 0, :, :])\n\n model.train()\n model_outputs = model(inputs) # pylint: disable=E1102\n\n _, preds = torch.max(model_outputs, 1)\n outputs = outputs.view(\n outputs.size(0)\n ) # changing the size from (batch_size,1) to batch_size.\n\n if precision > 0:\n if debug:\n if batch_id % 100 == 0:\n print(\"quantized params\")\n print(model.fc2.weight[0:3, 0:3])\n print(model.conv1.weight[0, 0, :, :])\n\n loss = nn.CrossEntropyLoss()(model_outputs, outputs)\n\n # Compute gradient of perturbed weights with perturbed loss\n loss.backward()\n\n # restore model weights with unquantized value\n # This step is not important because list_init_params == model.parameters()\n # Therefore, apply gradients on model.parameters() directly is OK.\n if precision > 0:\n with torch.no_grad():\n for i, restored_params in enumerate(model.parameters()):\n restored_params.copy_(list_init_params[i])\n\n if debug:\n if batch_id % 100 == 0:\n print(\"restored params\")\n print(model.fc2.weight[0:3, 0:3])\n print(model.conv1.weight[0, 0, :, :])\n\n # update restored weights with gradient\n opt.step()\n #if dataset == 'cifar100': \n # if x <= 1: # warmup = 1\n # warmup_scheduler.step()\n # else:\n # train_scheduler.step()\n # lr_scheduler.step()\n\n running_loss += loss.item()\n running_correct += torch.sum(preds == outputs.data)\n\n accuracy = running_correct.double() / (len(trainloader.dataset))\n print(\"For epoch: {}, loss: {:.6f}, accuracy: {:.5f}\".format(\n x, \n running_loss / len(trainloader.dataset), \n accuracy\n )\n )\n if (x+1)%10 == 0:\n\n 
model_path = default_model_path(\n cfg.model_dir, arch, dataset, precision, fl, ber, pos, x+1\n )\n\n if not os.path.exists(os.path.dirname(model_path)):\n os.makedirs(os.path.dirname(model_path))\n\n if os.path.exists(model_path) and not force:\n print(\"Checkpoint already present ('%s')\" % model_path)\n sys.exit(1)\n\n torch.save(\n {\n \"epoch\": x,\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": opt.state_dict(),\n \"loss\": running_loss / batch_id,\n \"accuracy\": accuracy,\n },\n model_path,\n )\n","repo_name":"IBM/NeuralFuse","sub_path":"zs_train.py","file_name":"zs_train.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"} +{"seq_id":"19639219130","text":"#!/usr/bin/env python\n#-*- coding: UTF-8 -*-\n\nimport random\nimport sys\n\nextracted_lines = 700\n\nlines = []\nfor line in sys.stdin:\n lines.append(line)\nprint(\" \".join(str(s) for s in random.sample(lines, extracted_lines)))\n","repo_name":"seppilee/eval_grammar","sub_path":"get_random_line.py","file_name":"get_random_line.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12271925067","text":"from __future__ import annotations\n\nimport sys\nimport types\nimport typing\nfrom typing import Any, Callable, Optional\n\nfrom dataclass_array import array_dataclass\nfrom dataclass_array import field_utils\nfrom dataclass_array.typing import TypeAlias\nfrom etils import array_types as array_types_lib\nimport typing_extensions # TODO(py38): Remove\n\n_LeafFn = Callable[[TypeAlias], None]\n\n_NoneType = type(None)\n\n\ndef _visit_leaf(hint: TypeAlias, leaf_fn: _LeafFn):\n \"\"\"Leaf node.\"\"\"\n if hint == _NoneType: # Normalize `None`\n hint = None\n return leaf_fn(hint)\n\n\ndef _visit_union(hint: TypeAlias, leaf_fn: _LeafFn):\n \"\"\"Recurse in `Union[x, y]`, `x | y`, or `Optional[x]`.\"\"\"\n item_hints = typing_extensions.get_args(hint)\n for item_hint in item_hints:\n _visit(item_hint, leaf_fn)\n\n\ndef _visit(hint: TypeAlias, leaf_fn: _LeafFn):\n \"\"\"Recurse in the type annotation tree.\"\"\"\n origin = typing_extensions.get_origin(hint)\n visit_fn = _ORIGIN_TO_VISITOR.get(origin, _visit_leaf)\n visit_fn(hint, leaf_fn)\n\n\n# Currently, only support `Union` and `Optional` but could be extended\n# to `dict`, `list`,...\n_ORIGIN_TO_VISITOR = {\n typing.Union: _visit_union,\n None: _visit_leaf, # Default origin\n}\nif sys.version_info >= (3, 10):\n _ORIGIN_TO_VISITOR[types.UnionType] = _visit_union # In Python 3.10+: x | y\n\n\ndef _get_leaf_types(hint: TypeAlias) -> list[type[Any]]:\n \"\"\"Extract the inner list of the types (`Optional[A] -> [A, None]`).\"\"\"\n all_types = []\n\n def _collect_leaf_types(hint):\n all_types.append(hint)\n\n _visit(hint, leaf_fn=_collect_leaf_types)\n\n return all_types\n\n\ndef get_array_type(hint: TypeAlias) -> Optional[Any]:\n \"\"\"Returns the array type, or `None` if no type was detected.\n\n Example:\n\n ```python\n get_array_type(f32[..., 3]) -> f32[..., 3]\n get_array_type(dca.Ray) -> dca.Ray['...']\n get_array_type(Optional[dca.Ray]) -> dca.Ray['...']\n get_array_type(dca.Ray | dca.Camera | None) -> dca.DataclassArray\n get_array_type(Any) -> None # Any not an array type\n get_array_type(dca.Ray | int) -> None # int not an array type\n get_array_type(list[dca.Ray]) -> None # list not an array type\n get_array_type(dca.Ray | f32['... 
3']) -> NotImplementedError (unsupported)\n ```\n\n Args:\n hint: The typing annotation\n\n Returns:\n The array type, or `None` if not type was detected\n \"\"\"\n leaf_types = _get_leaf_types(hint)\n # Filter `None` element (e.g. `Optional[dca.Ray]`)\n leaf_types = [l for l in leaf_types if l is not None]\n if not leaf_types:\n return None\n\n dc_types = []\n array_types = []\n other_types = []\n for leaf in leaf_types:\n if field_utils.DataclassWithShape.is_dca(leaf):\n dc_types.append(field_utils.DataclassWithShape.from_hint(leaf))\n elif isinstance(leaf, array_types_lib.ArrayAliasMeta):\n array_types.append(leaf)\n else:\n other_types.append(leaf)\n\n if other_types: # Non-array type\n return None\n if array_types and dc_types:\n raise NotImplementedError(\n f'{hint} mix dataclass and array. Please open an issue if you need '\n 'this feature.'\n )\n if dc_types:\n if len(dc_types) > 1:\n # Validate the inner shape\n common_shapes = {x.shape for x in dc_types}\n if len(common_shapes) != 1:\n raise NotImplementedError(\n f'{hint} mix dataclass with different inner shape. Please open an '\n 'issue if you need this feature.'\n )\n (common_shape,) = common_shapes\n return field_utils.DataclassWithShape(\n cls=array_dataclass.DataclassArray,\n shape=common_shape,\n )\n else:\n return dc_types[0]\n if array_types:\n if len(array_types) > 1:\n raise NotImplementedError(\n f'{hint} mix multiple array types. Please open an issue if you need '\n 'this feature.'\n )\n else:\n return array_types[0]\n","repo_name":"google-research/dataclass_array","sub_path":"dataclass_array/type_parsing.py","file_name":"type_parsing.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"95"} +{"seq_id":"44614292846","text":"import numpy as np\nfrom optimized_matrix.matrix import Matrix\n\nFREE = 0\nSTART = 1\nPATH = 2\nEND = 9\nBLOCKED = 8\n\n\ndef find_shortest_path(array_of_objects):\n min_object = None\n min_value = float('inf')\n\n for obj in array_of_objects:\n if 'length' in obj and obj['length'] < min_value:\n min_value = obj['length']\n min_object = obj\n return min_object\n\n\ndef pretty_print_all(grid, start, end, iteration):\n grid[start[0]][start[1]] = START\n grid[end[0]][end[1]] = END\n\n for i in range(1, len(iteration[\"path\"]) - 1):\n grid[iteration[\"path\"][i][0]][iteration[\"path\"][i][1]] = get_path_arrow(iteration[\"path\"][i],\n iteration[\"path\"][i + 1])\n\n row_up = \"┌─\" + \"──┬─\" * (len(grid[0]) - 1) + \"──┐\"\n print(row_up)\n\n for r in range(len(grid)):\n row = \"│ \" + \" │ \".join(to_cell_char(grid[r][c]) for c in range(len(grid[r]))) + \" │ \"\n row_down = \"├─\" + \"──┼─\" * (len(grid[r]) - 1) + \"──┤\" if r < len(grid) - 1 else \"└─\" + \"──┴─\" * (\n len(grid[r]) - 1) + \"──┘\"\n print(row)\n print(row_down)\n\n\ndef get_path_arrow(from_cell, to_cell):\n if from_cell[0] == to_cell[0] and from_cell[1] == to_cell[1] - 1:\n return '→'\n elif from_cell[0] == to_cell[0] and from_cell[1] == to_cell[1] + 1:\n return '←'\n elif from_cell[0] == to_cell[0] - 1 and from_cell[1] == to_cell[1]:\n return '↓'\n elif from_cell[0] == to_cell[0] + 1 and from_cell[1] == to_cell[1]:\n return '↑'\n return '?'\n\n\ndef to_cell_char(cell_id):\n if cell_id == FREE:\n return \" \"\n elif cell_id == START:\n return \"○\"\n elif cell_id == END:\n return \"●\"\n elif cell_id == BLOCKED:\n return \"▩\"\n else:\n return str(cell_id)\n\n\nif __name__ == '__main__':\n START = [0, 0]\n END = [5, 5]\n ITERATION = []\n\n for 
_ in range(1000):\n mtr = Matrix()\n mtr.init_grid(6, 6, 4)\n result = mtr.find_a_path(START, END)\n if result['result']:\n length = len(result['path'])\n ITERATION.append({'length': length, 'data': result})\n\n for _ in ITERATION:\n print(_['length'])\n shortest = find_shortest_path(ITERATION)\n\n print('-----------')\n print(np.array(shortest['data']['grid']))\n print('path -->', shortest['data']['path'])\n print('length -->', shortest['length'])\n\n pretty_print_all(shortest['data']['grid'], START, END, shortest['data'])\n","repo_name":"Tiinzzy/python-tests","sub_path":"Learning-Python/data-structure/optimized_matrix/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38209238753","text":"# Description of the problem can be found at \r\n\r\nn = int(input())\r\na, b = 0, 3\r\nif n==1:\r\n print(a)\r\nelse:\r\n for i in range(n-2):\r\n a, b = b, (2 * b + 3 * a) % 1000000007\r\n \r\n print(b)","repo_name":"ASSERT-KTH/C4B_APR","sub_path":"data_directory/1382_problem_id/541_author_id/Rejected.py","file_name":"Rejected.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7416910843","text":"import numpy as np\n\nimport tensorflow as tf\n\n\n\nX = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],dtype=np.float32)\nY = np.array([1, 1, 1, -1, -1, -1, 1, 1, 1, -1],dtype=np.float32)\n\ninitial_weight = np.ones_like(X,dtype=np.float32) / X.shape[0]\n\n\ndef cal_error(y_true, y_pred):\n error = y_true != y_pred\n\n e = error / y_true.shape[0]\n\n return e\n\n\ndef error_fn(e, epsilon=1e-6):\n\n alpha = np.log((1-e)/(e+epsilon))*0.5\n\n return alpha\n\ndef update_weight(weight,alpha,y_ture,y_predict):\n\n def tp_fn():\n update_weight = weight*np.exp(-alpha)/np.sum(weight)\n return update_weight\n\n def tn_fn():\n update_weight = weight * np.exp(alpha) / np.sum(weight)\n return update_weight\n\n new_weight = np.where(y_ture==y_predict,tp_fn(),tn_fn())\n\n return new_weight\n\n\ndef loss_fn(y_true,y_pred):\n loss = tf.losses.hinge_loss(y_true,y_predict)\n return loss\n\n\nlr = 0.0001\niter = 500\n\n\nX_input = tf.placeholder(dtype=tf.float32,shape=[None],name='input')\nY_input = tf.placeholder(dtype=tf.float32,shape=[None],name='label')\n\nW1 = tf.Variable(initial_value=tf.random_uniform([1]),dtype=tf.float32)\nB1 = tf.Variable(initial_value=tf.constant(0.0),dtype=tf.float32)\n\n\ny_predict = tf.nn.tanh(X_input*W1 + B1)\nloss = loss_fn(Y_input,y_predict)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)\n\ntrain_op = optimizer.minimize(loss)\n\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(iter):\n index = np.random.choice((X.shape[0] - 1), size=1)\n x = X[index]\n y = Y[index]\n feed_dict = {X_input: x, Y_input: y}\n l,p = sess.run([loss,y_predict],feed_dict=feed_dict)\n print('{} train: loss = {} , predict = {} , true = {}'.format(i,l,p,y))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Shanlans/datascientist","sub_path":"adaboost/simple_test.py","file_name":"simple_test.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5704343992","text":"\"\"\"\r\nUtilidades para uso en el curso.\r\n\"\"\"\r\nfrom itertools import product\r\n\r\nimport numpy as np\r\nimport sklearn.metrics as sk_mt\r\n\r\n\r\ndef 
get_allocations(n_assets):\r\n \"\"\"\r\n Calcula combinaciones posibles de alocaciones entre n activos\r\n Parámetros\r\n n_assets : Cantidad de activos en portfolio\r\n\r\n Resultado\r\n generator\r\n \"\"\"\r\n it = product(range(n_assets + 1), repeat=n_assets)\r\n\r\n return (np.array(e) / n_assets for e in it if sum(e) == n_assets)\r\n\r\n\r\ndef binary_classification_metrics(model, X_test, y_true):\r\n \"\"\"\r\n Computa ROC AUC, Accuracy Score y Kolmogorov-Smirnov para un modelo de\r\n clasificación binaria dado.\r\n Parámetros\r\n model : Modelo ya fitteado\r\n X_test : Dataset con features\r\n y_true : Etiquetras correctas para el dataset X_test\r\n\r\n Resultado\r\n tuple (ROC AUC, Accuracy, K-S)\r\n \"\"\"\r\n if hasattr(model, 'decision_function'):\r\n y_score = model.decision_function(X_test)\r\n else:\r\n y_score = model.predict_proba(X_test)[:, 1]\r\n\r\n y_pred = model.predict(X_test)\r\n fpr, tpr, _ = sk_mt.roc_curve(y_true, y_score)\r\n\r\n roc_auc = sk_mt.auc(fpr, tpr)\r\n accuracy = sk_mt.accuracy_score(y_true, y_pred)\r\n ks = np.max(tpr - fpr)\r\n\r\n return roc_auc, accuracy, ks\r\n","repo_name":"mselser95/Machine-Learning","sub_path":"mlfin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"12963392382","text":"\"\"\"\r\n Ejercicio 2: Encontrar el máximo común divisor entre dos números\r\n 'a' y 'b', donde ambos son mayores a 0\r\n\"\"\"\r\n\r\n# Función que recibe dos números y por medio del algoritmo de euclides\r\n# calcula el M.C.D. de estos \r\ndef mcd(numero1: int, numero2) -> int:\r\n # Función controladora\r\n if numero2 == 0:\r\n return numero1\r\n\r\n return mcd(numero2, numero1 % numero2)\r\n\r\n# Función principal\r\ndef main() -> None:\r\n # Creación de excepción al recibir 0 o menor\r\n MenorACeroExcepcion: Exception = Exception(\"El número ingresado es menor a 0\")\r\n\r\n # Entrada de datos validada\r\n while True:\r\n try:\r\n # Entrada del primer número convertido a tipo entero\r\n numero1: int = int(input(\"Ingrese el valor del primer número: \"))\r\n\r\n # Entrada del segundo número convertido a tipo entero\r\n numero2: int = int(input(\"Ingrese el valor del segundo número: \"))\r\n \r\n # Validación de que los números sean mayores a 0\r\n if numero1 <= 0 or numero2 <= 0:\r\n raise MenorACeroExcepcion\r\n break\r\n\r\n except ValueError:\r\n # Aviso de excepción por no ingreso de un número\r\n print(\"Por favor, ingrese solo números.\")\r\n except MenorACeroExcepcion:\r\n # Aviso de excepción porque alguno de los valores es menor a 1\r\n print(\"Por favor, ingrese solo números mayores a 0\")\r\n except:\r\n # Aviso por excepcion no controlada\r\n print(\"Ha ocurrido un error inesperado\")\r\n exit(0)\r\n\r\n # Impresión de los resultados\r\n print(f\"El M.C.D. 
de {numero1} y {numero2} es {mcd(numero1, numero2)}\")\r\n\r\nmain()","repo_name":"xAGH/EAM","sub_path":"II Semestre/Analisis de algoritmos/Evidencias/recursividad/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7699135018","text":"import numpy as np\nimport sys\nfrom scipy.sparse import coo_matrix, find, csr_matrix\nfrom logistic_mf import LogisticMF\nfrom collaborative import get_collab_matrix\n\n\ndef cross_validation(M, k):\n # randomly divide nonzero entries in M into k sets\n # nonzero entries represented by 3 arrays\n (i, j, v) = find(M) # i - user, j - song, v - listening count\n idx = np.arange(len(i)) # nonzero entries indices corresponding to i,j,v\n np.random.shuffle(idx)\n test_sets = np.array_split(idx, k) # split entries into k subarrays\n avg_mpr = 0.0\n\n for test_set_i in test_sets:\n # convert test set to dict of user: (dict of song: listen_count)\n test_set = {}\n for entry in test_set_i:\n user = i[entry]\n song = j[entry]\n listen_count = v[entry]\n\n if user not in test_set:\n test_set[user] = {}\n test_set[user][song] = listen_count\n\n # create training set\n training_M = M.copy()\n for entry in test_set_i:\n training_M[i[entry], j[entry]] = 0\n training_M.eliminate_zeros()\n training_M = training_M.tocoo()\n\n # call logistic_mf w/ training_M\n lmf = LogisticMF(training_M, n_latent_factors=5, alpha=2,\n l2_regularization=1, gamma=0.5, iterations=5)\n lmf.train(partition_size=(500, 500))\n\n # calculate MPR\n listen_sum = 0.0\n rank_sum = 0.0\n user = 0\n for user_song_ranks in lmf.get_rank_matrix():\n if user not in test_set:\n user += 1\n continue\n\n for song in test_set[user].keys():\n rank = user_song_ranks[song]\n\n listen_sum += test_set[user][song]\n rank_sum += test_set[user][song] * rank\n\n user += 1\n\n MPR = rank_sum / listen_sum\n print(MPR)\n avg_mpr += MPR\n\n return avg_mpr / k\n\n\nif __name__ == \"__main__\":\n arg = sys.argv[1] if len(sys.argv) > 1 else \"\"\n scale = -arg.count(\"s\")\n user_labels, track_labels, M = get_collab_matrix(\n scale=10 ** scale, fp=\"mid_triplets.csv\"\n )\n\n avg_mpr = cross_validation(M.tocsr(), 10)\n print(\"Average MPR: \" + str(avg_mpr))\n","repo_name":"timothy-e/music-recommender","sub_path":"performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"4299004548","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\n\na = Analysis(['START.pyw', 'authorization.py', 'bot_ships.py', 'bot_shooting.py', 'checks.py', 'decks.py', 'field.py', 'four_deck_ship.py', 'one_deck_ship.py', 'plr_ships.py', 'settings.py', 'ship.py', 'statistic.py', 'three_deck_ship.py', 'two_deck_ship.py', 'users_data_base.py'],\n pathex=['C:\\\\Users\\\\kolag\\\\Desktop\\\\sea_battle_final'],\n binaries=[],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='START',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n upx_exclude=[],\n runtime_tmpdir=None,\n console=False 
)\n","repo_name":"Nikola1001/sea_battle","sub_path":"START.spec","file_name":"START.spec","file_ext":"spec","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16980934284","text":"from wtforms import Form, StringField, validators, BooleanField, DateTimeField\nfrom views.helper.exceptions import FormLoadException\nfrom models.device import Device\nfrom models.interface import Interface\nfrom models.equipment import Equipment\nfrom . import ObjectIdField\nfrom . import JsonField\n\n\nclass CreateOrUpdateForm(Form):\n name = StringField(validators=[validators.Length(max=125), validators.DataRequired()]) # 线缆名称\n signalType = StringField(validators=[validators.Length(max=125), validators.DataRequired()]) # 信号类型\n shieldType = StringField(validators=[validators.Length(max=125), validators.DataRequired()]) # 屏蔽类型\n note = StringField(validators=[validators.Length(max=125), validators.DataRequired()]) # 备注\n isCustom = BooleanField(default=False) # 是否为定制\n parameterList = StringField(validators=[validators.Length(max=125)]) # 如果是定制线缆,参数表的ID\n startInterfaceId = ObjectIdField(validators=[validators.DataRequired()]) # 起始接口ID\n endInterfaceId = ObjectIdField(validators=[validators.DataRequired()]) # 结束接口ID\n\n def load(self, cable):\n cable.name = self.name.data\n cable.signal_type = self.signalType.data\n cable.shield_type = self.shieldType.data\n cable.note = self.note.data\n cable.is_custom = self.isCustom.data\n cable.parameter_list = self.parameterList.data\n\n try:\n start_interface = Interface.objects().get(id=self.startInterfaceId.data)\n start_device = start_interface.parent_device\n start_equipment = start_interface.parent_equipment\n\n except Interface.DoesNotExist:\n raise FormLoadException('起始接口不存在,id: ' + str(self.startInterfaceId.data))\n except Device.DoesNotExist:\n raise FormLoadException('起始接口所属的设备不存在,接口id: ' + str(self.startInterfaceId.data))\n except Equipment.DoesNotExist:\n raise FormLoadException('起始接口所属设备的装置不存在,接口id: ' + str(self.startInterfaceId.data))\n else:\n cable.start_interface = start_interface.id\n cable.start_device = start_device.id\n cable.start_equipment = start_equipment.id\n\n try:\n end_interface = Interface.objects().get(id=self.endInterfaceId.data)\n end_device = end_interface.parent_device\n end_equipment = end_interface.parent_equipment\n\n except Interface.DoesNotExist:\n raise FormLoadException('结束接口不存在,id: ' + str(self.endInterfaceId.data))\n except Device.DoesNotExist:\n raise FormLoadException('结束接口所属的设备不存在,接口id: ' + str(self.endInterfaceId.data))\n except Equipment.DoesNotExist:\n raise FormLoadException('结束接口所属设备的装置不存在,接口id: ' + str(self.endInterfaceId.data))\n else:\n cable.end_interface = end_interface.id\n cable.end_device = end_device.id\n cable.end_equipment = end_equipment.id\n\n\nclass SearchForm(Form):\n searchKey = StringField()\n name = StringField()\n signalType = StringField()\n shieldType = StringField()\n isCustom = BooleanField()\n note = StringField()\n orderBy = JsonField()\n\n startEquipmentId = ObjectIdField()\n startDeviceId = ObjectIdField()\n startInterfaceId = ObjectIdField()\n\n endEquipmentId = ObjectIdField()\n endDeviceId = ObjectIdField()\n endInterfaceId = ObjectIdField()\n\n createUserId = ObjectIdField()\n createdAtStart = DateTimeField()\n createdAtEnd = 
DateTimeField()\n","repo_name":"Mirrorystal/example","sub_path":"src/backend/views/forms/cable.py","file_name":"cable.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7395700189","text":"import boto3\nimport os\nfrom base_database import BaseDatabase\n\ntable_name = os.getenv(\"DYNAMO_TABLE_NAME\", \"query\")\n\n\nclass DynamoDB(BaseDatabase):\n def __init__(self) -> None:\n super().__init__()\n self._dynamodb = boto3.resource(\"dynamodb\")\n self._table = self.get_client().Table(table_name)\n\n def get_table(self):\n return self._table\n\n def get_client(self):\n return self._dynamodb\n\n def update_item(self, pk: str, attribute: str):\n item = self.get_table().update_item(\n Key={\n \"pk\": pk,\n \"sk\": pk,\n }, \n UpdateExpression=\"set attributeone=:a\",\n ExpressionAttributeValues={\n ':a': attribute,\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n\n return item\n","repo_name":"pazsaragi/cloud-octo-examples","sub_path":"event-sourcing-kinesis/services/syncService/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"22204309033","text":"import sys\r\ninput=sys.stdin.readline\r\nn,m=map(int,input().split(\" \"))\r\n\r\ndef count(n):\r\n #n!을 소인수 분해 하였을 때 2와 5의 갯수 구하기.\r\n count_2=0\r\n count_5=0\r\n curnum=2\r\n interest=n\r\n while interest>=curnum:\r\n count_2+=interest//curnum\r\n curnum*=2\r\n curnum=5\r\n interest=n\r\n while interest>=curnum:\r\n count_5+=interest//curnum\r\n curnum*=5\r\n return count_2,count_5\r\n\r\nover=count(n)\r\nbehind1=count(m)\r\nbehind2=count(n-m)\r\nanswer=min(over[0]-(behind1[0]+behind2[0]),over[1]-(behind1[1]+behind2[1]))\r\nprint(answer)","repo_name":"MinwooPark96/BaekJoon_SorceCode","sub_path":"백준/Silver/2004. 조합 0의 개수/조합 0의 개수.py","file_name":"조합 0의 개수.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"27158850436","text":"import os\n__version__ = '0.1'\nfrom public import app\nfrom module.instances import Instances\n\n\nif __name__ == '__main__':\n try:\n Instances.load_instaces()\n except Exception as e:\n print(f\"unable lunch service. 
{e}\") \n exit(1)\n port = int(os.environ.get(\"PORT\", 8080))\n app.run('0.0.0.0', port=port)","repo_name":"supapo/aws_ec2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11186814038","text":"# -*- coding: utf-8 -*-\nfrom openerp import api, fields, models, _\nimport datetime\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nimport calendar\nfrom io import BytesIO,StringIO\nimport xlwt\nimport io\nfrom base64 import b64decode\nimport base64\nfrom openerp import http\nfrom odoo.http import request\nfrom odoo.addons.web.controllers.main import serialize_exception,content_disposition\nfrom odoo.exceptions import ValidationError\n\nclass Binary(http.Controller):\n @http.route('/opt/download', type='http', auth=\"public\")\n @serialize_exception\n def download_document(self,model,field,id,filename=None, **kw):\n \"\"\" Download link for files stored as binary fields.\n :param str model: name of the model to fetch the binary from\n :param str field: binary field\n :param str id: id of the record from which to fetch the binary\n :param str filename: field holding the file's name, if any\n :returns: :class:`werkzeug.wrappers.Response`\n \"\"\"\n cr, uid, context = request.cr, request.uid, request.context\n env = api.Environment(cr, 1, {}) \n out_brw=env['output'].browse(int(id))\n filecontent = base64.b64decode(out_brw.xls_output or '')\n if not filecontent:\n return request.not_found()\n else:\n if not filename:\n filename = '%s_%s' % (model.replace('.', '_'), id)\n return request.make_response(filecontent,\n [('Content-Type', 'application/octet-stream'),\n ('Content-Disposition', content_disposition(filename))])\n\n\n#class DailyPaymentReport(models.TransientModel):\n#\n# _name = \"payment.report\"\n# _description = \"Payment Report\"\n \nclass ResCompany(models.Model):\n _inherit = 'res.company'\n _description = \"Payment Report\"\n\n \n def print_excel_report(self):\n cr= self.env.cr\n workbook = xlwt.Workbook()\n pay_ids=self.env['account.payment'].search([('payment_date','=','2018-10-26')])\n style1 = xlwt.easyxf('pattern: pattern solid, fore_colour ice_blue;alignment: horiz centre;font: bold on; borders: left medium, top medium, bottom medium,right medium')\n style2 = xlwt.easyxf('pattern: pattern solid, fore_colour ivory;alignment: horiz centre;font: bold on; borders: left medium, top medium, bottom medium,right medium')\n Header_Text ='Payment Report'\n sheet = workbook.add_sheet('Payment Report')\n# sheet.row(0).height = 256 * 4\n sheet.col(0).width = 256 * 30\n sheet.col(1).width = 256 * 30\n sheet.col(2).width = 256 * 30\n sheet.col(3).width = 256 * 30\n sheet.col(4).width = 256 * 30\n sheet.col(5).width = 256 * 30\n sheet.write_merge(0, 0,0,5,'PAYMENT REPORT',style1)\n sheet.write_merge(1,2, 0,0,'PAYMENT ID',style1)\n sheet.write_merge(1,2, 1,1,'PARTNER ANME',style1)\n sheet.write_merge(1,2, 2,2,'AMOUNT',style1)\n sheet.write_merge(1,2, 3,3,'PAYMENT DATE',style1)\n sheet.write_merge(1,2, 4,4,'USERNAME',style1)\n sheet.write_merge(1,2, 5,5,'INVOICE STATUS',style1)\n sheet.write_merge(1,2, 6,6,'INVOICE AMOUNT',style1)\n sheet.write_merge(1,2, 7,7,'INVOICE DUEDATE',style1)\n sheet.write_merge(1,2, 8,8,'INVOICE REFERENCE',style1)\n sheet.write_merge(1,2, 9,9,'INVOICE ORIGIN',style1)\n# sheet.write(3,0,\"\",style1)\n# row=3\n# for inv_id in inv_ids:\n## sheet.write(row,0,inv_id.client_service_manager_id.name)\n# 
sheet.write(row,1,inv_id.branch_id.name)\n# sheet.write(row,2,inv_id.amount_total)\n# row+=1\n stream =BytesIO()\n workbook.save(stream)\n cr.execute(\"\"\" DELETE FROM output\"\"\")\n attach_id = self.env['output'].create({'name':Header_Text+'.xls', 'xls_output': base64.b64encode(stream.getvalue())})\n return {\n 'type' : 'ir.actions.act_url',\n 'url': '/opt/download?model=output&field=xls_output&id=%s&filename=Payment Report.xls'%(attach_id.id),\n 'target': 'new',\n } ","repo_name":"r-bhargavi/aalmir","sub_path":"aalmir_custom/wizard/report_payment.py","file_name":"report_payment.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20969402536","text":"\"\"\"empty message\n\nRevision ID: 3b327cf85c18\nRevises: c972848e38f6\nCreate Date: 2017-09-20 17:56:30.449520\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '3b327cf85c18'\ndown_revision = 'c972848e38f6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('shoppinglists', 'date_created')\n op.drop_column('shoppinglists', 'date_modified')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('shoppinglists', sa.Column('date_modified', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))\n op.add_column('shoppinglists', sa.Column('date_created', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n","repo_name":"skafis/flask-api","sub_path":"migrations/versions/3b327cf85c18_.py","file_name":"3b327cf85c18_.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11095870645","text":"import os\nimport shutil\nimport cv2\n\ndef main():\n\n repo_dir = os.getcwd()\n input_dir = f\"{repo_dir}/parameters/input\"\n output_dir = f\"{repo_dir}/parameters/output\"\n\n video_files = [f for f in os.listdir(input_dir) if f.lower().endswith('.mp4')]\n if len(video_files) == 0:\n print(\"Input directory does not contain video.\")\n return\n elif len(video_files) > 1:\n print(\"Input directory contains multiple videos. 
Should only contain one video.\")\n return\n elif len(video_files) == 1:\n dataset_name = video_files[0].replace(\".mp4\", \"\")\n\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n output_dataset_dir = f\"{output_dir}/{dataset_name}\"\n output_data_dir = f\"{output_dataset_dir}/data\"\n os.makedirs(output_dataset_dir)\n os.makedirs(output_data_dir)\n\n video_dir = f\"{input_dir}/{video_files[0]}\"\n cap = cv2.VideoCapture(video_dir)\n if not cap.isOpened():\n print(\"Error opening video file.\")\n return\n fps = cap.get(cv2.CAP_PROP_FPS)\n frame_rate = 10\n frame_step = int(round(fps/frame_rate))\n\n frame_index = 0\n data_index = 0\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n if frame_index % frame_step == 0:\n frame_output_dir = f\"{output_dir}/{dataset_name}/data/{data_index:06d}.jpg\"\n cv2.imwrite(frame_output_dir, frame)\n data_index += 1\n\n frame_index += 1\n\n cap.release()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"AwrodHaghiTabrizi/UMARV-CV-LaneDetection","sub_path":"src/scripts/get_frames_from_video.py","file_name":"get_frames_from_video.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"2662543107","text":"import os\nfrom flask import Flask, json, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom src.utils.logger import init_logger\nfrom src.db.db import db,EventModel\nfrom flask_marshmallow import Marshmallow\n\napp = Flask(__name__)\n\n# injects environment variable here\nenv_config = os.getenv(\"APP_SETTINGS\", \"config.DevelopmentConfig\")\napp.config.from_object(env_config)\n\n# connect db with our app\ndb.init_app(app)\nma = Marshmallow(app)\n\nclass EventModelSchema(ma.Schema):\n class Meta:\n fields = ('eid','name','eventtype','contactnumber','startdate','enddate','address','host','description','chatId')\n\nevent_model_schema = EventModelSchema()\nevents_model_schema = EventModelSchema(many=True)\n\n@app.before_first_request\ndef create_table():\n db.create_all()\n logger.info(\"Events relation created\")\n\n@app.route(\"/\",methods=[\"GET\"])\ndef index():\n return jsonify(\"Welcome to events service\")\n\n@app.route('/api/events/',methods = [\"POST\"])\ndef createNewEvent():\n eventInfo = request.get_json()\n try:\n add_new_event(eventInfo)\n return jsonify(eventInfo)\n except:\n return jsonify(\"Something went wrong. Please check the data and try again\")\n\n\n@app.route('/api/events/',methods = [\"GET\"])\ndef getEvents():\n events = EventModel.query.all()\n result = events_model_schema.dump(events)\n return jsonify(result)\n\n@app.route('/api/events/',methods = [\"GET\"])\ndef getEventbyId(eid):\n event = EventModel.query.filter_by(eid=eid).first()\n result = event_model_schema.dump(event)\n return jsonify(result)\n\n@app.route('/api/events//update',methods = [\"POST\"])\ndef updateEvent(eid):\n try: \n eventInfo = request.get_json()\n event = EventModel.query.filter_by(eid=eid).first()\n if event:\n # delete th event first from the database\n db.session.delete(event)\n db.session.commit()\n add_new_event(eventInfo)\n return jsonify(eventInfo)\n except:\n return jsonify(\"Something went wrong. 
Please check the data and try again\")\n\n@app.route('/api/events//delete', methods=['DELETE'])\ndef delete(eid):\n event = EventModel.query.filter_by(eid=eid).first()\n if event:\n db.session.delete(event)\n db.session.commit()\n return jsonify(\"Successfully delete event: \"+str(event.name))\n return jsonify(\"Could not find the event to delete!\")\n\n@app.route('/api/events//chat', methods=['POST'])\ndef createChat(eid):\n event = EventModel.query.filter_by(eid=eid).first()\n if event:\n event.chatId = event.eid\n db.session.commit()\n return jsonify(\"Successfully created chat \"+str(event.chatId))\n return jsonify(\"Could not find the event\")\n\n@app.route('/api/events//chat', methods=['DELETE'])\ndef deleteChat(eid):\n event = EventModel.query.filter_by(eid=eid).first()\n if event:\n id = event.chatId\n event.chatId = None\n db.session.commit()\n return jsonify(\"Successfully deleted chat \"+str(id))\n return jsonify(\"Could not find the chat to delete!\")\n\ndef add_new_event(eventInfo):\n # extract all the data from json body\n name = eventInfo[\"name\"]\n eventtype = eventInfo[\"eventtype\"]\n contactnumber = eventInfo[\"contactnumber\"]\n startdate = eventInfo[\"startdate\"]\n enddate = eventInfo[\"enddate\"]\n address = eventInfo[\"address\"]\n host = eventInfo[\"host\"]\n description = eventInfo[\"description\"]\n \n # write new event data to the db\n new_event = EventModel(name=name,eventtype=eventtype,contactnumber=contactnumber,startdate=startdate,enddate=enddate,address=address,host=host,description=description)\n db.session.add(new_event)\n db.session.commit()\n\nif __name__ == '__main__':\n logger = init_logger(__name__, testing_mode=False)\n app.run(host=\"localhost\",port=\"5000\",debug=app.config.get(\"DEBUG\"))","repo_name":"Apahadi73/ubiquitous-memory","sub_path":"events/Eenv/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3586665570","text":"\"\"\"\nTCP client porgram to request operations from the server (GUI_Main.py).\nOnce the connection is built:\n if the client send 'light on' and the light brightness(0~255), the GUI will turn on the light;\n if the client send 'light off', the GUI will turn off the light;\n if the client sends 'compute', the GUI will compute the dof6;\n if the client sends 'save', the GUI will save the log file;\n if the client sends 'disconnect', the GUI will disconnect from the client.\n\nAuthor: Siqi Dai\n\"\"\"\n\n\nimport socket\n\n\ndef Client():\n HOST = 'localhost' # server's hostname or IP address (currently we use localhost)\n PORT = 65432 # port used by the server\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n try:\n s.connect((HOST, PORT))\n except:\n pass\n message = input(\"\\nMessage to server? (light on/light off/compute/save/disconnect) \")\n s.send(message.encode())\n if message.encode() == b'disconnect':\n print(\"Client exits.\")\n data = s.recv(1024)\n print(data.decode())\n exit()\n elif message.encode() == b'light on':\n while True:\n message2 = input(\"Brightness (0~255): \") # let the user set light brightness\n if 0 <= int(message2.encode()) <= 255: break # check if the user input is valid\n s.send(message2.encode())\n data = s.recv(1024)\n print(data.decode())\n s.close()\n Client()\n elif message.encode() == b'light off':\n data = s.recv(1024)\n print(data.decode())\n s.close()\n Client()\n elif message.encode() == b'compute':\n message2 = input(\"Which car model? 
\")\n s.send(message2.encode())\n data = s.recv(1024)\n print(data.decode())\n s.close()\n Client()\n elif message.encode() == b'save':\n data = s.recv(1024)\n print(data.decode())\n s.close()\n Client()\n else:\n print('Invalid input. Enter again.')\n Client()\n\n\nClient()\n","repo_name":"SiqiD47/vision2019","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"13449671759","text":"import os\nimport cv2\nimport numpy as np\nimport torch\nimport requests\nimport time\n\n# Define the directories to save images and blur detection results\nimage_dir = \"Images\"\noutput_folder = \"Output\"\n\n# Create the directories if they don't exist\nif not os.path.exists(image_dir):\n os.makedirs(image_dir)\nif not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n# Define the YOLOv7 model and its hyperparameters\n# model = torch.hub.load('.', 'custom', 'yolov7.pt', source='local') \nmodel = torch.hub.load('Jayravalcode/yolov5_cloud', 'yolov5s', pretrained=True)\nconf_thres = 0.75\niou_thres = 0.5\n\n\n# Create the directories if they don't exist\n\nwith open(\"urls.txt\", \"r\") as f:\n URLs = [line.strip() for line in f]\n# Continuously wait for new images in the input folder\nwhile True:\n\n for URL in URLs:\n r = requests.get(url=URL)\n data = r.json()\n did = list(data[\"data\"][\"live-record\"].keys())\n\n rtmp = []\n for i in did:\n rtmp.append(\"rtmp://\" + URL.split(\"/\")[2].split(\":\")[0] + \":80/live-record/\" + i)\n\n for rtmp_link in rtmp:\n # Define the time gap between frame captures (in seconds)\n time_gap = 1\n\n # Capture frames from the RTMP link and save them in the image directory\n cap = cv2.VideoCapture(rtmp_link)\n for i in range(1):\n ret, frame = cap.read()\n if ret:\n # Get the current timestamp and format it\n timestamp = time.time()\n timestamp_str = time.strftime(f\"{rtmp_link.split('/')[4]}_%H:%M:%S\", time.localtime(timestamp))\n\n # Save the image in the image directory\n image_filename = f\"{timestamp_str}.jpg\"\n image_path = os.path.join(image_dir, image_filename)\n cv2.imwrite(image_path, frame)\n\n image_files = [f\"./Images/\"+f for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, f)) and f.endswith('.jpg')]\n results = model(image_files)\n # results.save(\"Output\")\n detections = results.pred[0][results.pred[0][:, 4] > conf_thres]\n print(\"==================\")\n print(\"Results:-\",results.pred[0][:,4]) \n print(detections)\n print(\"length of detections:-\",len(detections),\",\",\"Checker:=>RTMP Link\" + \" \" + rtmp_link + \" \" + \"is done\")\n print(\"==================\")\n if len(detections) > 0:\n results.save(\"Output\")\n os.system(f\"rm -r ./Images/*\")\n i = i+1\n else:\n break \n #time.sleep(180)\n cap.release()\n print(\"RTMP Link\" + \" \" + rtmp_link + \" \" + \"is done\")\n \n \n\n","repo_name":"Jayravalcode/yolov5_cloud","sub_path":"main_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"23797491126","text":"f=open(\"main.tex\",\"r\")\r\ng=f.read()\r\nf.close()\r\n\r\ndef inputs(texfile):\r\n x=0\r\n while x < len(texfile):\r\n if texfile[x:x+7]==\"\\\\input{\":\r\n y=\"\"\r\n z=x+7\r\n while not(texfile[z]==\"}\"):\r\n y+=texfile[z]\r\n z+=1\r\n z+=1\r\n f=open(y+\".tex\",\"r\")\r\n h=f.read()\r\n f.close()\r\n return 
texfile[:x]+inputs(h + texfile[z:])\r\n x+=1\r\n return texfile\r\n\r\na=inputs(g)\r\nb=open(\"main_concatenated.tex\",\"w\")\r\nb.write(a)\r\nb.close()\r\n","repo_name":"kayakmath/latex-file-tools","sub_path":"latexconcatenator.py","file_name":"latexconcatenator.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13217080776","text":"import argparse\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport signal\n\nscripts_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(scripts_path)\n\nimport devtools_paths\n\n\ndef parse_options(cli_args):\n parser = argparse.ArgumentParser(description='Run boot perf test')\n parser.add_argument('--runs', help='Number of runs', type=int)\n parser.add_argument('--chrome-binary', dest='chrome_binary', help='path to Chromium binary')\n return parser.parse_args(cli_args)\n\n\ndef check_chrome_binary(chrome_binary):\n return os.path.exists(chrome_binary) and os.path.isfile(chrome_binary) and os.access(chrome_binary, os.X_OK)\n\n\ndef popen(arguments, cwd=None, env=None):\n return subprocess.Popen(arguments, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)\n\n\ndef to_platform_path_exact(filepath):\n if not is_cygwin:\n return filepath\n output, _ = popen(['cygpath', '-w', filepath]).communicate()\n # pylint: disable=E1103\n return output.strip().replace('\\\\', '\\\\\\\\')\n\n\ndef start_hosted_mode_server():\n proc = popen([devtools_paths.node_path(), devtools_paths.hosted_mode_script_path()])\n hosted_mode_pid = proc.pid\n return hosted_mode_pid\n\n\ndef stop_hosted_mode_server(hosted_mode_pid):\n if hosted_mode_pid is None:\n return\n\n os.kill(hosted_mode_pid, signal.SIGTERM)\n hosted_mode_pid = None\n\n\ndef run_boot_perf_test(chrome_binary, runs):\n boot_perf_errors_found = False\n exec_command = [devtools_paths.node_path(), devtools_paths.boot_perf_test_path(), '--progress=false', '--runs=%s' % runs]\n\n env = os.environ.copy()\n env['CHROME_BIN'] = chrome_binary\n\n cwd = devtools_paths.devtools_root_path()\n\n boot_perf_proc = popen(exec_command, cwd=cwd, env=env)\n (boot_perf_proc_out, _) = boot_perf_proc.communicate()\n\n if boot_perf_proc.returncode != 0:\n boot_perf_errors_found = True\n\n print(boot_perf_proc_out)\n return boot_perf_errors_found\n\n\ndef main():\n OPTIONS = parse_options(sys.argv[1:])\n is_cygwin = sys.platform == 'cygwin'\n chrome_binary = None\n\n # Default to the downloaded / pinned Chromium binary\n downloaded_chrome_binary = devtools_paths.downloaded_chrome_binary_path()\n if check_chrome_binary(downloaded_chrome_binary):\n chrome_binary = downloaded_chrome_binary\n\n # Override with the arg value if provided.\n if OPTIONS.chrome_binary:\n chrome_binary = OPTIONS.chrome_binary\n if not check_chrome_binary(chrome_binary):\n print('Unable to find a Chrome binary at \\'%s\\'' % chrome_binary)\n sys.exit(1)\n\n if (chrome_binary is None):\n print('Unable to run, no Chrome binary provided')\n sys.exit(1)\n\n runs = 37\n if OPTIONS.runs:\n runs = int(OPTIONS.runs)\n\n print('Running boot perf test with %s iteration(s). This may take some time...' 
% runs)\n print('Using Chromium binary (%s)\\n' % chrome_binary)\n\n errors_found = False\n hosted_mode_pid = None\n try:\n hosted_mode_pid = start_hosted_mode_server()\n errors_found = run_boot_perf_test(chrome_binary, runs)\n except Exception as err:\n print(err)\n finally:\n stop_hosted_mode_server(hosted_mode_pid)\n\n if errors_found:\n print('ERRORS DETECTED')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"youtube/cobalt","sub_path":"third_party/devtools/scripts/test/run_boot_perf_check.py","file_name":"run_boot_perf_check.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"95"} +{"seq_id":"10831954694","text":"from taskcontrol.lib import LogBase\r\nimport logging\r\n\r\nlog = LogBase()\r\n\r\n\r\nif log:\r\n \r\n lg = log.logger_create({\r\n \"name\": \"logtest\",\r\n \"handlers\": {\"handler\": {\"type\": \"file\", \"format\": \"%(levelname)s - %(asctime)s - %(name)s - %(message)s\", \"path\": \"./demos/logs/\", \"file\": \"filename.log\"}, \"level\": logging.INFO}\r\n })\r\n if lg:\r\n l = log.log({\r\n \"name\": \"logtest\",\r\n \"level\": \"info\",\r\n \"message\": \"This is a test\"\r\n })\r\n if not l:\r\n print(\"Error in logging\")\r\n d = log.logger_delete(\"logtest\")\r\n if d:\r\n print(\"Log deleted\")\r\n else:\r\n print(\"Unable to delete\")\r\n","repo_name":"taskcontrols/py-taskcontrol","sub_path":"demos/main_logger.py","file_name":"main_logger.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"36763831815","text":"import sys\n\n\nclass Queue():\n def __init__(self):\n self.items = []\n def enqueue(self, item):\n self.items.append(item)\n def dequeue(self):\n return self.items.pop(0)\n def isEmpty(self):\n return not self.items\n\n\ndef move_line(line, direction):\n combined = False\n nums = [x for x in line if x > 0]\n if direction == 1:\n line = [0 for _ in range(len(line) - len(nums))] + nums\n for i in reversed(range(len(line))):\n if i==0: break\n if line[i] == 0: break\n if line[i] == line[i-1]:\n line[i] *= 2\n line[:i] = [0] + line[:i-1]\n else:\n line = nums + [0 for _ in range(len(line) - len(nums))]\n for i in range(len(line)-1):\n if line[i] == 0: break\n if line[i] == line[i+1]:\n line[i] *= 2\n line[i+1:] = line[i+2:] + [0]\n return line\n\n\ndef move(grid, direction):\n new_grid = []\n for xgrid in grid:\n new_grid.append(xgrid[:])\n if direction[1] == 0:\n for i in range(len(new_grid)):\n new_grid[i] = move_line(new_grid[i], direction[0])\n else:\n for i in range(len(new_grid[0])):\n ygrid = move_line([xgrid[i] for xgrid in new_grid], direction[1])\n for j in range(len(ygrid)):\n new_grid[j][i] = ygrid[j]\n return new_grid\n\n\ndef biggest_in_grid(grid):\n biggest = 0\n for xgrid in grid:\n for x in xgrid:\n if x > biggest:\n biggest = x\n return biggest\n\n\ngrid = []\nread = sys.stdin.readline\nn = int(read())\nfor i in range(n):\n grid.append([int(x) for x in read().split()])\nif n == 1:\n print(grid[0][0])\nelse:\n ans = 0\n directions = [[1,0],[-1,0],[0,1],[0,-1]]\n q = Queue()\n q.enqueue((0, grid))\n while not q.isEmpty():\n depth, cur = q.dequeue()\n for direction in directions:\n new_grid = move(cur, direction)\n new_biggest = biggest_in_grid(new_grid)\n if new_biggest > ans:\n ans = new_biggest\n if depth < 4:\n q.enqueue((depth+1, new_grid))\n print(ans)\n","repo_name":"rokrokss/baekjoon","sub_path":"12100 - 2048 
(Easy)/12100.py","file_name":"12100.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"12052453472","text":"import sys\nimport requests\nfrom requests_toolbelt import MultipartEncoder\nfrom os.path import dirname, join\nfrom com.chaquo.python import Python\nimport uuid\nimport json\nimport base64\n\ndef translate(target_Language,image_dir,file_name,efile_name):\n\n image_dirs = str(image_dir)\n filenames = join(dirname(image_dirs),file_name)\n\n data = {\n 'source': target_Language,\n 'target': 'ko',\n 'image': (filenames, open(filenames, 'rb'), 'application/octet-stream', {'Content-Transfer-Encoding': 'binary'})\n }\n m = MultipartEncoder(data, boundary=uuid.uuid4())\n\n headers = {\n \"Content-Type\": m.content_type,\n \"X-NCP-APIGW-API-KEY-ID\": \"w5lgfrssck\",\n \"X-NCP-APIGW-API-KEY\": \"tct9yx0oteeuixAnAdIOETTtKiZFhixSLzNw3vvM\"\n }\n\n url = \"https://naveropenapi.apigw.ntruss.com/image-to-image/v1/translate\"\n res = requests.post(url, headers=headers, data=m.to_string())\n\n resObj = json.loads(res.text)\n imageStr = resObj.get(\"data\").get(\"renderedImage\")\n\n return imageStr\n\n","repo_name":"scok/Android-Translate","sub_path":"app/src/main/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"43889001466","text":"def jscheduling(jb,n):\n t = 1\n p = 0\n mp = 0\n count = 0\n while t<= n:\n for i in jb:\n if i[1] == t:\n if i[2]>p:\n count+=1\n p = i[2]\n t = t+1\n mp += p\n p = 0\n return (count-1,mp)\n\nn = 4\njobs = [(1,4,20),(2,1,10),(3,1,40),(4,1,30)]\nprint(jscheduling(jobs,n))","repo_name":"yaswanth-vakkala/Python-Programming","sub_path":"pdir4/job_scheduling.py","file_name":"job_scheduling.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7152306302","text":"import keras\nfrom keras.models import Sequential,Input,Model\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\nimport numpy as np\nimport tensorflow as tf\n\nbig_list = [] \n#read data from file\nf = open('histo.txt')\nfor line in f:\n big_list.append(line.strip().split(\" \"))\nf.close()\n\n#separating histogram into testing and training sets\nyes_list = big_list[0:1505]\nno_list = big_list[1505:5259]\nunsure_list = big_list[5259:]\n\nyes_train = yes_list[0:int(1505 * 0.8)]\nyes_test= yes_list[int(1505 * 0.8): 1505]\nno_train = no_list[0: int((5259 - 1505) * .8)]\nno_test = no_list[int((5259 - 1505) * .8): 5259]\n\ntrain_data = np.concatenate((yes_train, no_train))\ntest_data = np.concatenate((yes_test, no_test))\nnp.random.shuffle(train_data)\nnp.random.shuffle(test_data)\n\n\n\n#separating values into x and y values\ntrain_X=train_data[:,1:769]\ntrain_Y=train_data[:,0]\ntrain_Y=[[x] for x in train_Y]\ntest_X=test_data[:,1:769]\ntest_Y=test_data[:,0]\ntest_Y=[[x] for x in test_Y]\n\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(1, 768)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10)\n])\n\n\nsess = 
tf.Session()\n","repo_name":"gannotsk21/ML_Lab8","sub_path":"gannotsk21_part2.py","file_name":"gannotsk21_part2.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20953475173","text":"import numpy as np\nimport pytest\n\nimport tvm.testing\nfrom tvm import relay\nfrom tvm.contrib.hexagon.session import Session\nfrom tvm.relay.backend import Executor, Runtime\n\n\ndef get_mobilenet():\n \"\"\"Download and import mobilenet model with ONNX\"\"\"\n onnx = pytest.importorskip(\"onnx\")\n\n model_url = \"https://github.com/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx\" # pylint: disable=line-too-long\n model_path = tvm.contrib.download.download_testdata(\n model_url, \"mobilenetv2-7.onnx\", module=\"onnx\"\n )\n return onnx.load(model_path)\n\n\n@pytest.mark.parametrize(\"enable_usmp\", [False, True])\n@tvm.testing.requires_hexagon\ndef test_mobilenet_aot(hexagon_session: Session, aot_host_target, aot_target, enable_usmp):\n \"\"\"Test mobilenet with aot executor\"\"\"\n dtype = \"float32\"\n onnx_model = get_mobilenet()\n\n data_in = np.random.rand(1, 3, 224, 224).astype(dtype=dtype)\n\n input_name = \"data\"\n shape_dict = {input_name: data_in.shape}\n relay_mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)\n inputs = {input_name: data_in}\n\n target_llvm = tvm.target.Target(\"llvm\")\n config = {\"tir.usmp.enable\": enable_usmp}\n with tvm.transform.PassContext(opt_level=3, config=config):\n hexagon_lowered = tvm.relay.build(\n relay_mod,\n tvm.target.Target(aot_target, host=aot_host_target),\n runtime=Runtime(\"cpp\"),\n executor=Executor(\"aot\", {\"unpacked-api\": False, \"interface-api\": \"packed\"}),\n params=params,\n )\n\n hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered)\n hexagon_mod.set_input(**inputs)\n hexagon_mod.run()\n hexagon_output = hexagon_mod.get_output(0).numpy()\n\n with tvm.transform.PassContext(opt_level=3):\n llvm_lowered = tvm.relay.build(\n relay_mod,\n tvm.target.Target(target_llvm, host=target_llvm),\n runtime=Runtime(\"cpp\"),\n executor=Executor(\"aot\", {\"interface-api\": \"packed\"}),\n params=params,\n )\n\n llvm_mod = tvm.runtime.executor.AotModule(llvm_lowered[\"default\"](tvm.cpu(0)))\n llvm_mod.set_input(**inputs)\n llvm_mod.run()\n expected_output = llvm_mod.get_output(0).numpy()\n tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n tvm.testing.main()\n","repo_name":"apache/tvm","sub_path":"tests/python/contrib/test_hexagon/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":10533,"dataset":"github-code","pt":"95"} +{"seq_id":"43309080499","text":"#!/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef solve():\n limit = 10 ** 6\n rec = [0 for i in range(limit + 1)]\n for u in range(1, limit):\n v = 1\n while u * v <= limit:\n if (u + v) % 4 == 0 and 3 * v - u > 0 and (3 * v - u) % 4 == 0:\n rec[u * v] += 1\n v += 1\n return sum(map(lambda x: x == 10, rec))\n\n\nif __name__ == \"__main__\":\n print(solve())\n","repo_name":"brickgao/ProjectEuler","sub_path":"src/0135.py","file_name":"0135.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21268944872","text":"import logging\nfrom datetime import 
datetime\nfrom enum import Enum\nfrom typing import Any, List, Mapping, NamedTuple, Optional\n\nfrom PyQt6 import QtDBus\nfrom PyQt6.QtCore import QObject, QVersionNumber\n\nfrom vorta.network_status.abc import NetworkStatusMonitor, SystemWifiInfo\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkManagerMonitor(NetworkStatusMonitor):\n def __init__(self, nm_adapter: 'NetworkManagerDBusAdapter' = None):\n self._nm = nm_adapter or NetworkManagerDBusAdapter.get_system_nm_adapter()\n\n def is_network_metered(self) -> bool:\n try:\n return self._nm.get_global_metered_status() in (\n NMMetered.YES,\n NMMetered.GUESS_YES,\n )\n except DBusException:\n logger.exception(\"Failed to check if network is metered, assuming it isn't\")\n return False\n\n def get_current_wifi(self) -> Optional[str]:\n # Only check the primary connection. VPN over WiFi will still show the WiFi as Primary Connection.\n # We don't check all active connections, as NM won't disable WiFi when connecting a cable.\n try:\n active_connection_path = self._nm.get_primary_connection_path()\n if not active_connection_path:\n return\n active_connection = self._nm.get_active_connection_info(active_connection_path)\n if active_connection.type == '802-11-wireless':\n settings = self._nm.get_settings(active_connection.connection)\n ssid = self._get_ssid_from_settings(settings)\n if ssid:\n return ssid\n except DBusException:\n logger.exception(\"Failed to get currently connected WiFi network, assuming none\")\n return None\n\n def get_known_wifis(self) -> List[SystemWifiInfo]:\n wifis = []\n try:\n connections_paths = self._nm.get_connections_paths()\n except DBusException:\n logger.exception(\"Failed to list connections\")\n return wifis\n\n for connection_path in connections_paths:\n try:\n settings = self._nm.get_settings(connection_path)\n except DBusException:\n logger.warning(\"Couldn't load settings for %s\", connection_path, exc_info=True)\n else:\n ssid = self._get_ssid_from_settings(settings)\n if ssid:\n timestamp = settings['connection'].get('timestamp')\n wifis.append(\n SystemWifiInfo(\n ssid=ssid,\n last_connected=timestamp and datetime.utcfromtimestamp(timestamp),\n )\n )\n return wifis\n\n def _get_ssid_from_settings(self, settings):\n wireless_settings = settings.get('802-11-wireless') or {}\n raw_ssid = wireless_settings.get('ssid')\n ssid = raw_ssid and decode_ssid(raw_ssid)\n return ssid\n\n\ndef decode_ssid(raw_ssid: List[int]) -> Optional[str]:\n \"\"\"SSIDs are binary strings, but we need something to show to the user.\"\"\"\n # Best effort UTF-8 decoding, as most SSIDs are UTF-8 (or even ASCII)\n str_ssid = bytes(raw_ssid).decode('utf-8', 'surrogateescape')\n if str_ssid.isprintable():\n return str_ssid\n else:\n return ''.join(c if c.isprintable() else ascii(c)[1:-1] for c in str_ssid)\n\n\nclass UnsupportedException(Exception):\n \"\"\"NetworkManager is not available\"\"\"\n\n\nclass DBusException(Exception):\n \"\"\"Failed to call a DBus method\"\"\"\n\n\nclass NetworkManagerDBusAdapter(QObject):\n \"\"\"Simple adapter to NetworkManager's DBus interface.\n This should be the only part of NM support that needs manual testing.\"\"\"\n\n BUS_NAME = 'org.freedesktop.NetworkManager'\n NM_PATH = '/org/freedesktop/NetworkManager'\n\n def __init__(self, parent, bus):\n super().__init__(parent)\n self._bus = bus\n self._nm = self._get_iface(self.NM_PATH, 'org.freedesktop.NetworkManager')\n\n @classmethod\n def get_system_nm_adapter(cls) -> 'NetworkManagerDBusAdapter':\n bus = QtDBus.QDBusConnection.systemBus()\n 
if not bus.isConnected():\n raise UnsupportedException(\"Can't connect to system bus\")\n nm_adapter = cls(parent=None, bus=bus)\n if not nm_adapter.isValid():\n raise UnsupportedException(\"Can't connect to NetworkManager\")\n return nm_adapter\n\n def isValid(self):\n if not self._nm.isValid():\n return False\n nm_version = self._get_nm_version()\n if nm_version < QVersionNumber(1, 2):\n logger.warning(\n 'NetworkManager version 1.2 or later required, found %s',\n nm_version.toString(),\n )\n return False\n return True\n\n def get_primary_connection_path(self) -> Optional[str]:\n return read_dbus_property(self._nm, 'PrimaryConnection')\n\n def get_active_connection_info(self, active_connection_path) -> 'ActiveConnectionInfo':\n active_connection = self._get_iface(active_connection_path, 'org.freedesktop.NetworkManager.Connection.Active')\n return ActiveConnectionInfo(\n connection=read_dbus_property(active_connection, 'Connection'),\n type=read_dbus_property(active_connection, 'Type'),\n )\n\n def get_connections_paths(self) -> List[str]:\n settings_manager = self._get_iface(self.NM_PATH + '/Settings', 'org.freedesktop.NetworkManager.Settings')\n return get_result(settings_manager.call('ListConnections'))\n\n def get_settings(self, connection_path) -> Mapping[str, Mapping[str, Any]]:\n settings = self._get_iface(connection_path, 'org.freedesktop.NetworkManager.Settings.Connection')\n return get_result(settings.call('GetSettings'))\n\n def get_global_metered_status(self) -> 'NMMetered':\n return NMMetered(read_dbus_property(self._nm, 'Metered'))\n\n def _get_nm_version(self):\n version, _suffindex = QVersionNumber.fromString(read_dbus_property(self._nm, 'Version'))\n return version\n\n def _get_iface(self, path, interface) -> QtDBus.QDBusInterface:\n return QtDBus.QDBusInterface(self.BUS_NAME, path, interface, self._bus)\n\n\ndef read_dbus_property(obj, property):\n # QDBusInterface.property() didn't work for some reason\n props = QtDBus.QDBusInterface(obj.service(), obj.path(), 'org.freedesktop.DBus.Properties', obj.connection())\n msg = props.call('Get', obj.interface(), property)\n return get_result(msg)\n\n\ndef get_result(msg: QtDBus.QDBusMessage) -> Any:\n if msg.type() == msg.MessageType.ReplyMessage:\n return msg.arguments()[0]\n else:\n raise DBusException(\"DBus call failed: {}\".format(msg.arguments()))\n\n\nclass ActiveConnectionInfo(NamedTuple):\n connection: str\n type: str\n\n\nclass NMMetered(Enum):\n UNKNOWN = 0\n YES = 1\n NO = 2\n GUESS_YES = 3\n GUESS_NO = 4\n\n\nclass NMDeviceType(Enum):\n # Only the types we care about\n UNKNOWN = 0\n WIFI = 2\n","repo_name":"borgbase/vorta","sub_path":"src/vorta/network_status/network_manager.py","file_name":"network_manager.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":1730,"dataset":"github-code","pt":"95"} +{"seq_id":"72319671991","text":"from typing import Optional\n\nimport typer\nfrom typing_extensions import Annotated\n\n\ndef main(name: Annotated[Optional[str], typer.Argument()] = None):\n if name is None:\n print(\"Hello World!\")\n else:\n print(f\"Hello {name}\")\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","repo_name":"tiangolo/typer","sub_path":"docs_src/arguments/optional/tutorial002_an.py","file_name":"tutorial002_an.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":12521,"dataset":"github-code","pt":"95"} +{"seq_id":"42055131754","text":"#!/usr/bin/env python3\n\n''' Keithely source fixed 
current setting '''\nimport Gpib\nfrom time import sleep\n# Port GPIB 0, GPIB Intrument address 24\nins=Gpib.Gpib(0,24)\n# Reset GPIB defaults\nins.write(\"*RST\")\n# Identify request\nins.write(\"*IDN?\")\n# Read answer\nprint(ins.read().decode('utf-8'))\n''' Select source function, mode '''\n#Select current source.\nins.write(\":SOUR:FUNC CURR\")\n''' Set source range, level, compliance ''' \n# Select source range.\nins.write(\":SOUR:CURR:RANG 10E-3\")\n# Source output.\nins.write(\":SOUR:CURR:LEV 5E-3\")\n# 30V compliance.\nins.write(\":SENS:VOLT:PROT 30\")\n# Voltage measure function.\nins.write(\":SENS:FUNC 'VOLT'\")\n''' Set measure function, range ''' \n# 30V measure range.\nins.write(\":SENS:VOLT:RANG 30\")\n# Voltage reading only.\nins.write(\":FORM:ELEM VOLT\")\n# Turn on output\nins.write(\":OUTP ON\")\n# Read data, Trigger, acquire reading.\nins.write(\":READ?\")\n#print(ins.read())\n# Read answer\nV=float(ins.read())\nprint(\"Measured voltage: {0}V Resistance {1} ohm\".format(V, V/0.005))\n# wait in seconds\nsleep(10)\n# Turn off output\nins.write(\":OUTP OFF\")\n","repo_name":"cliagit/spin-lab","sub_path":"Esercizi/keithley_2400_source_fixed_current.py","file_name":"keithley_2400_source_fixed_current.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24808855804","text":"##\n##\nimport pexpect\n\nclass RouteServer:\n\t'''This class will instantiate an object that\n\twe will use to connect via pexpect to a BGP\n\troute server of our choosing...\n\t'''\n\tdef __init__(self, routeServer):\n\t\t'''This is my constructor'''\n\t\tself.routeServer = routeServer\n\n\tdef telnetConnect(self):\n\t\t'''This is my method!'''\n\t\tself.session = pexpect.spawn('telnet ' + self.routeServer)\n\t\t#session = pexpect.spawn('telnet' + routeServer)\n\t\t#session.sendline('\\n')\n\t\tself.session.sendline('\\n')\n\t\t#session.expect(['>'])\n\t\tself.session.expect(['>'])\n\t\t#session.interact()\n\t\tself.session.interact()\n\noptus = RouteServer('203.202.125.6')\noptus.telnetConnect()","repo_name":"mkseth4774/ine-guide-to-network-programmability-python-course-files","sub_path":"RouteServer.py","file_name":"RouteServer.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12719678257","text":"# -*-coding:utf-8 -*-\nimport urllib.request\nimport json\nimport datetime\nfrom db.mongodb import mongodb\n\n\ndef getURLDatas(beforeDate):\n url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_timeline?src=web&before=\" + beforeDate + \"T00%3A01%3A00.175Z&limit=200&type=post&category=5562b415e4b00c57d9b94ac8\"\n res = urllib.request.urlopen(url)\n datas = res.read()\n jsonObj = json.loads(datas)\n list = []\n set = {}\n articles = mongodb.initCollection('articles')\n for item in jsonObj['d'][\"entrylist\"]:\n result = articles.find({\"id\": item[\"objectId\"]})\n # print(item[\"createdAt\"])\n # print(result.count())\n if result.count() == 0:\n articles.insert({\"columnid\": \"BJcTbyZoW\", \"brief\": item[\"summaryInfo\"],\n \"title\": item[\"title\"],\n \"link\": item[\"originalUrl\"],\n \"id\": item[\"objectId\"],\n \"publishtime\":datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d %H:%M:%S\"),\n \"author\":\"crawler\"})\n\n\nnow = datetime.datetime.now()\ndstr = \"2017-01-01\"\ndtime = datetime.datetime.strptime(dstr, \"%Y-%m-%d\")\nwhile dtime < now:\n print(dstr)\n 
getURLDatas(dstr)\n dtime = datetime.datetime.strptime(dstr, \"%Y-%m-%d\")\n dtime = dtime + datetime.timedelta(days=1)\n dstr = datetime.datetime.strftime(dtime, \"%Y-%m-%d\")\n","repo_name":"jinshw/tutorial","sub_path":"juejin.py","file_name":"juejin.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26694634517","text":"from googleapiclient.discovery import build\nfrom urllib.request import urlretrieve\nfrom configparser import ConfigParser\nimport argparse\n\n\nclass Uthumer:\n _api = None\n settings = ConfigParser()\n settings.read('settings.ini')\n\n @property\n def api(self):\n if not self._api:\n api_params = {'developerKey': self.settings['youtube']['API_KEY'],\n 'serviceName': 'youtube',\n 'version': 'v3'}\n self._api = build(**api_params)\n return self._api\n\n def get_thumbs(self, video_id):\n request_params = {'id': video_id,\n 'part': 'snippet',\n 'fields': 'items(snippet(thumbnails))'}\n request = self.api.videos().list(**request_params)\n response = request.execute()\n thumbs = response['items'][0]['snippet']['thumbnails']\n return thumbs\n\n def get_largest_thumb_url(self, thumbs):\n for size in self.settings['youtube']['SIZES'].split(','):\n if size in thumbs:\n thumb_url = thumbs[size]['url']\n return thumb_url\n return None\n\n def download_thumb(self, video_id, path='wallpaper.jpg'):\n thumbs = self.get_thumbs(video_id)\n thumb_url = self.get_largest_thumb_url(thumbs)\n urlretrieve(thumb_url, path)\n\n def get_video_id(self, url):\n sanitized_url = url.strip('/')\n if 'youtu.be' in sanitized_url:\n id = sanitized_url.split('youtu.be/')[1]\n else:\n id = sanitized_url.split('watch?v=')[1]\n return id\n\nif __name__ == '__main__':\n uthumer = Uthumer()\n parser = argparse.ArgumentParser(description=\"Downloads a Youtube video thumbnail.\")\n parser.add_argument('url',\n help='Youtube video URL',\n default=None)\n parser_args = parser.parse_args()\n url = parser_args.url\n if url:\n video_id = uthumer.get_video_id(url)\n uthumer.download_thumb(video_id)\n","repo_name":"izdwuut/uthumer","sub_path":"uthumer.py","file_name":"uthumer.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"75173506551","text":"import pandas as pd\nfrom glob import glob\nimport json\n\nconfig_path = \"config.json\"\n\n# --------------------------------------------------------------\n# Load configuration settings and build the path to the files\n# --------------------------------------------------------------\nwith open(config_path, \"r\") as config_file:\n config = json.load(config_file)\n\nfiles_path = config[\"files_path\"]\nfile_pattern = config[\"file_pattern\"]\nfull_path_pattern = files_path + \"*\" + file_pattern\n\n# --------------------------------------------------------------\n# Load raw data\n# --------------------------------------------------------------\nfiles = glob(full_path_pattern)\n\n\ndef read_data_from_files(files):\n \"\"\"_summary_\n\n Args:\n files (_type_): _description_\n\n Returns:\n _type_: _description_\n \"\"\"\n\n acc_df = pd.DataFrame()\n gyr_df = pd.DataFrame()\n\n acc_set = 1\n gyr_set = 1\n\n for f in files:\n participant = f.split(\"-\")[0][-1]\n label = f.split(\"-\")[1]\n category = f.split(\"-\")[2].split(\"_\")[0].rstrip(\"123\")\n\n df = pd.read_csv(f)\n df[\"participant\"] = participant\n df[\"label\"] = label\n df[\"category\"] = category\n\n if 
\"Accelerometer\" in f:\n df[\"set\"] = acc_set\n acc_set += 1\n acc_df = pd.concat([acc_df, df])\n\n if \"Gyroscope\" in f:\n df[\"set\"] = gyr_set\n gyr_set += 1\n gyr_df = pd.concat([gyr_df, df])\n\n acc_df.index = pd.to_datetime(acc_df[\"epoch (ms)\"], unit=\"ms\")\n gyr_df.index = pd.to_datetime(gyr_df[\"epoch (ms)\"], unit=\"ms\")\n\n acc_df.drop(columns=[\"epoch (ms)\", \"time (01:00)\", \"elapsed (s)\"], inplace=True)\n gyr_df.drop(columns=[\"epoch (ms)\", \"time (01:00)\", \"elapsed (s)\"], inplace=True)\n\n return acc_df, gyr_df\n\n\nacc_df, gyr_df = read_data_from_files(files)\n\n# --------------------------------------------------------------\n# Merging datasets\n# --------------------------------------------------------------\npd.concat([acc_df, gyr_df], axis=1)\n\n# Rename columns\ndata_merged = pd.concat([acc_df.iloc[:, :3], gyr_df], axis=1)\ndata_merged.columns = [\n \"acc_x\",\n \"acc_y\",\n \"acc_z\",\n \"gyr_x\",\n \"gyr_y\",\n \"gyr_z\",\n \"participant\",\n \"label\",\n \"category\",\n \"set\",\n]\n\n# --------------------------------------------------------------\n# Resample data (frequency conversion)\n# --------------------------------------------------------------\n\n# Accelerometer: 12.500HZ\n# Gyroscope: 25.000Hz\n\nsampling = {\n \"acc_x\": \"mean\",\n \"acc_y\": \"mean\",\n \"acc_z\": \"mean\",\n \"gyr_x\": \"mean\",\n \"gyr_y\": \"mean\",\n \"gyr_z\": \"mean\",\n \"participant\": \"last\",\n \"label\": \"last\",\n \"category\": \"last\",\n \"set\": \"last\",\n}\n\n# Split by day\ndays = [g for n, g in data_merged.groupby(pd.Grouper(freq=\"D\"))]\ndata_resampled = pd.concat(\n df.resample(rule=\"200ms\").apply(sampling).dropna() for df in days\n)\n\ndata_resampled[\"set\"] = data_resampled[\"set\"].astype(\"int\")\n\n# --------------------------------------------------------------\n# Export dataset\n# --------------------------------------------------------------\ndata_resampled.to_pickle(\"data/interim/01_dataset.pkl\")\n","repo_name":"MaxManchenko/tracking_barbell_exercises","sub_path":"src/data/01_make_dataset_from_raw_files.py","file_name":"01_make_dataset_from_raw_files.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20580248483","text":"import os\nfrom redis.cluster import RedisCluster\nimport base64\nfrom io import BytesIO\nfrom PIL import Image\n\nhost = 'image-cluster-0001-001.wexg8l.0001.use1.cache.amazonaws.com' #subject to change\nport = 6379\nimg_path = ''\n\ndef main():\n\n rc = RedisCluster(\n host=host,\n port=port,\n decode_responses = True,\n skip_full_coverage_check=True,\n )\n\n for filename in os.listdir(img_path):\n with Image.open(os.path.join(img_path, filename)) as img:\n buffer = BytesIO()\n img.save(buffer, 'JPEG')\n img_str = base64.b64encode(buffer.getvalue())\n rc.set(filename, img_str)\n\n rc.close()\n\nmain()\n","repo_name":"daBatmanCoder/AWSproject","sub_path":"milestones/ex6/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"22033744252","text":"from collections.abc import (\n Callable,\n Mapping,\n Sequence,\n)\nfrom typing import (\n Any,\n Optional,\n)\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom reconcile.gql_definitions.fragments.ocm_environment import OCMEnvironment\nfrom reconcile.gql_definitions.ocm_labels.clusters import ClusterV1\nfrom 
reconcile.ocm_labels.integration import (\n ClusterSubscriptionLabelSource,\n OcmLabelsIntegration,\n OcmLabelsIntegrationParams,\n init_cluster_subscription_label_source,\n)\nfrom reconcile.ocm_labels.label_sources import (\n ClusterRef,\n LabelState,\n)\nfrom reconcile.test.fixtures import Fixtures\nfrom reconcile.test.ocm.fixtures import build_ocm_cluster\nfrom reconcile.test.ocm.test_utils_ocm_labels import build_subscription_label\nfrom reconcile.utils.helpers import flatten\nfrom reconcile.utils.ocm.base import (\n ClusterDetails,\n OCMCapability,\n build_label_container,\n)\nfrom reconcile.utils.ocm_base_client import OCMBaseClient\nfrom reconcile.utils.secret_reader import SecretReader\n\n\n@pytest.fixture\ndef fx() -> Fixtures:\n return Fixtures(\"ocm_labels\")\n\n\n@pytest.fixture\ndef ocm_labels(\n secret_reader: SecretReader,\n mocker: MockerFixture,\n ocm_base_client: OCMBaseClient,\n) -> OcmLabelsIntegration:\n mocker.patch.object(OcmLabelsIntegration, \"secret_reader\", secret_reader)\n intg = OcmLabelsIntegration(\n OcmLabelsIntegrationParams(managed_label_prefixes=[\"my-label-prefix\"])\n )\n intg.ocm_apis = {\n \"ocm-prod\": ocm_base_client,\n \"ocm-stage\": ocm_base_client,\n }\n return intg\n\n\n@pytest.fixture\ndef cluster_query_func(\n fx: Fixtures,\n data_factory: Callable[[type[ClusterV1], Mapping[str, Any]], Mapping[str, Any]],\n) -> Callable:\n def q(*args: Any, **kwargs: Any) -> dict[Any, Any]:\n return {\n \"clusters\": [\n data_factory(ClusterV1, c)\n for c in fx.get_anymarkup(\"clusters.yml\")[\"clusters\"]\n ]\n }\n\n return q\n\n\n@pytest.fixture\ndef clusters(\n ocm_labels: OcmLabelsIntegration, cluster_query_func: Callable\n) -> list[ClusterV1]:\n return ocm_labels.get_clusters(cluster_query_func)\n\n\n@pytest.fixture\ndef ocm_base_client(mocker: MockerFixture) -> OCMBaseClient:\n return mocker.create_autospec(spec=OCMBaseClient)\n\n\n@pytest.fixture\ndef envs(gql_class_factory: Callable) -> list[OCMEnvironment]:\n return [\n gql_class_factory(\n OCMEnvironment,\n {\n \"name\": \"ocm-prod\",\n \"accessTokenClientSecret\": {\n \"field\": \"client_secret\",\n \"path\": \"path/to/client_secret\",\n },\n },\n ),\n gql_class_factory(\n OCMEnvironment,\n {\n \"name\": \"ocm-stage\",\n \"accessTokenClientSecret\": {\n \"field\": \"client_secret\",\n \"path\": \"path/to/client_secret\",\n },\n },\n ),\n ]\n\n\n@pytest.fixture\ndef build_cluster_details() -> Callable:\n def _(\n name: str = \"cluster_name\",\n org_id: str = \"org_id\",\n subs_labels: Optional[list[tuple[str, str]]] = None,\n ) -> ClusterDetails:\n ocm_cluster = build_ocm_cluster(name, subs_id=f\"{name}-sub-id\")\n return ClusterDetails(\n ocm_cluster=ocm_cluster,\n organization_id=org_id,\n organization_labels=build_label_container([]),\n subscription_labels=build_label_container(\n [\n build_subscription_label(k, v, ocm_cluster.subscription.id)\n for k, v in subs_labels or []\n ],\n ),\n capabilities={\n \"foo\": OCMCapability(name=\"foo\", value=\"bar\"),\n },\n )\n\n return _\n\n\n@pytest.fixture\ndef ocm_clusters(build_cluster_details: Callable) -> list[ClusterDetails]:\n return [\n build_cluster_details(\n name=\"cluster-1\",\n org_id=\"org-id-1\",\n subs_labels=[\n (\"my-label-prefix.to-be-removed\", \"enabled\"),\n (\"my-label-prefix.to-be-changed\", \"disabled\"),\n (\"do-not-touch\", \"enabled\"),\n ],\n ),\n build_cluster_details(\n name=\"cluster-2\",\n org_id=\"org-id-2\",\n subs_labels=[\n (\"another-do-not-touch-attribute\", \"something-else\"),\n ],\n ),\n build_cluster_details(\n 
name=\"cluster-3\",\n org_id=\"org-id-2\",\n subs_labels=[\n (\"my-label-prefix.to-be-removed\", \"enabled\"),\n ],\n ),\n ]\n\n\n@pytest.fixture\ndef subscription_label_current_state(\n ocm_clusters: Sequence[ClusterDetails],\n) -> LabelState:\n return {\n ClusterRef(\n cluster_id=ocm_clusters[0].ocm_cluster.id,\n org_id=ocm_clusters[0].organization_id,\n ocm_env=\"ocm-prod\",\n name=ocm_clusters[0].ocm_cluster.name,\n label_container_href=f\"{ocm_clusters[0].ocm_cluster.subscription.href}/labels\",\n ): {\n \"my-label-prefix.to-be-changed\": \"disabled\",\n \"my-label-prefix.to-be-removed\": \"enabled\",\n },\n ClusterRef(\n cluster_id=ocm_clusters[1].ocm_cluster.id,\n org_id=ocm_clusters[1].organization_id,\n ocm_env=\"ocm-stage\",\n name=ocm_clusters[1].ocm_cluster.name,\n label_container_href=f\"{ocm_clusters[1].ocm_cluster.subscription.href}/labels\",\n ): {},\n ClusterRef(\n cluster_id=ocm_clusters[2].ocm_cluster.id,\n org_id=ocm_clusters[2].organization_id,\n ocm_env=\"ocm-stage\",\n name=ocm_clusters[2].ocm_cluster.name,\n label_container_href=f\"{ocm_clusters[2].ocm_cluster.subscription.href}/labels\",\n ): {\n \"my-label-prefix.to-be-removed\": \"enabled\",\n },\n }\n\n\n@pytest.fixture\ndef cluster_file_subscription_label_source(\n clusters: list[ClusterV1],\n ocm_labels: OcmLabelsIntegration,\n) -> ClusterSubscriptionLabelSource:\n return init_cluster_subscription_label_source(clusters)\n\n\n@pytest.fixture\ndef subscription_label_desired_state(\n clusters: Sequence[ClusterV1],\n) -> LabelState:\n desired: LabelState = {\n ClusterRef(\n cluster_id=cluster.spec.q_id,\n org_id=cluster.ocm.org_id,\n ocm_env=cluster.ocm.environment.name,\n name=cluster.name,\n label_container_href=None,\n ): flatten(cluster.ocm_subscription_labels or {})\n for cluster in clusters\n if cluster.spec and cluster.spec.q_id and cluster.ocm # mypy again :(\n }\n if len(clusters) != len(desired):\n raise RuntimeError(\"not all clusers had spec and ocm. 
should not happen\")\n return desired\n","repo_name":"app-sre/qontract-reconcile","sub_path":"reconcile/test/ocm_labels/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"97"} +{"seq_id":"32495606132","text":"import pandas as pd\n\nfrom env import get_connection\n\n################################################################# acquire main function#################################################################\n\ndef get_telco_data():\n \n db_url = get_connection('telco_churn')\n\n query = '''\n SELECT \n customers.customer_id,\n customers.gender,\n customers.senior_citizen,\n customers.partner,\n customers.dependents,\n customers.tenure,\n customers.phone_service,\n customers.multiple_lines,\n customers.online_security,\n customers.online_backup,\n customers.device_protection,\n customers.tech_support,\n customers.streaming_tv,\n customers.streaming_movies,\n customers.paperless_billing,\n customers.monthly_charges,\n customers.total_charges,\n customers.churn,\n payment_types.payment_type,\n contract_types.contract_type,\n internet_service_types.internet_service_type\n FROM\n customers\n LEFT JOIN\n customer_details ON customer_details.customer_id = customers.customer_id\n LEFT JOIN\n customer_contracts ON customer_contracts.customer_id = customer_details.customer_id\n LEFT JOIN\n customer_payments ON customer_payments.customer_id = customer_contracts.customer_id\n LEFT JOIN\n customer_signups ON customer_signups.customer_id = customer_payments.customer_id\n LEFT JOIN\n customer_subscriptions ON customer_subscriptions.customer_id = customer_signups.customer_id\n LEFT JOIN\n customer_churn ON customer_churn.customer_id = customer_subscriptions.customer_id\n LEFT JOIN\n payment_types ON payment_types.payment_type_id = customers.payment_type_id\n LEFT JOIN\n contract_types ON contract_types.contract_type_id = customer_contracts.contract_type_id\n LEFT JOIN\n internet_service_types ON internet_service_types.internet_service_type_id = customers.internet_service_type_id;\n '''\n\n #read sql query into a dataframe\n telco_df = pd.read_sql(query, db_url)\n\n #replace all the total charges rows with no values with 0\n telco_df['total_charges'] = telco_df['total_charges'].replace(' ', 0)\n\n #replacing all the no internet service to No\n telco_df['multiple_lines'] = telco_df['multiple_lines'].replace('No phone service', 'No')\n telco_df['online_security'] = telco_df['online_security'].replace('No phone service', 'No')\n telco_df['online_backup'] = telco_df['online_backup'].replace('No phone service', 'No')\n telco_df['device_protection'] = telco_df['device_protection'].replace('No phone service', 'No')\n telco_df['tech_support'] = telco_df['tech_support'].replace('No phone service', 'No')\n telco_df['streaming_movies'] = telco_df['streaming_movies'].replace('No phone service', 'No')\n telco_df['streaming_tv'] = telco_df['streaming_tv'].replace('No phone service', 'No')\n\n #grouping all the value counts in payment type into manual and electronic payments\n telco_df.loc[telco_df['payment_type'].str.contains('automatic', case=False), 'payment_type'] = 'Automatic Payment'\n telco_df.loc[telco_df['payment_type'].str.contains('check', case=False), 'payment_type'] = 'Manual Payment'\n\n #grouping all the value counts in contract type into monthly and yearly contracts\n telco_df.loc[telco_df['contract_type'].str.contains('year', case=False), 'contract_type'] = 
'one/two-years'\n\n #changing all the values in churn to boolean\n telco_df['churn'] = telco_df['churn'].replace('Yes', True)\n telco_df['churn'] = telco_df['churn'].replace('No', False)\n\n # Convert \"total_charges\" to numeric (this will handle any non-numeric values)\n telco_df['total_charges'] = pd.to_numeric(telco_df['total_charges'], errors='coerce')\n # Replace any NaN values with 0 (or any other suitable value)\n telco_df['total_charges'].fillna(0, inplace=True)\n # Convert \"total_charges\" to integer\n telco_df['total_charges'] = telco_df['total_charges'].astype(int)\n \n return telco_df\n\n\n#################################### encoding and cleaning data columns #######################################################################\n\ndef encode_columns(train, val, test):\n # One-hot encoding categorical columns with get_dummies\n train = pd.get_dummies(train, columns=[\n 'gender', 'senior_citizen', 'partner', 'dependents', 'phone_service',\n 'multiple_lines', 'online_security', 'online_backup', 'device_protection',\n 'tech_support', 'streaming_tv', 'streaming_movies', 'payment_type',\n 'contract_type', 'internet_service_type', 'paperless_billing'\n ], drop_first=True)\n \n val = pd.get_dummies(val, columns=[\n 'gender', 'senior_citizen', 'partner', 'dependents', 'phone_service',\n 'multiple_lines', 'online_security', 'online_backup', 'device_protection',\n 'tech_support', 'streaming_tv', 'streaming_movies', 'payment_type',\n 'contract_type', 'internet_service_type', 'paperless_billing'\n ], drop_first=True)\n \n test = pd.get_dummies(test, columns=[\n 'gender', 'senior_citizen', 'partner', 'dependents', 'phone_service',\n 'multiple_lines', 'online_security', 'online_backup', 'device_protection',\n 'tech_support', 'streaming_tv', 'streaming_movies', 'payment_type',\n 'contract_type', 'internet_service_type', 'paperless_billing'\n ], drop_first=True)\n\n # Drop columns with 'No internet service' and 'internet_service_type_None'\n cols_to_drop = train.columns[train.columns.str.contains('No internet service')]\n train = train.drop(columns=cols_to_drop)\n train = train.drop(columns=['internet_service_type_None'])\n train = train.rename(columns={'gender_male': 'gender'})\n\n cols_to_drop = val.columns[val.columns.str.contains('No internet service')]\n val = val.drop(columns=cols_to_drop)\n val = val.drop(columns=['internet_service_type_None'])\n val = val.rename(columns={'gender_male': 'gender'})\n\n cols_to_drop = test.columns[test.columns.str.contains('No internet service')]\n test = test.drop(columns=cols_to_drop)\n test = test.drop(columns=['internet_service_type_None'])\n test = test.rename(columns={'gender_male': 'gender'})\n\n # Rename columns by removing \"_Yes\"\n columns_to_rename = [\n \"partner_Yes\", \"dependents_Yes\", \"phone_service_Yes\", \n \"multiple_lines_Yes\", \"online_security_Yes\", \"online_backup_Yes\", \n \"device_protection_Yes\", \"tech_support_Yes\", \"streaming_tv_Yes\", \n \"streaming_movies_Yes\", \"paperless_billing_Yes\"\n ]\n for col in columns_to_rename:\n new_name = col.replace(\"_Yes\", \"\")\n train.rename(columns={col: new_name}, inplace=True)\n val.rename(columns={col: new_name}, inplace=True)\n test.rename(columns={col: new_name}, inplace=True)\n\n # Rename columns based on provided replacements\n columns_to_rename = [\n \"senior_citizen_1\", \"payment_type_Manual Payment\", \n \"contract_type_one/two-years\", \"internet_service_type_Fiber optic\"\n ]\n for col in columns_to_rename:\n new_name = col.replace(\"senior_citizen_1\", 
\"senior_citizen\").replace(\"payment_type_Manual Payment\",\n \"payment_type\").replace(\"contract_type_one/two-years\", \n \"contract_type\").replace(\"internet_service_type_Fiber optic\", \n \"internet_service_type\")\n train.rename(columns={col: new_name}, inplace=True)\n val.rename(columns={col: new_name}, inplace=True)\n test.rename(columns={col: new_name}, inplace=True)\n \n return train, val, test\n","repo_name":"TheodoreQuansah/Telco_churn_classification_project","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"7816601899","text":"#/usr/bin/python\r\nimport os, sys\r\nfrom PIL import Image\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nimport time\r\nbinary = FirefoxBinary('C:\\Program Files (x86)\\Mozilla Firefox\\Firefox.exe')\r\nbrowser = webdriver.Firefox(firefox_binary=binary)\r\n\r\ndef find_between_r( s, first, last ):\r\n try:\r\n start = s.rindex( first ) + len( first )\r\n end = s.rindex( last, start )\r\n return s[start:end]\r\n except ValueError:\r\n return \"\"\r\n\r\ndef resizeImg(imgArr):\r\n print(\"resize begin\")\r\n for index, imageList in enumerate(imgArr):\r\n try:\r\n print(\"proccesess picture: \" + str(index) + \"| of:\" + str(len(imgArr)))\r\n img = Image.open(imageList[\"ImagePath\"])\r\n oldSize = img.size\r\n new_width = int(imageList[\"ImageWidth\"])\r\n new_height = int(imageList[\"ImageHeight\"])\r\n oldWidth = int(oldSize[0])\r\n oldHeight = int(oldSize[1])\r\n print(\"image path:\")\r\n print(imageList[\"ImagePath\"])\r\n if ( oldWidth>new_width and oldHeight>new_height ):\r\n print(\"image resizing...\")\r\n img = img.resize((new_width, new_height), Image.ANTIALIAS)\r\n img.save(imageList[\"ImagePath\"],optimize=True,quality=85)\r\n print(\"image resized from:\")\r\n print(str(oldWidth)+\" x \"+str(oldHeight))\r\n print(\"to:\")\r\n print(str(new_width)+\" x \"+str(new_height))\r\n except Exception:\r\n print(\"File not found\")\r\n pass\r\n\r\ndef GetImageArr (link):\r\n for index, article in enumerate(link):\r\n print(\"proccesess article: \" + str(index) + \"| of:\" + str(len(link)))\r\n print(\"article url: \" + str(article))\r\n browser.get(article)\r\n browser.maximize_window()\r\n time.sleep(2)\r\n elem = browser.find_elements_by_xpath('//img')\r\n resultArr = []\r\n for ii in elem:\r\n Image = ii\r\n artLink = Image.get_attribute('src')\r\n artLink = artLink.split('?', 1)[0]\r\n ImageWidth = str(Image.get_property(\"width\"))\r\n ImageHeight = str(Image.get_property(\"height\"))\r\n ImageLink = artLink\r\n ImageName = find_between_r(Image.get_attribute('src'),\"/\",\"?\")\r\n ImagePath = \"D:\" + artLink.replace(\"http://sampleside-cdn.com\", \"\")\r\n dict = {\"ImageWidth\":ImageWidth,\"ImageHeight\":ImageHeight,\"ImageLink\":ImageLink,\"ImageName\":ImageName,\"ImagePath\":ImagePath}\r\n resultArr.append(dict)\r\n del dict;\r\n resizeImg(resultArr)\r\n\r\n\r\ndef linkCycle( linkFile ):\r\n with open(linkFile) as f:\r\n linkList = f.readlines()\r\n imageArr = GetImageArr(linkList)\r\n print(linkList[12])\r\nlinkCycle(\"linklist.txt\")","repo_name":"vladrogovsky/pythonimgresizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} 
+{"seq_id":"23565282303","text":"\"\"\"def hey(*values):\n print(\"first:\"+ values[0]+ \"second:\"+values[1])\ndef hello():\n print(\"hello\")\n\nvalue=\"trash\"\nhey(\"Sachu\",\"Hey\")\n\nls=[\"kfjodskfjsojc\",\"maangandi\"]\nls.append(\"he\")\ntp=(\"hh,d \",\"slhioshf\",\"sfhsfhsjhfsd\",2)\"\"\"\n\n\n\n\"\"\"value=10\n\ndef sample():\n value=30\n print(value)\nsample()\nprint(value)\"\"\"\n\n\"\"\"def sample(name,age=20):\n print(name,age)\n\n\nsample(name=\"TRASH\")\"\"\"\n\ndef sample(num1,num2):\n sum=num1+num2\n return sum\n\nresult=sample(10,15)\nprint(result)\n\n","repo_name":"trashz403/PYTHON-BAXICS","sub_path":"funcitonsample.py","file_name":"funcitonsample.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"33377189073","text":"import socket\nimport os\n\nusing = ' '\n\n\ndef register1(use_for='for socket transparent'):\n def register_port(func):\n def writing_port(*args):\n result = func(*args)\n if not os.path.exists('/home/mayinghao/port.json'):\n os.mknod('/home/mayinghao/port.json')\n\n with open('/home/mayinghao/port.json', 'a+') as f:\n end = f.tell()\n f.seek(0)\n lines = f.readlines()\n f.seek(end)\n print(lines)\n print([x for x in lines if x.startswith(str(args[0]))])\n if [x for x in lines if x.startswith(str(args[0]))]:\n pass\n else:\n f.write(str(args[0])+':' + use_for+'\\n')\n\n return result\n return writing_port\n return register_port\n\n\ndef receive_port(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n host = socket.gethostname()\n s.bind((host, port))\n s.listen(5)\n s1, address = s.accept()\n while True:\n get_str = s1.recv(2000).decode('utf-8')\n yield get_str\n try:\n s1.send('hello,world'.encode('utf-8'))\n except BrokenPipeError:\n\n s1.close()\n break\n\n\ndef send_socket(port, *args):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = socket.gethostname()\n port = port\n client.connect((host, port))\n index = 0\n while True:\n client.send(str(args[index]).encode('utf-8'))\n d = client.recv(2000).decode('utf-8')\n index += 1\n yield d\n if index == 2:\n break\n\n\n","repo_name":"mythmyh/bili","sub_path":"bilibili/bilibili/spiders/package_socket.py","file_name":"package_socket.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"30375824263","text":"import sys\n\nsys.path.append('./')\nimport os\n\nimport faiss\nimport numpy as np\nfrom arguments import IndexArguments, DataArguments, ModelArguments, TrainingArguments\nfrom transformers import HfArgumentParser\nfrom evaluate import validate, load_test_data\n\nfrom LibVQ.base_index import FaissIndex\n\nfaiss.omp_set_num_threads(32)\n\nif __name__ == '__main__':\n parser = HfArgumentParser((IndexArguments, DataArguments, ModelArguments, TrainingArguments))\n index_args, data_args, model_args, training_args = parser.parse_args_into_dataclasses()\n\n # Load embeddings\n emb_size = 768\n doc_embeddings = np.memmap(os.path.join(data_args.embeddings_dir, 'docs.memmap'),\n dtype=np.float32, mode=\"r\")\n doc_embeddings = doc_embeddings.reshape(-1, emb_size)\n query_embeddings = np.memmap(os.path.join(data_args.embeddings_dir, 'test-queries.memmap'),\n dtype=np.float32, mode=\"r\")\n query_embeddings = query_embeddings.reshape(-1, emb_size)\n\n # Creat Faiss index\n index = 
FaissIndex(index_method=index_args.index_method,\n emb_size=len(doc_embeddings[0]),\n ivf_centers_num=index_args.ivf_centers_num,\n subvector_num=index_args.subvector_num,\n subvector_bits=index_args.subvector_bits,\n dist_mode=index_args.dist_mode)\n\n print('Training the index with doc embeddings')\n # index.CPU_to_GPU(0)\n index.fit(doc_embeddings)\n index.add(doc_embeddings)\n # index.GPU_to_CPU()\n\n index_file = os.path.join(data_args.embeddings_dir,\n f'{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index')\n index.save_index(index_file)\n # index.load_index(index_file)\n\n # Test the performance\n scores, ann_items = index.search(query_embeddings, topk=100, nprobe=index_args.nprobe)\n test_questions, test_answers, collections = load_test_data(\n query_andwer_file='./data/NQ/raw_dataset/nq-test.qa.csv',\n collections_file='./data/NQ/dataset/collection.tsv')\n validate(ann_items, test_questions, test_answers, collections)\n","repo_name":"staoxiao/LibVQ","sub_path":"examples/NQ/basic_index/faiss_index.py","file_name":"faiss_index.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"97"} +{"seq_id":"8457793032","text":"\"\"\"\nFuzzy set abstractions and utilities\n\"\"\"\n\nfrom math import ceil\nfrom typing import Callable\n\nfrom yvain.logical_systems import LogicalSystem, Zadeh\nfrom yvain.membership_functions import MembershipFunction\n\n\ndef _integrate(function: Callable[[float, ], float],\n start: float, end: float, n: int) -> float:\n \"\"\"\n Calculate defined integral of given `function` in range of\n [start, end]\n\n :param function: Function to integrate\n :param start: Left bound\n :param end: Right bound\n :param n: Number of parabolas used to compute integral\n :raise ValueError: When n is not even\n :raise ValueError: When start is greater or equal to end\n :return: Field of area under given `function`\n \"\"\"\n if n % 2 != 0:\n raise ValueError(\"In Simpson rule n have to be even\")\n if start >= end:\n raise ValueError(\n f\"Upper bound ({end}) is lesser or equal to lower ({start})\")\n\n step = (end - start) / n\n\n _sum = function(start) + function(end)\n for i in range(1, n):\n if i % 2 == 0:\n _sum += 2 * function(start + i * step)\n else:\n _sum += 4 * function(start + i * step)\n\n return (step / 3) * _sum\n\n\nclass FuzzySet:\n \"\"\"\n Fuzzy set if extension of classical one where each element\n is described by degree of membership instead of classical `member/not a member`\n \"\"\"\n\n def membership(self, x: float) -> float:\n \"\"\"\n Membership describes in what degree given element belongs to fuzzy set.\n When membership is equal to 1 we can say that element is fully member of\n fuzzy set. Analogously when membership is equal to 0 then given element is\n not a member of fuzzy set.\n\n :param x: We are searching degree of truth of this element\n :return: Degree of membership of given `x`\n \"\"\"\n\n return self.membership_function(x)\n\n def complement(self) -> 'FuzzySet':\n \"\"\"\n Negation operator. Fuzzy set resulting from this operation will have\n all memberships reversed - i.e. element with full membership will not belong\n to fuzzy set anymore and elements that was previously not in fuzzy set will\n gain full membership. 
Intermediate memberships will change proportionally\n\n :return: Complementary fuzzy set where all memberships are reversed\n \"\"\"\n\n return FuzzySet(self.logic.complement(self.membership_function),\n self.logic)\n\n def intersection(self, other_set: 'FuzzySet') -> 'FuzzySet':\n \"\"\"\n :param other_set: Second fuzzy set\n :return: Common part of `self` and `other_set`\n \"\"\"\n\n return FuzzySet(self.logic.t_norm(self.membership_function, other_set.membership_function),\n self.logic)\n\n def union(self, other_set: 'FuzzySet') -> 'FuzzySet':\n \"\"\"\n :param other_set: Second fuzzy set\n :return: Union of `self` and `other_set`\n \"\"\"\n\n return FuzzySet(\n self.logic.t_conorm(self.membership_function, other_set.membership_function),\n self.logic)\n\n __invert__ = complement\n __or__ = union\n __and__ = intersection\n\n def __init__(self, membership_function: MembershipFunction,\n logic: LogicalSystem = Zadeh()):\n \"\"\"\n :param membership_function: Function describing degree of membership of each element in domain\n :param logic: Norms used to perform intersection, union and negation\n \"\"\"\n\n self.membership_function = membership_function\n self.logic = logic\n\n\nDefuzzificationMethod = Callable[[FuzzySet, float, float], float]\n\n\ndef centroid(fuzzy_set: FuzzySet, start: float, end: float) -> float:\n \"\"\"\n :param fuzzy_set: Set to defuzzify\n :param start: Universe lowest value\n :param end: Universe highest value\n :return: Center of mass for given fuzzy set\n \"\"\"\n\n n = int(ceil(end - start)) * 100\n\n field = _integrate(fuzzy_set.membership, start, end, n)\n x_field = _integrate(lambda x: x * fuzzy_set.membership(x), start, end, n)\n\n return x_field / field\n","repo_name":"Parowicz/Yvain","sub_path":"yvain/fuzzy_set.py","file_name":"fuzzy_set.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"30043821812","text":"from PyQt5.Qt import *\nfrom PyQt5.QtWidgets import QInputDialog, QLineEdit, QDialog, QApplication\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n#Imports\nimport pandas as pd\nimport numpy as np\nimport boto3, io\nimport os\nimport datetime\nimport sys, traceback\nfrom sumWindow import Ui_Summer\nfrom dataTypeWindow import Ui_DataTypeWindow\nfrom melterWindow import Ui_Melter\nfrom nullFinderWindow import Ui_Unnull\nfrom whiteSpaceStripWindow import Ui_RemoveWhiteSpace\nfrom replacerWindow import Ui_Replacer\nfrom basicMathWindow import Ui_basicMathWindow\n\nclass Ui_DataSpanner(QMainWindow):\n\n \n def setupUi(self, DataSpanner):\n DataSpanner.setObjectName(\"DataSpanner\")\n DataSpanner.resize(1440, 783)\n self.centralwidget = QtWidgets.QWidget(DataSpanner)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 731, 731))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.Outputs = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n self.Outputs.setContentsMargins(0, 4, 0, 0)\n self.Outputs.setObjectName(\"Outputs\")\n self.Outputs_Label = QtWidgets.QLabel(self.verticalLayoutWidget)\n \n font = QtGui.QFont()\n font.setFamily(\"Avenir\")\n font.setPointSize(20)\n self.Outputs_Label.setFont(font)\n self.Outputs_Label.setAlignment(QtCore.Qt.AlignCenter)\n self.Outputs_Label.setObjectName(\"Outputs_Label\")\n self.Outputs.addWidget(self.Outputs_Label)\n self.CodeSwitch = 
QtWidgets.QTabWidget(self.verticalLayoutWidget)\n self.CodeSwitch.setEnabled(True)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.CodeSwitch.sizePolicy().hasHeightForWidth())\n self.CodeSwitch.setSizePolicy(sizePolicy)\n self.CodeSwitch.setObjectName(\"CodeSwitch\")\n \n #Pandas outputs area\n self.Pandas = QtWidgets.QWidget()\n self.Pandas.setObjectName(\"Pandas\")\n self.PandasScrollArea = QtWidgets.QScrollArea(self.Pandas)\n self.PandasScrollArea.setGeometry(QtCore.QRect(0, 0, 731, 321))\n self.PandasScrollArea.setWidgetResizable(True)\n self.PandasScrollArea.setObjectName(\"PandasScrollArea\")\n self.PandasScrollAreaWidget = QtWidgets.QWidget()\n self.PandasScrollAreaWidget.setGeometry(QtCore.QRect(0, 0, 729, 319))\n self.PandasScrollAreaWidget.setObjectName(\"PandasScrollAreaWidget\")\n self.PandasCode = QtWidgets.QPlainTextEdit(self.PandasScrollAreaWidget)\n self.PandasCode.setGeometry(QtCore.QRect(0, 0, 731, 321))\n font = QtGui.QFont()\n font.setFamily(\"Avenir\")\n font.setPointSize(12)\n self.PandasCode.setFont(font)\n self.PandasCode.setObjectName(\"PandasCode\")\n self.PandasScrollArea.setWidget(self.PandasScrollAreaWidget)\n self.CodeSwitch.addTab(self.Pandas, \"\")\n \n #PySpark outputs area\n self.PySpark = QtWidgets.QWidget()\n self.PySpark.setObjectName(\"PySpark\")\n self.PySparkScrollArea = QtWidgets.QScrollArea(self.PySpark)\n self.PySparkScrollArea.setGeometry(QtCore.QRect(0, 0, 731, 321))\n self.PySparkScrollArea.setWidgetResizable(True)\n self.PySparkScrollArea.setObjectName(\"PySparkScrollArea\")\n self.PySparkScrollAreaWidget = QtWidgets.QWidget()\n self.PySparkScrollAreaWidget.setGeometry(QtCore.QRect(0, 0, 729, 319))\n self.PySparkScrollAreaWidget.setObjectName(\"PySparkScrollAreaWidget\")\n self.PysparkCode = QtWidgets.QPlainTextEdit(self.PySparkScrollAreaWidget)\n self.PysparkCode.setGeometry(QtCore.QRect(10, 0, 691, 321))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.PysparkCode.sizePolicy().hasHeightForWidth())\n self.PysparkCode.setSizePolicy(sizePolicy)\n self.PysparkCode.setObjectName(\"PysparkCode\")\n self.PySparkScrollArea.setWidget(self.PySparkScrollAreaWidget)\n self.CodeSwitch.addTab(self.PySpark, \"\")\n self.Outputs.addWidget(self.CodeSwitch)\n self.CheckDataFrame = QtWidgets.QPushButton(self.verticalLayoutWidget)\n self.CheckDataFrame.setEnabled(True)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.CheckDataFrame.sizePolicy().hasHeightForWidth())\n \n #Check DataFrame button\n self.CheckDataFrame.setSizePolicy(sizePolicy)\n self.CheckDataFrame.setIconSize(QtCore.QSize(10, 16))\n self.CheckDataFrame.setObjectName(\"CheckDataFrame\")\n self.Outputs.addWidget(self.CheckDataFrame)\n self.frame = QtWidgets.QFrame(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())\n \n #output text formattings\n 
self.frame.setSizePolicy(sizePolicy)\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.OutputText = QtWidgets.QPlainTextEdit(self.frame)\n self.OutputText.setGeometry(QtCore.QRect(10, 0, 701, 321))\n font = QtGui.QFont()\n font.setFamily(\"Avenir\")\n font.setPointSize(12)\n self.OutputText.setFont(font)\n self.OutputText.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)\n self.OutputText.setObjectName(\"OutputText\")\n self.Outputs.addWidget(self.frame)\n self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)\n self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(739, 0, 701, 771))\n self.verticalLayoutWidget_2.setObjectName(\"verticalLayoutWidget_2\")\n \n #Tools/Buttons box and layout\n self.Tools = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)\n self.Tools.setContentsMargins(0, 0, 0, 0)\n self.Tools.setObjectName(\"Tools\")\n self.ToolsLabel = QtWidgets.QLabel(self.verticalLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.ToolsLabel.sizePolicy().hasHeightForWidth())\n self.ToolsLabel.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Avenir\")\n font.setPointSize(20)\n self.ToolsLabel.setFont(font)\n self.ToolsLabel.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)\n self.ToolsLabel.setObjectName(\"ToolsLabel\")\n self.Tools.addWidget(self.ToolsLabel)\n \n #Utilities box\n self.UtilitiesBox = QtWidgets.QGroupBox(self.verticalLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(0)\n #sizePolicy.setVerticalStretch(0)\n #sizePolicy.setHeightForWidth(self.UtilitiesBox.sizePolicy().hasHeightForWidth())\n self.UtilitiesBox.setSizePolicy(sizePolicy)\n self.UtilitiesBox.setObjectName(\"UtilitiesBox\")\n self.gridLayoutWidget_4 = QtWidgets.QWidget(self.UtilitiesBox)\n self.gridLayoutWidget_4.setGeometry(QtCore.QRect(9, 19, 400, 171))\n self.gridLayoutWidget_4.setObjectName(\"gridLayoutWidget_4\")\n self.UtilitiesLayout = QtWidgets.QGridLayout(self.gridLayoutWidget_4)\n self.UtilitiesLayout.setContentsMargins(0, 0, 0, 0)\n self.UtilitiesLayout.setObjectName(\"UtilitiesLayout\")\n \n #makeCSV Button Set up\n self.MakeCSVButton = QtWidgets.QToolButton(self.gridLayoutWidget_4)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n #sizePolicy.setHeightForWidth(self.MakeCSVButton.sizePolicy().hasHeightForWidth())\n self.MakeCSVButton.setSizePolicy(sizePolicy)\n self.MakeCSVButton.setObjectName(\"MakeCSVButton\")\n self.UtilitiesLayout.addWidget(self.MakeCSVButton, 0, 2, 1, 1)\n \n #Restore DF Set up\n self.RestoreDFButton = QtWidgets.QToolButton(self.gridLayoutWidget_4)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.RestoreDFButton.sizePolicy().hasHeightForWidth())\n self.RestoreDFButton.setSizePolicy(sizePolicy)\n self.RestoreDFButton.setObjectName(\"RestoreDFButton\")\n self.UtilitiesLayout.addWidget(self.RestoreDFButton, 0, 1, 1, 1)\n \n #Save DF Button\n self.SaveDFButton 
= QtWidgets.QToolButton(self.gridLayoutWidget_4)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.SaveDFButton.sizePolicy().hasHeightForWidth())\n self.SaveDFButton.setSizePolicy(sizePolicy)\n self.SaveDFButton.setObjectName(\"SaveDFButton\")\n self.UtilitiesLayout.addWidget(self.SaveDFButton, 0, 0, 1, 1)\n self.Tools.addWidget(self.UtilitiesBox)\n\n \n #clean box set up\n self.CleanBox = QtWidgets.QGroupBox(self.verticalLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.CleanBox.sizePolicy().hasHeightForWidth())\n self.CleanBox.setSizePolicy(sizePolicy)\n self.CleanBox.setObjectName(\"CleanBox\")\n self.gridLayoutWidget = QtWidgets.QWidget(self.CleanBox)\n self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 20, 681, 171))\n self.gridLayoutWidget.setObjectName(\"gridLayoutWidget\")\n self.CleanLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)\n self.CleanLayout.setContentsMargins(0, 0, 0, 0)\n self.CleanLayout.setSpacing(0)\n self.CleanLayout.setObjectName(\"CleanLayout\")\n \n #remove value in column\n self.RemoveValueInColumnButton = QtWidgets.QToolButton(self.gridLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.RemoveValueInColumnButton.sizePolicy().hasHeightForWidth())\n self.RemoveValueInColumnButton.setSizePolicy(sizePolicy)\n self.RemoveValueInColumnButton.setObjectName(\"RemoveValueInColumnButton\")\n self.CleanLayout.addWidget(self.RemoveValueInColumnButton, 1, 0, 1, 1)\n \n #column titles to lower case\n self.ColumnTitlesLowerCaseButton = QtWidgets.QToolButton(self.gridLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.ColumnTitlesLowerCaseButton.sizePolicy().hasHeightForWidth())\n self.ColumnTitlesLowerCaseButton.setSizePolicy(sizePolicy)\n self.ColumnTitlesLowerCaseButton.setObjectName(\"ColumnTitlesLowerCaseButton\")\n self.CleanLayout.addWidget(self.ColumnTitlesLowerCaseButton, 0, 1, 1, 1)\n \n #DropblankColumns\n self.DropBlankColumnsButton = QtWidgets.QToolButton(self.gridLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.DropBlankColumnsButton.sizePolicy().hasHeightForWidth())\n self.DropBlankColumnsButton.setSizePolicy(sizePolicy)\n self.DropBlankColumnsButton.setObjectName(\"DropBlankColumnsButton\")\n self.CleanLayout.addWidget(self.DropBlankColumnsButton, 0, 0, 1, 1)\n \n #Remove White Space In Headers\n self.RemoveWhiteSpaceInHeadersButton = QtWidgets.QToolButton(self.gridLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.RemoveWhiteSpaceInHeadersButton.sizePolicy().hasHeightForWidth())\n 
self.RemoveWhiteSpaceInHeadersButton.setSizePolicy(sizePolicy)\n self.RemoveWhiteSpaceInHeadersButton.setObjectName(\"RemoveWhiteSpaceInHeadersButton\")\n self.CleanLayout.addWidget(self.RemoveWhiteSpaceInHeadersButton, 0, 2, 1, 1)\n \n #strip whitespace in column\n self.StripWhiteSpaceInColumnsButton = QtWidgets.QToolButton(self.gridLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.StripWhiteSpaceInColumnsButton.sizePolicy().hasHeightForWidth())\n self.StripWhiteSpaceInColumnsButton.setSizePolicy(sizePolicy)\n self.StripWhiteSpaceInColumnsButton.setObjectName(\"StripWhiteSpaceInColumnsButton\")\n self.CleanLayout.addWidget(self.StripWhiteSpaceInColumnsButton, 1, 1, 1, 1)\n self.Tools.addWidget(self.CleanBox)\n \n #Set Up Transform Box\n self.TransformBox = QtWidgets.QGroupBox(self.verticalLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.TransformBox.sizePolicy().hasHeightForWidth())\n self.TransformBox.setSizePolicy(sizePolicy)\n self.TransformBox.setObjectName(\"TransformBox\")\n self.gridLayoutWidget_2 = QtWidgets.QWidget(self.TransformBox)\n self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 20, 681, 171))\n self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\")\n self.TransformLayout = QtWidgets.QGridLayout(self.gridLayoutWidget_2)\n self.TransformLayout.setContentsMargins(0, 0, 0, 0)\n self.TransformLayout.setSpacing(0)\n self.TransformLayout.setObjectName(\"TransformLayout\")\n \n #advanced Mathematics button\n self.AdvancedMathematicsButton = QtWidgets.QToolButton(self.gridLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.AdvancedMathematicsButton.sizePolicy().hasHeightForWidth())\n self.AdvancedMathematicsButton.setSizePolicy(sizePolicy)\n self.AdvancedMathematicsButton.setObjectName(\"AdvancedMathematicsButton\")\n self.TransformLayout.addWidget(self.AdvancedMathematicsButton, 1, 2, 1, 1)\n \n #Melt Data Button\n self.MeltTransposeDataButton = QtWidgets.QToolButton(self.gridLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.MeltTransposeDataButton.sizePolicy().hasHeightForWidth())\n self.MeltTransposeDataButton.setSizePolicy(sizePolicy)\n self.MeltTransposeDataButton.setObjectName(\"MeltTransposeDataButton\")\n self.TransformLayout.addWidget(self.MeltTransposeDataButton, 0, 1, 1, 1)\n \n #Cast Data type button \n self.CastDataTypesButton = QtWidgets.QToolButton(self.gridLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.CastDataTypesButton.sizePolicy().hasHeightForWidth())\n self.CastDataTypesButton.setSizePolicy(sizePolicy)\n self.CastDataTypesButton.setObjectName(\"CastDataTypesButton\")\n self.TransformLayout.addWidget(self.CastDataTypesButton, 0, 0, 1, 1)\n \n #Basic Mathematics button\n 
self.BasicMathematicsButton = QtWidgets.QToolButton(self.gridLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.BasicMathematicsButton.sizePolicy().hasHeightForWidth())\n self.BasicMathematicsButton.setSizePolicy(sizePolicy)\n self.BasicMathematicsButton.setObjectName(\"BasicMathematicsButton\")\n self.TransformLayout.addWidget(self.BasicMathematicsButton, 0, 2, 1, 1)\n \n #Substitute Values Button\n self.SubstituteValuesButton = QtWidgets.QToolButton(self.gridLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.SubstituteValuesButton.sizePolicy().hasHeightForWidth())\n self.SubstituteValuesButton.setSizePolicy(sizePolicy)\n self.SubstituteValuesButton.setObjectName(\"SubstituteValuesButton\")\n self.TransformLayout.addWidget(self.SubstituteValuesButton, 1, 0, 1, 1)\n self.Tools.addWidget(self.TransformBox)\n \n #analyse Box\n self.AnalyseBox = QtWidgets.QGroupBox(self.verticalLayoutWidget_2)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.AnalyseBox.sizePolicy().hasHeightForWidth())\n self.AnalyseBox.setSizePolicy(sizePolicy)\n self.AnalyseBox.setObjectName(\"AnalyseBox\")\n self.gridLayoutWidget_5 = QtWidgets.QWidget(self.AnalyseBox)\n self.gridLayoutWidget_5.setGeometry(QtCore.QRect(10, 20, 681, 141))\n self.gridLayoutWidget_5.setObjectName(\"gridLayoutWidget_5\")\n self.AnalyseLayout = QtWidgets.QGridLayout(self.gridLayoutWidget_5)\n self.AnalyseLayout.setContentsMargins(0, 0, 0, 0)\n self.AnalyseLayout.setSpacing(0)\n self.AnalyseLayout.setObjectName(\"AnalyseLayout\")\n self.CountRowsButton = QtWidgets.QToolButton(self.gridLayoutWidget_5)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.CountRowsButton.sizePolicy().hasHeightForWidth())\n self.CountRowsButton.setSizePolicy(sizePolicy)\n self.CountRowsButton.setObjectName(\"CountRowsButton\")\n self.AnalyseLayout.addWidget(self.CountRowsButton, 0, 3, 1, 1)\n self.sumColumnButton = QtWidgets.QToolButton(self.gridLayoutWidget_5)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.sumColumnButton.sizePolicy().hasHeightForWidth())\n self.sumColumnButton.setSizePolicy(sizePolicy)\n self.sumColumnButton.setObjectName(\"sumColumnButton\")\n self.AnalyseLayout.addWidget(self.sumColumnButton, 0, 2, 1, 1)\n self.PrintDataTypesButton = QtWidgets.QToolButton(self.gridLayoutWidget_5)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.PrintDataTypesButton.sizePolicy().hasHeightForWidth())\n self.PrintDataTypesButton.setSizePolicy(sizePolicy)\n self.PrintDataTypesButton.setObjectName(\"PrintDataTypesButton\")\n self.AnalyseLayout.addWidget(self.PrintDataTypesButton, 0, 0, 1, 
1)\n self.MemoryUsageEstimateButton = QtWidgets.QToolButton(self.gridLayoutWidget_5)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.MemoryUsageEstimateButton.sizePolicy().hasHeightForWidth())\n self.MemoryUsageEstimateButton.setSizePolicy(sizePolicy)\n self.MemoryUsageEstimateButton.setObjectName(\"MemoryUsageEstimateButton\")\n self.AnalyseLayout.addWidget(self.MemoryUsageEstimateButton, 1, 0, 1, 1)\n self.FindNullsButton = QtWidgets.QToolButton(self.gridLayoutWidget_5)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.FindNullsButton.sizePolicy().hasHeightForWidth())\n self.FindNullsButton.setSizePolicy(sizePolicy)\n self.FindNullsButton.setObjectName(\"FindNullsButton\")\n self.AnalyseLayout.addWidget(self.FindNullsButton, 0, 1, 1, 1)\n self.StorageUsageEstimateButton = QtWidgets.QToolButton(self.gridLayoutWidget_5)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.StorageUsageEstimateButton.sizePolicy().hasHeightForWidth())\n self.StorageUsageEstimateButton.setSizePolicy(sizePolicy)\n self.StorageUsageEstimateButton.setObjectName(\"StorageUsageEstimateButton\")\n self.AnalyseLayout.addWidget(self.StorageUsageEstimateButton, 1, 1, 1, 1)\n self.PrintDataTypesButton.raise_()\n self.FindNullsButton.raise_()\n self.sumColumnButton.raise_()\n self.CountRowsButton.raise_()\n self.MemoryUsageEstimateButton.raise_()\n self.StorageUsageEstimateButton.raise_()\n self.Tools.addWidget(self.AnalyseBox)\n DataSpanner.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(DataSpanner)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1440, 22))\n self.menubar.setNativeMenuBar(False)\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n self.menuFile.setObjectName(\"menuFile\")\n self.menuImport_Data = QtWidgets.QMenu(self.menubar)\n self.menuImport_Data.setObjectName(\"menuImport_Data\")\n self.menuExport_Data = QtWidgets.QMenu(self.menubar)\n self.menuExport_Data.setObjectName(\"menuExport_Data\")\n self.menuCloud_Settings = QtWidgets.QMenu(self.menubar)\n self.menuCloud_Settings.setObjectName(\"menuCloud_Settings\")\n DataSpanner.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(DataSpanner)\n self.statusbar.setObjectName(\"statusbar\")\n DataSpanner.setStatusBar(self.statusbar)\n self.CredentialsAWS = QtWidgets.QAction(DataSpanner)\n self.CredentialsAWS.setObjectName(\"CredentialsAWS\")\n self.CredentialsAzure = QtWidgets.QAction(DataSpanner)\n self.CredentialsAzure.setObjectName(\"CredentialsAzure\")\n self.ExportCSV = QtWidgets.QAction(DataSpanner)\n self.ExportCSV.setObjectName(\"ExportCSV\")\n self.ExportPSV = QtWidgets.QAction(DataSpanner)\n self.ExportPSV.setObjectName(\"ExportPSV\")\n self.ExportExcel = QtWidgets.QAction(DataSpanner)\n self.ExportExcel.setObjectName(\"ExportExcel\")\n self.ExportText = QtWidgets.QAction(DataSpanner)\n self.ExportText.setObjectName(\"ExportText\")\n self.ImportCSV = QtWidgets.QAction(DataSpanner)\n self.ImportCSV.setObjectName(\"ImportCSV\")\n self.ImportExcel = QtWidgets.QAction(DataSpanner)\n 
self.ImportExcel.setObjectName(\"ImportExcel\")\n self.Quit = QtWidgets.QAction(DataSpanner)\n self.Quit.setObjectName(\"Quit\")\n self.menuFile.addAction(self.Quit)\n self.menuImport_Data.addAction(self.ImportCSV)\n self.menuImport_Data.addAction(self.ImportExcel)\n self.menuExport_Data.addAction(self.ExportCSV)\n self.menuExport_Data.addAction(self.ExportPSV)\n self.menuExport_Data.addAction(self.ExportExcel)\n self.menuExport_Data.addAction(self.ExportText)\n self.menuCloud_Settings.addAction(self.CredentialsAWS)\n self.menuCloud_Settings.addAction(self.CredentialsAzure)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuImport_Data.menuAction())\n self.menubar.addAction(self.menuExport_Data.menuAction())\n self.menubar.addAction(self.menuCloud_Settings.menuAction())\n self.w = None\n\n self.retranslateUi(DataSpanner)\n self.CodeSwitch.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(DataSpanner)\n\n#Self Dependant Buttons \n self.CheckDataFrame.clicked.connect(self.checker)\n self.RestoreDFButton.clicked.connect(self.RestoreDF)\n self.SaveDFButton.clicked.connect(self.SaveDF)\n self.ColumnTitlesLowerCaseButton.clicked.connect(self.columnLowerCase)\n self.DropBlankColumnsButton.clicked.connect(self.dropBlankColumns)\n self.RemoveWhiteSpaceInHeadersButton.clicked.connect(self.RemoveSpacesInColumnTitle)\n self.CountRowsButton.clicked.connect(self.RowCount)\n self.PrintDataTypesButton.clicked.connect(self.printDatatypes)\n self.MemoryUsageEstimateButton.clicked.connect(self.memoryUsage)\n self.ImportCSV.triggered.connect(self.openCSV)\n self.ImportExcel.triggered.connect(self.openxls)\n self.StorageUsageEstimateButton.clicked.connect(self.StorageUsage)\n\n#Open Window Button\n \n self.CastDataTypesButton.clicked.connect(self.dataTypeWindowOpen)\n self.StripWhiteSpaceInColumnsButton.clicked.connect(self.whiteSpaceStripWindowOpen)\n self.sumColumnButton.clicked.connect(self.sumColumnWindowOpen)\n self.MeltTransposeDataButton.clicked.connect(self.melterWindowOpen)\n self.FindNullsButton.clicked.connect(self.nullFinderWindowOpen)\n self.SubstituteValuesButton.clicked.connect(self.subValueWindowOpen)\n self.BasicMathematicsButton.clicked.connect(self.basicMathWindowOpen)\n\n #slot connections\n @QtCore.pyqtSlot(str)\n def updateOutput(self, Output):\n self.OutputText.insertPlainText(Output)\n self.OutputText.repaint()\n\n @QtCore.pyqtSlot(pd.DataFrame)\n def updateDF(self, df):\n self.OutputText.insertPlainText(df.to_string())\n self.OutputText.repaint()\n\n @QtCore.pyqtSlot(str)\n def addPythoncode(self, newCode):\n self.PandasCode.insertPlainText(newCode)\n self.PandasCode.repaint()\n\n #Window Definitions\n def subValueWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_Replacer()\n self.ui.setupUi(self.window, df)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignalReplacer.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def sumColumnWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_Summer()\n self.ui.setupUi(self.window, df, self.OutputText)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignalSum.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint() \n\n 
def melterWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_Melter()\n self.ui.setupUi(self.window, df)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignalMelter.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def dataTypeWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_DataTypeWindow()\n self.ui.setupUi(self.window, df)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignalDataType.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def nullFinderWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_Unnull()\n self.ui.setupUi(self.window, df)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignalNulls.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def whiteSpaceStripWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_RemoveWhiteSpace()\n self.ui.setupUi(self.window, df)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignal.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def basicMathWindowOpen(self):\n try:\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_basicMathWindow()\n self.ui.setupUi(self.window, df)\n self.ui.resultSignal.connect(self.updateOutput)\n self.ui.dfSignal.connect(self.updateDF)\n self.ui.pandasSignal.connect(self.addPythoncode)\n self.window.show()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n\n def retranslateUi(self, DataSpanner):\n _translate = QtCore.QCoreApplication.translate\n DataSpanner.setWindowTitle(_translate(\"DataSpanner\", \"DataSpanner\"))\n self.Outputs_Label.setText(_translate(\"DataSpanner\", \"Outputs\"))\n self.CodeSwitch.setTabText(self.CodeSwitch.indexOf(self.Pandas), _translate(\"DataSpanner\", \"Pandas\"))\n self.CodeSwitch.setTabText(self.CodeSwitch.indexOf(self.PySpark), _translate(\"DataSpanner\", \"PySpark\"))\n self.CheckDataFrame.setText(_translate(\"DataSpanner\", \"Check Data Frame\"))\n self.ToolsLabel.setText(_translate(\"DataSpanner\", \"Tools\"))\n self.UtilitiesBox.setTitle(_translate(\"DataSpanner\", \"Utilities\"))\n self.MakeCSVButton.setText(_translate(\"DataSpanner\", \"Make CSV\"))\n self.RestoreDFButton.setText(_translate(\"DataSpanner\", \"Restore DF\"))\n self.SaveDFButton.setText(_translate(\"DataSpanner\", \"Save DF\"))\n self.CleanBox.setTitle(_translate(\"DataSpanner\", \"Clean\"))\n self.RemoveValueInColumnButton.setText(_translate(\"DataSpanner\", \"Remove Value in Column\"))\n self.ColumnTitlesLowerCaseButton.setText(_translate(\"DataSpanner\", \"Column Titles Lower Case\"))\n self.DropBlankColumnsButton.setText(_translate(\"DataSpanner\", \"Drop Blank Columns\"))\n self.RemoveWhiteSpaceInHeadersButton.setText(_translate(\"DataSpanner\", \"Remove White Space In Headers\"))\n self.StripWhiteSpaceInColumnsButton.setText(_translate(\"DataSpanner\", \"Strip White 
Space in Columns\"))\n self.TransformBox.setTitle(_translate(\"DataSpanner\", \"Transform\"))\n self.AdvancedMathematicsButton.setText(_translate(\"DataSpanner\", \"Advanced Mathematic Functions\"))\n self.MeltTransposeDataButton.setText(_translate(\"DataSpanner\", \"Melt/Transpose Data\"))\n self.CastDataTypesButton.setText(_translate(\"DataSpanner\", \"Cast Data Types\"))\n self.BasicMathematicsButton.setText(_translate(\"DataSpanner\", \"Basic Mathematics\"))\n self.SubstituteValuesButton.setText(_translate(\"DataSpanner\", \"Substitute Values\"))\n self.AnalyseBox.setTitle(_translate(\"DataSpanner\", \"Analyse\"))\n self.CountRowsButton.setText(_translate(\"DataSpanner\", \"Count Rows\"))\n self.sumColumnButton.setText(_translate(\"DataSpanner\", \"Sum Column\"))\n self.PrintDataTypesButton.setText(_translate(\"DataSpanner\", \"Print Data Types\"))\n self.MemoryUsageEstimateButton.setText(_translate(\"DataSpanner\", \"Memory Usage Estimate\"))\n self.FindNullsButton.setText(_translate(\"DataSpanner\", \"Find Nulls\"))\n self.StorageUsageEstimateButton.setText(_translate(\"DataSpanner\", \"Storage Usage Estimate\"))\n self.menuFile.setTitle(_translate(\"DataSpanner\", \"File\"))\n self.menuImport_Data.setTitle(_translate(\"DataSpanner\", \"Import Data\"))\n self.menuExport_Data.setTitle(_translate(\"DataSpanner\", \"Export Data\"))\n self.menuCloud_Settings.setTitle(_translate(\"DataSpanner\", \"Cloud Settings\"))\n self.CredentialsAWS.setText(_translate(\"DataSpanner\", \"AWS\"))\n self.CredentialsAzure.setText(_translate(\"DataSpanner\", \"Azure\"))\n self.ExportCSV.setText(_translate(\"DataSpanner\", \"CSV\"))\n self.ExportPSV.setText(_translate(\"DataSpanner\", \"PSV\"))\n self.ExportExcel.setText(_translate(\"DataSpanner\", \"Excel\"))\n self.ExportText.setText(_translate(\"DataSpanner\", \"Text\"))\n self.ImportCSV.setText(_translate(\"DataSpanner\", \"CSV\"))\n self.ImportExcel.setText(_translate(\"DataSpanner\", \"Excel\"))\n self.Quit.setText(_translate(\"DataSpanner\", \"Quit\"))\n\n\n\n def checker(self):\n try:\n self.OutputText.insertPlainText(\"\\nData as at: \" + str(datetime.datetime.now()) +'\\n' + df.to_string() + '\\n\\n')\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n \n def dropBlankColumns(self):\n global df\n try:\n df=df\n df=df.loc[:, ~df.columns.str.contains('^Unnamed')]\n self.PandasCode.insertPlainText(\"\\ndf=df.loc[:, ~df.columns.str.contains('^Unnamed')]\")\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n \n \n # you can easily make a xls version of this, remember to alter the inital dir\n def openCSV(self):\n try:\n path = QtWidgets.QFileDialog.getOpenFileName(None, \"Select CSV\", \"*/Escanor\")[0]\n global df\n df=pd.read_csv(filepath_or_buffer=path)\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def openxls(self):\n try:\n path = QtWidgets.QFileDialog.getOpenFileName(None, \"xls\", \"/Desktop\")[0]\n global df\n df=pd.read_excel(io=path, filepath_or_buffer=path)\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n \n # ## TO DO: let them name their results themselves, not you. 
\n # def makecsv():\n # try:\n # df.to_csv(path_or_buf=str(path)+ \"results.csv\")\n #self.OutputText.repaint()\n # except Exception as e:\n # self.OutputText.insertPlainText('\\n'+str(e)+'\\n') \n #self.OutputText.repaint()\n\n\n def printDatatypes(self):\n global df\n try:\n df=df\n self.OutputText.insertPlainText(str(df.dtypes))\n self.PandasCode.insertPlainText(f\"\"\"\\n(df.dtypes)\"\"\")\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n \n def columnLowerCase(self):\n global df\n try:\n df=df\n df.columns=map(str.lower, df.columns)\n self.PandasCode.insertPlainText(f\"\"\"\\ndf.columns=map(str.lower, df.columns)\"\"\")\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n \n def RemoveSpacesInColumnTitle(self):\n global df\n try:\n df=df\n df.columns=df.columns.str.replace(' ', '_')\n self.PandasCode.insertPlainText(f\"\"\"\\ndf.columns=df.columns.str.replace(' ', '_')\"\"\")\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n\n def SaveDF(self):\n global df\n global dfsaved\n try:\n dfsaved=df\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def RestoreDF(self):\n global df\n global dfsaved\n try:\n df=dfsaved\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def RowCount(self):\n global df\n try:\n self.OutputText.insertPlainText('\\n'+str(len(df)+'\\n'))\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def memoryUsage(self):\n global df\n try:\n self.OutputText.insertPlainText('\\n'+ str(df.memory_usage(index=True).sum())+' mb\\n')\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n def StorageUsage(self):\n global df\n try:\n self.OutputText.insertPlainText('\\n'+ str(float(sys.getsizeof(df)/1000000))+'GB\\n')\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n# def AWSreader():\n# credFNameEntry.delete('1.0', END)\n# credEntry.delete('1.0', END)\n# global credFName\n# credFName=filedialog.askopenfilename(initialdir=r\"\")\n# credFNameEntry.insert(END, credFName)\n# with open(credFName, \"r\") as f:\n# credEntry.insert(END, f.read())\n \n# def AWSwriter():\n# fileName=credFNameEntry.get('1.0', END)\n# with open(fileName.rstrip(), 'w') as cred_obj:\n# cred_obj.write(credEntry.get('1.0', 'end-1c'))\n# cred_obj.close()\n \n# def S3BucketData():\n# global df\n# targetObject=s3_client.get_object(Bucket=bucketEntry.get('1.0', 'end-1c'), Key=keyEntry.get('1.0', 'end-1c'))['Body'].read()\n# if sheetEntry.get('1.0', 'end-1c') == '':\n# df=pd.read_excel(io.BytesIO(targetObject), encoding='utf-8')\n# else:\n# df=pd.read_excel(io.BytesIO(targetObject), encoding='utf-8', sheet_name=sheetEntry.get('1.0', 'end-1c'))\n\n\n## TO DO:\n## row sum\n## row multi\n## row divide\n## filter for value - see and shape\n## rename columns function\n## none of your entry fields have descriptions to prompt\n## join function\n## union function\n\n\n\n\n def dropper(self):\n global df\n global columnsToDrop\n try:\n 
df=df.drop(columns=columnsToDrop.get().split(', '))\n self.OutputText.repaint()\n except Exception as e:\n self.OutputText.insertPlainText('\\n'+str(e)+'\\n')\n self.OutputText.repaint()\n\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n DataSpanner = QtWidgets.QMainWindow()\n ui = Ui_DataSpanner()\n ui.setupUi(DataSpanner)\n DataSpanner.show()\n sys.exit(app.exec_())\n\n\n\n","repo_name":"OrionAlpha48/EscDatatool","sub_path":"Escanor/DspannerMac2.py","file_name":"DspannerMac2.py","file_ext":"py","file_size_in_byte":41625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"15015354016","text":"from flask import Flask, render_template, request, session\nfrom flask_session import Session\n\napp = Flask(__name__)\n\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = \"filesystem\"\nSession(app)\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n\tif request.method == \"POST\":\n\t\tif session[\"notes\"] is None:\n\t\t\tsession[\"notes\"] = []\n\n\t\tnote = request.form['note']\n\t\tif note:\n\t\t\tsession[\"notes\"].append(note)\n\t\telse:\n\t\t\treturn render_template(\"index.html\", notes=session[\"notes\"], msg1_text=\"Note should not be empty!\")\n\n\treturn render_template(\"index.html\", notes=session[\"notes\"])\n\n@app.route(\"/remove\", methods=[\"GET\", \"POST\"])\ndef remove():\n\tif request.method == \"POST\":\n\t\tindex = int(request.form['index'])\n\t\tif index <= len(session[\"notes\"]) and index>=0:\n\t\t\tdel session[\"notes\"][index-1]\n\t\telse:\n\t\t\treturn render_template(\"index.html\", notes=session[\"notes\"], msg2_text=\"Invalid index!\")\n\n\t\treturn render_template(\"index.html\", notes=session[\"notes\"])\n\telse:\n\t\treturn \"Submit the form instead!\"","repo_name":"KaranSingh07/lecture2","sub_path":"testProject/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"16464556511","text":"# Script to train machine learning model.\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom joblib import dump\nfrom ml.data import process_data\nfrom ml.model import train_model, compute_model_metrics, inference, slice_performance\nimport json\n# Add the necessary imports for the starter code.\n\n# Add code to load in the data.\ndata = pd.read_csv('../data/census_cleaned.csv')\n# Optional enhancement, use K-fold cross validation instead of a train-test split.\ntrain, test = train_test_split(data, test_size=0.20)\n\ncat_features = [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n]\nX_train, y_train, encoder, lb = process_data(\n train, categorical_features=cat_features, label=\"salary\", training=True\n)\n# Proces the test data with the process_data function.\nX_test, y_test, _, _ = process_data(test, categorical_features=cat_features, label='salary', training=False,\n\t\t\tencoder=encoder, lb=lb)\n\n# Train and save a model.\nmodel = train_model(X_train, y_train)\n\ndump(model, '../model/classifier.joblib')\ndump(encoder, '../model/encoder.joblib')\ndump(lb, '../model/binariser.joblib')\n\n\n# Predict test split labels\npredicted_labels = inference(model, X_test)\n\n\n# Overall performance\noverall_performance = {}\npr, re, fb = compute_model_metrics(y_test, 
predicted_labels)\noverall_performance['precision'] = pr\noverall_performance['recall'] = re\noverall_performance['fbeta'] = fb\nwith open('Metrics.json', 'w') as outfile:\n json.dump(overall_performance, outfile)\n\n# Slices performance prediction\ntest['label'] = y_test\ntest['prediction'] = predicted_labels\n\nperformance_by_feature = {}\nfor cat in cat_features:\n performance_by_feature[cat] = slice_performance(test, cat)\n\nwith open('slice_output.txt', 'w') as outfile:\n json.dump(performance_by_feature, outfile)\n","repo_name":"hseelawi/projcet3-udacity-mlops","sub_path":"starter/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"2882364493","text":"import argparse\nimport os\nfrom datetime import datetime\nimport sys\nimport uuid\n\nfrom source.MANTIS import run_mantis, run_mantis_test,print_citation_mantis\nfrom source.MANTIS_NLP import test_nlp\nfrom source.MANTIS_Assembler import add_slash, \\\n get_path_level, \\\n check_installation, \\\n extract_nog_metadata, \\\n setup_databases, \\\n merge_hmm_folder\nfrom source.utils import MANTIS_FOLDER\n\n\nif __name__ == '__main__':\n print('Executing command:\\n', ' '.join(sys.argv))\n parser = argparse.ArgumentParser(description='___ ___ _ _ \\n'\n '| \\\\/ | | | (_) \\n'\n '| . . | __ _ _ __ | |_ _ ___ \\n'\n '| |\\\\/| | / _` || \\'_ \\\\ | __|| |/ __|\\n'\n '| | | || (_| || | | || |_ | |\\\\__ \\\\\\n'\n '\\\\_| |_/ \\\\__,_||_| |_| \\\\__||_||___/, a consensus driven protein function annotation tool\\n'\n , formatter_class=argparse.RawTextHelpFormatter)\n #run mantis\n parser.add_argument('execution_type',\n help='Please choose from :\\n\\trun_mantis\\n\\tsetup_databases\\n\\tmerge_hmm_folder\\n\\textract_nog_metadata\\n\\tcheck_installation\\n\\trun_test\\n\\n' +\n 'If this is your first time running this software, please run to download and unzip the necessary files.\\n'\n 'If you have custom hmms, please include them in the folder.\\n' +\n 'If your custom hmms are split 1 file/1 hmm please use followed by the hmm folder path. These will be automatically pressed\\n' +\n 'Custom hmms need to be pressed, to do so just run HMMER\\'s hmmpress.' +\n 'To check recognized hmms please run \\n\\n' +\n 'If you have a taxonomic classification of this sample, include <-od> followed by the organism name or NCBI taxon ID\\n' +\n 'For multiple protein fastas annotations, use , with a tsv file path.\\n' +\n 'This file should have the following structure:\\n' +\n '\\tQuery name\\tQuery path\\tOrganism details\\n' +\n '\\tquery_name_1\\ttarget_path_1\\t561\\n' +\n '\\tquery_name_2\\ttarget_path_2\\tProteobacteria\\n' +\n '\\tquery_name_3\\ttarget_path_3\\t\\n' +\n '\\tquery_name_4\\ttarget_path_4\\tEscherichia coli\\n',\n choices=['run_mantis', 'setup_databases', 'merge_hmm_folder', 'check_installation', 'run_test',\n 'extract_nog_metadata','test_nlp','citation'])\n parser.add_argument('-t', '--target', help='[required]\\tAnnotation target file path. Required when using .')\n parser.add_argument('-o', '--output_folder', help='[optional]\\tOutput folder path')\n parser.add_argument('-mc', '--mantis_config',\n help='Custom MANTIS.config file. Default is in Mantis\\' folder')\n parser.add_argument('-et', '--evalue_threshold',\n help='[optional]\\tCustom e-value threshold. Default is 1e-3. 
You can use to take into account sequence length.')\n parser.add_argument('-ov', '--overlap_value',\n help='[optional]\\tcustom value for the allowed overlap between hits! Default is 0.1, maximum is 0.3')\n parser.add_argument('-mco', '--minimum_consensus_overlap',\n help='[optional]\\tcustom value for the minimum overlap between hits when generating the consensus annotation. Default is 0.7, 0 to accept any consistent hit, regardless of coordinate overlap.')\n parser.add_argument('-da', '--domain_algorithm', choices=['dfs', 'heuristic', 'bpo'],\n help='[optional]\\tChoose how multiple domains should be processed. Default is dfs, more information on the algorithms in the wiki page.')\n parser.add_argument('-tl', '--time_limit',\n help='[optional]\\ttime limit in seconds when running Mantis\\' DFS algorithm. Default is 60 seconds')\n parser.add_argument('-od', '--organism_details',\n help='[optional]\\tIf your target fasta has been taxonimically classified please introduce details.\\n'\n '\\t\\tTwo formats are allowed:\\n'\n '\\t\\t\\ttaxon name, e.g. \"Proteobacteria\" or \"Escherichia coli\"\\n'\n '\\t\\t\\tNCBI taxon ID, e.g.: 561 for Escherichia coli\\n'\n 'Providing NCBI IDs is faster and safer.')\n parser.add_argument('-gc', '--genetic_code',\n help='[optional]\\tIf you want Mantis to translate your target fasta, please provide a genetic code. Default is 11. \\n'\n '\\t\\tFor further details please see https://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=cgencodes\\n')\n parser.add_argument('-k', '--keep_files', action='store_true',\n help='[optional]\\tKeep intermediary output files')\n parser.add_argument('-sc', '--skip_consensus', action='store_true',\n help='[optional]\\tSkip consensus generation.')\n parser.add_argument('-nuf', '--no_unifunc', action='store_true',\n help='[optional]\\tdo not use UniFunc for similarity analysis during consensus generation.')\n parser.add_argument('-nce', '--no_consensus_expansion', action='store_true',\n help='[optional]\\tdo not expand hits during consensus generation.')\n parser.add_argument('-km', '--kegg_matrix', action='store_true',\n help='[optional]\\tgenerate KEGG modules completeness matrix.')\n parser.add_argument('-fo', '--force_output', action='store_true',\n help='[optional]\\tIf you would like to force the output to the folder you specified. This may result in errrors!')\n #setup databases\n\n\n parser.add_argument('-f', '--force_download', action='store_true',\n help='[optional]\\tIf you would like to force the download of the databases when running ')\n\n\n #general args\n parser.add_argument('-dw', '--default_workers',\n help='[optional]\\tnumber of virtual workers used by Mantis. This is different from the physical . Default number of workers corresponds to the number of physical cores.')\n parser.add_argument('-cs', '--chunk_size', help='[optional]\\tchunk size when running Mantis')\n parser.add_argument('-ht', '--hmmer_threads', help='[optional]\\tnumber of threads used by HMMER. Default is 1.')\n parser.add_argument('-c', '--cores',\n help='[optional]\\tset the number of physical cores used by Mantis. Mantis uses all available physical cores by default.')\n parser.add_argument('-m', '--memory',\n help='[optional]\\tset the amount of RAM used by Mantis (in GB). 
Mantis uses all available RAM by default.')\n\n #developers only / testing tools\n parser.add_argument('-bcf', '--best_combo_formula', choices=[str(i) for i in range(1,13)],\n help='[developers_only]\\tChoose which scoring formula to use to determine the best combination.')\n parser.add_argument('-st', '--sorting_type', choices=['evalue', 'bitscore'],\n help='[developers_only]\\tPlease choose the score to sort hits.')\n parser.add_argument('-smm', '--skip_managed_memory', action='store_true',\n help='[developers_only]\\tskip memory management. No HMMER memory management (less stable), but may allow for runs to finish in low memory environments')\n\n\n\n\n args = parser.parse_args()\n # if no input is given , arg is of class None. If it's a store_true or store_false , arg is bool\n # otherwise it's a str\n if args.execution_type == 'run_mantis':\n target_path = args.target\n output_folder = args.output_folder\n mantis_config = args.mantis_config\n evalue_threshold = args.evalue_threshold\n overlap_value = args.overlap_value\n minimum_consensus_overlap = args.minimum_consensus_overlap\n organism_details = args.organism_details\n genetic_code = args.genetic_code\n domain_algorithm = args.domain_algorithm\n best_combo_formula = args.best_combo_formula\n sorting_type = args.sorting_type\n keep_files = args.keep_files\n skip_consensus = args.skip_consensus\n skip_managed_memory = args.skip_managed_memory\n no_consensus_expansion = args.no_consensus_expansion\n no_unifunc = args.no_unifunc\n kegg_matrix = args.kegg_matrix\n force_output = args.force_output\n default_workers = args.default_workers\n chunk_size = args.chunk_size\n time_limit = args.time_limit\n hmmer_threads = args.hmmer_threads\n cores = args.cores\n memory = args.memory\n if target_path:\n if os.path.exists(target_path):\n if not output_folder:\n datetime_str = str(datetime.now().strftime(\"%Y-%m-%dT%H%M%S\"))\n output_folder = add_slash(os.getcwd()) + get_path_level(target_path,remove_extension=True) + '_' + datetime_str\n print(f'No output folder provided! Saving data to: {output_folder}')\n if os.path.exists(output_folder):\n if not force_output and os.listdir(output_folder):\n datetime_str = str(datetime.now().strftime(\"%Y-%m-%dT%H%M%S\"))\n hex_random= '_hex_'+uuid.uuid4().hex[:10]\n output_folder += '_' + datetime_str+hex_random\n print(f'The output folder already contains something! 
New output folder will be: {output_folder}')\n output_folder = add_slash(output_folder)\n\n run_mantis(target_path=target_path,\n output_folder=output_folder,\n mantis_config=mantis_config,\n evalue_threshold=evalue_threshold,\n overlap_value=overlap_value,\n minimum_consensus_overlap=minimum_consensus_overlap,\n organism_details=organism_details,\n genetic_code=genetic_code,\n domain_algorithm=domain_algorithm,\n best_combo_formula=best_combo_formula,\n sorting_type=sorting_type,\n keep_files=keep_files,\n skip_consensus=skip_consensus,\n skip_managed_memory=skip_managed_memory,\n no_consensus_expansion=no_consensus_expansion,\n no_unifunc=no_unifunc,\n kegg_matrix=kegg_matrix,\n default_workers=default_workers,\n chunk_size=chunk_size,\n time_limit=time_limit,\n hmmer_threads=hmmer_threads,\n cores=cores,\n memory=memory,\n )\n print_citation_mantis()\n\n else:\n print('Target path not found, quitting now!')\n else:\n print(\"Missing target, quitting now!\")\n elif args.execution_type == 'setup_databases':\n mantis_config = args.mantis_config\n force_download = args.force_download\n chunk_size = args.chunk_size\n cores = args.cores\n setup_databases(force_download=force_download, chunk_size=chunk_size, mantis_config=mantis_config,cores=cores)\n print_citation_mantis()\n elif args.execution_type == 'citation':\n print_citation_mantis()\n elif args.execution_type == 'merge_hmm_folder':\n target = args.target\n merge_hmm_folder(target_folder=target)\n print_citation_mantis()\n elif args.execution_type == 'check_installation':\n mantis_config = args.mantis_config\n check_installation(mantis_config=mantis_config)\n elif args.execution_type == 'extract_nog_metadata':\n output_folder = args.output_folder\n if not output_folder:\n output_folder = add_slash(os.getcwd()) + 'metadata_extraction'\n print(f'No output folder provided! Saving data to: {output_folder}')\n output_folder = add_slash(output_folder)\n extract_nog_metadata(metadata_path=output_folder)\n print_citation_mantis()\n\n elif args.execution_type == 'test_nlp':\n test_nlp()\n elif args.execution_type == 'run_test':\n output_folder = args.output_folder\n if not output_folder:\n output_folder = add_slash(os.getcwd()) + 'test_run'\n print(f'No output folder provided! Saving data to: {output_folder}')\n if os.path.exists(output_folder):\n if os.listdir(output_folder):\n datetime_str = str(datetime.now().strftime(\"%Y-%m-%dT%H%M%S\"))\n hex_random = '_hex_' + uuid.uuid4().hex[:10]\n output_folder += '_' + datetime_str+hex_random\n print(f'The output folder already contains something! 
New output folder will be: {output_folder}')\n output_folder = add_slash(output_folder)\n run_mantis_test(target_path=add_slash(MANTIS_FOLDER + 'tests')+ 'test_sample.faa',\n output_folder=output_folder,\n mantis_config=add_slash(MANTIS_FOLDER + 'tests')+ 'test_MANTIS.config',\n )\n print_citation_mantis()\n\n","repo_name":"Mxrcon/mantis","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":13952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"97"} +{"seq_id":"40535561556","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, request, jsonify, session\nfrom Configuration.inventory.property_inv import add_property, update_property, find_properties_id_name,\\\n find_a_property, delete_property\nfrom Configuration.inventory.constraint_inv import delete_constraint\nfrom Configuration.inventory.con_include_p_inv import find_constraints_id_by_property,\\\n delete_relation_by_constraint\nfrom Configuration.model.property import Property\n\nproperty_manager = Blueprint('property_manager', __name__, template_folder='templates')\n\n@property_manager.route('/property/item', methods=['POST'])\ndef find_property_by_id():\n data = request.get_json()\n property = find_a_property(data['id'])\n return jsonify(property)\n\n@property_manager.route('/property/create', methods=['POST'])\ndef create_property():\n data = request.get_json()\n datatype = data['datatype']\n domin_display = data['domin_display']\n display_list = domin_display.split(',')\n vars_list = range(len(display_list))\n vars_str_list = [str(i) for i in vars_list]\n domin = ','.join(vars_str_list)\n component_id = session['component_id']\n property_id = add_property(component_id, data['name'], data['introduction'], datatype, data['dataunit'], domin, domin_display)\n resp = {'id': property_id}\n return jsonify(resp)\n\n@property_manager.route('/property/update', methods=['POST'])\ndef update_a_property():\n data = request.get_json()\n datatype = data['datatype']\n domin_display = data['domin_display']\n display_list = domin_display.split(',')\n vars_list = range(len(display_list))\n vars_str_list = [str(i) for i in vars_list]\n domin = ','.join(vars_str_list)\n update_property(data['id'], data['name'], data['introduction'], datatype, data['dataunit'], domin, domin_display)\n return 'success'\n\n@property_manager.route('/property/delete', methods=['POST'])\ndef delete_a_property():\n data = request.get_json()\n property_id = data['id']\n constraints_id = find_constraints_id_by_property(property_id)\n for c_id in constraints_id:\n delete_constraint(c_id)\n delete_relation_by_constraint(c_id)\n delete_property(property_id)\n return 'success'","repo_name":"Maximilianxu/Configuration","sub_path":"controller/property_manager.py","file_name":"property_manager.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"32508510016","text":"import time\r\nfrom tkinter import *\r\n\r\nroot= Tk()\r\n\r\nroot.geometry(\"350x150+0+0\")\r\nroot.title(\"Clock\")\r\n\r\nroot.configure(background=\"pink\")\r\n\r\nroot.resizable(0, 0)\r\n\r\n\r\n\r\ndef start():\r\n text= time.strftime(\"%H:%M:%S\")\r\n label.config(text=text)\r\n 
label.after(100,start)\r\n\r\n\r\nlabel=Label(root,font=(\"DS-digital\",50),bg='pink',fg='blue',bd=50)\r\nlabel.grid(row=0,column=1)\r\nstart()\r\nprint(\"done\")\r\nroot.mainloop()\r\n","repo_name":"ThinuraSandaken/Thinura","sub_path":"#Clock.py","file_name":"#Clock.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"23681509048","text":"from mdf import (\n MDFContext,\n evalnode,\n queuenode,\n nansumnode,\n cumprodnode,\n delaynode,\n ffillnode,\n vargroup,\n datanode,\n run,\n DataFrameBuilder\n)\n\nfrom datetime import datetime\nfrom numpy.testing.utils import assert_almost_equal\nimport pandas as pd\nimport numpy as np\nimport unittest\nimport logging\nimport operator\n\n# this is necessary to stop namespace from looking\n# too far up the stack as it looks for the first frame\n# not in the mdf package\n\n__package__ = None\n\n_logger = logging.getLogger(__name__)\n\nparams = vargroup(C=None, D=None)\n\n@queuenode\ndef queue_output():\n return A() + B()\n\n@queuenode\ndef queue_yield():\n while True:\n yield A() + B()\n\n@evalnode\ndef queue_filter():\n yield False\n while True:\n yield True\n\n@queuenode(filter=queue_filter)\ndef queue_filter_test():\n yield \"THIS SHOULD NOT BE IN THE QUEUE\"\n while True:\n yield 0\n\n@nansumnode()\ndef nansum_output():\n return A() + sometimes_nan_B()\n\n@cumprodnode()\ndef cumprod_output():\n return A() + B()\n\n@evalnode\ndef sometimes_nan_B():\n b = B()\n return np.nan if b % 2 else b\n\n@evalnode\ndef A():\n return params.C() * params.D()\n\n@evalnode\ndef B():\n accum = 0\n\n while True:\n yield accum\n accum += 1\n\n@evalnode\ndef A_plus_B():\n return A() + B()\n\n@evalnode\ndef Counter():\n accum = -2.0\n while True:\n if accum != 0.0:\n yield accum\n accum += 0.5\n\nclass DelayNodeTest(object): \n\n @evalnode\n def initial_value(cls):\n return 0\n\n @delaynode(periods=1, initial_value=initial_value)\n def delayed_node(cls):\n i = 1\n while True:\n yield i\n i += 1\n\n @delaynode(periods=1, initial_value=initial_value, lazy=True)\n def delayed_node_lazy(cls):\n return cls.delay_test()[-1]\n\n @queuenode\n def delay_test(cls):\n return 1 + cls.delayed_node()\n\n @queuenode\n def delay_test_lazy(cls):\n return 1 + cls.delayed_node_lazy()\n\n@queuenode\ndef ffill_queue():\n return ffill_test()\n\n@ffillnode\ndef ffill_test():\n i = 0\n while True:\n yield np.nan if i % 2 else float(i)\n i += 1\n\n@ffillnode\ndef ffill_array_test():\n return ffill_array_test_not_filled()\n\n@evalnode\ndef ffill_array_test_not_filled():\n i = 0\n array = np.ndarray((5,), dtype=float)\n array.fill(10.0)\n yield array\n \n array.fill(np.nan)\n while True:\n yield array\n\nclass NodeTest(unittest.TestCase):\n\n def setUp(self):\n self.daterange = pd.bdate_range(datetime(1970, 1, 1), datetime(1970, 1, 10))\n self.ctx = MDFContext()\n self.ctx[params.C] = 10\n self.ctx[params.D] = 20\n\n def test_queuenode(self):\n self._run(queue_output)\n queue = self.ctx[queue_output]\n self.assertEqual(len(queue), len(self.daterange))\n\n def test_queueyield(self):\n self._run(queue_yield)\n queue = self.ctx[queue_yield]\n self.assertEqual(len(queue), len(self.daterange))\n\n def test_queue_filter(self):\n self._run(queue_filter_test)\n queue = self.ctx[queue_filter_test]\n self.assertEqual(list(queue), [0] * (len(self.daterange) - 1))\n \n @staticmethod\n def diff_dfs(lhs_df, rhs_df, tolerance):\n diffs = np.abs(lhs_df - rhs_df)\n mask = (diffs > tolerance).values\n mask &= 
~(np.isnan(lhs_df) and np.isnan(rhs_df)).values\n mask |= np.isnan(lhs_df).values & ~np.isnan(rhs_df).values\n mask |= np.isnan(rhs_df).values & ~np.isnan(lhs_df).values\n return mask.any()\n\n def test_nansumnode(self):\n self._run(nansum_output)\n nansum = self.ctx[nansum_output]\n self.assertEqual(nansum, 812)\n\n def test_cumprodnode(self):\n self._run(cumprod_output)\n cumprod = self.ctx[cumprod_output]\n self.assertEqual(cumprod, 14201189062704000)\n\n def test_delaynode(self):\n self._run(DelayNodeTest.delay_test, DelayNodeTest.delay_test_lazy)\n value = self.ctx[DelayNodeTest.delay_test]\n value_lazy = self.ctx[DelayNodeTest.delay_test_lazy]\n self.assertEqual(list(value), list(range(1, len(self.daterange)+1)))\n self.assertEqual(list(value_lazy), list(range(1, len(self.daterange)+1)))\n\n def test_ffillnode(self):\n self._run(ffill_queue)\n value = self.ctx[ffill_queue]\n self.assertEqual(tuple(value), (0.0, 0.0, 2.0, 2.0, 4.0, 4.0, 6.0))\n\n def test_ffill_array(self):\n self._run(ffill_array_test)\n value = self.ctx[ffill_array_test]\n unfilled_value = self.ctx[ffill_array_test_not_filled]\n self.assertTrue(np.isnan(unfilled_value).all())\n self.assertEquals(value.tolist(), [10., 10., 10., 10., 10.])\n\n def test_datanode_ffill(self):\n data = pd.Series(range(len(self.daterange)), self.daterange, dtype=float)\n data = data[[bool(i % 2) for i in range(len(data.index))]]\n\n expected = data.reindex(self.daterange, method=\"ffill\")\n expected[np.isnan(expected)] = np.inf\n\n node = datanode(\"test_datanode_ffill\", data, ffill=True, missing_value=np.inf)\n qnode = node.queuenode()\n\n self._run(qnode)\n value = self.ctx[qnode]\n\n self.assertEquals(list(value), expected.values.tolist())\n\n def test_lookahead_node(self):\n B_queue = B.queuenode()\n B_lookahead = B.lookaheadnode(periods=len(self.daterange))\n\n self.ctx.set_date(self.daterange[0])\n actual = self.ctx[B_lookahead]\n\n self._run(B_queue)\n expected = self.ctx[B_queue]\n\n self.assertEquals(actual.values.tolist(), list(expected))\n self.assertEquals(actual.index.tolist(), list(self.daterange))\n \n def test_apply_node(self):\n actual_node = A.applynode(func=operator.add, args=(B,)).queuenode()\n expected_node = A_plus_B.queuenode()\n\n self._run(actual_node, expected_node)\n actual = self.ctx[actual_node]\n expected = self.ctx[expected_node]\n\n self.assertEquals(actual, expected)\n\n def test_binary_operators_with_constant(self):\n self._test(Counter, [-2.0, -1.5, -1.0, -0.5, 0.5, 1.0, 1.5])\n self._test(Counter + 0.2, [-1.8, -1.3, -0.8, -0.3, 0.7, 1.2, 1.7])\n self._test(Counter - 0.2, [-2.2, -1.7, -1.2, -0.7, 0.3, 0.8, 1.3])\n self._test(Counter * 2.0, [-4.0, -3.0, -2.0, -1.0, 1.0, 2.0, 3.0])\n self._test(Counter / 0.5, [-4.0, -3.0, -2.0, -1.0, 1.0, 2.0, 3.0])\n\n self._test(0.2 + Counter, [-1.8, -1.3, -0.8, -0.3, 0.7, 1.2, 1.7])\n self._test(1.0 - Counter, [ 3.0, 2.5, 2.0, 1.5, 0.5, 0.0, -0.5])\n self._test(2.0 * Counter, [-4.0, -3.0, -2.0, -1.0, 1.0, 2.0, 3.0])\n self._test(12 / (Counter+.25), [-6.8571428571428568, -9.6000000000000014, -16.0,\n -48.0, 16.0, 9.6000000000000014, 6.8571428571428568])\n\n def test_binary_operators_with_node(self):\n self._test(Counter + Counter, [-4.0, -3.0, -2.0, -1.0, 1.0, 2.0, 3.0])\n self._test(Counter - Counter, [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n self._test(Counter * Counter, [ 4.0, 2.25, 1.0, 0.25, 0.25, 1.0, 2.25])\n self._test(Counter / Counter, [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n \n def _test(self, node, expected_values):\n values = node.queuenode()\n 
self._run(values)\n actual = self.ctx[values]\n self.assertEquals(list(actual), expected_values)\n \n def _run_for_daterange(self, date_range, *nodes):\n for t in date_range:\n self.ctx.set_date(t)\n for node in nodes:\n self.ctx[node] \n \n def _run(self, *nodes):\n self._run_for_daterange(self.daterange, *nodes)\n\n\n","repo_name":"man-group/mdf","sub_path":"mdf/tests/test_nodes.py","file_name":"test_nodes.py","file_ext":"py","file_size_in_byte":7806,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"97"} +{"seq_id":"24850284501","text":"from django.urls import path\nfrom posts import views\n\napp_name = \"posts\"\n\nurlpatterns = [\n path(\"\", views.home_page, name=\"home_page\"),\n path(\"single-post//\", views.single_post_page, name=\"single_post\"),\n path(\"create-post/\", views.create_post_page, name=\"create_post\"),\n\n # Auth URLs\n path(\"register/\", views.register_user, name=\"register\"),\n path(\"login/\", views.login_user, name=\"login\"),\n path(\"logout/\", views.logout_user, name=\"logout\")\n]\n","repo_name":"aybruhm/DataVault-Assessment","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"34854663507","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 10 19:21:03 2022\n\n@author: gulceonder\n\"\"\"\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader , random_split\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pickle\nfrom matplotlib.lines import Line2D\nimport os\n\ndef part2Plots(results, save_dir='', filename='', show_plot=True):\n color_list = ['#0000ff', '#ff0000', '#d2691e', '#ff00ff', '#00ff00', '#000000', '#373788']\n style_list = ['-', '--']\n\n num_results = len(results)\n\n plot_curve_args = [{'c': color_list[k],\n 'linestyle': style_list[0],\n 'linewidth': 2} for k in range(num_results)]\n\n plot_point_args = [{'c': color_list[k],\n 'marker': 'o',\n 'markersize': 9,\n 'markerfacecolor': color_list[k]} for k in range(num_results)]\n\n\n\n font_size = 18\n\n fig, axes = plt.subplots(2, 2, figsize=(16, 12))\n\n\n # training loss\n ax = axes[0, 0]\n ax.set_title('training_loss', loc='left', fontsize=font_size)\n for result, args in zip(results, plot_curve_args):\n ax.plot(np.arange(1, len(result['loss_curve']) + 1), result['loss_curve'], label=result['name'], **args)\n ax.set_xlabel(xlabel='step', fontsize=font_size)\n ax.set_ylabel(ylabel='loss', fontsize=font_size)\n ax.tick_params(labelsize=12)\n\n # get lines for global legend\n lines = ax.get_lines()\n\n\n # training and validation accuracy\n ax = axes[0, 1]\n ax.set_title('train_and_val_accuracies', loc='right', fontsize=font_size)\n for result, args in zip(results, plot_curve_args):\n ax.plot(np.arange(1, len(result['train_acc_curve']) + 1), result['train_acc_curve'], label=result['name'],\n **args)\n args['linestyle'] = style_list[1]\n ax.plot(np.arange(1, len(result['val_acc_curve']) + 1), result['val_acc_curve'], label=result['name'],\n **args)\n args['linestyle'] = style_list[0]\n ax.set_xlabel(xlabel='step', fontsize=font_size)\n ax.set_ylabel(ylabel='acc.', fontsize=font_size)\n ax.tick_params(labelsize=12)\n\n legend_elements = [Line2D([0], [0], color='k', linestyle=style_list[0], lw=2, label='train.'),\n Line2D([0], [0], color='k', 
linestyle=style_list[1], lw=2, label='val.')]\n\n ax.legend(fontsize=12, loc='best', handles=legend_elements)\n\n # validation vs training accuracy\n ax = axes[1, 1]\n ax.set_title('validation_vs_training_accuracy', loc='right', fontsize=font_size)\n for result, args in zip(results, plot_curve_args):\n ax.plot(result['train_acc_curve'], result['val_acc_curve'], label=result['name'], **args)\n ax.set_xlabel(xlabel='training', fontsize=font_size)\n ax.set_ylabel(ylabel='validation', fontsize=font_size)\n ax.tick_params(labelsize=12)\n\n\n\n # test vs training accuracy\n ax = axes[1, 0]\n ax.set_title('test_vs_training_accuracy', loc='left', fontsize=font_size)\n for result, args in zip(results, plot_point_args):\n train_acc = result['train_acc_curve'][-1]\n test_acc = result['test_acc']\n ax.plot(train_acc, test_acc, label=result['name'], **args)\n ax.set_xlabel(xlabel='training', fontsize=font_size)\n ax.set_ylabel(ylabel='test', fontsize=font_size)\n ax.tick_params(labelsize=12)\n\n # global legend\n fig.legend(labels=[line._label for line in lines],\n ncol=3, loc=\"upper center\", fontsize=font_size,\n handles=lines)\n\n if show_plot:\n plt.show()\n\n fig.savefig(os.path.join(save_dir, filename + '.png'))\n \n# Download training data from open datasets.6000 samples\ntraining_data = datasets.FashionMNIST(\n root=\"data\", #path where data is stored\n train=True, #train or testa\n download=True, #download if not in root\n transform=ToTensor(),#normalize pixel values betwenn [0,1]\n \n)\n\n# Download test data from open datasets.10000 samples\ntest_data = datasets.FashionMNIST(\n root=\"data\",\n train=False,\n download=True,\n transform=ToTensor(),\n)\n\nunused, valid = random_split(training_data,[50000,10000])\nbatch_size = 64\n\n# Create data loaders\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size)\ntest_dataloader = DataLoader(test_data, batch_size=batch_size)\nvalid_dataloader = DataLoader(valid, batch_size=10000)\nval_size = len(valid_dataloader.dataset)\ntest_size = len(test_dataloader.dataset)\ntrain_size = len(train_dataloader.dataset)\n\n\n\n# Get cpu or gpu device for training.\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(f\"Using {device} device\")\n\n# example mlp classifier\nclass FullyConnected(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(FullyConnected, self).__init__()\n self.input_size = input_size\n self.fc1 = torch.nn.Linear(input_size, hidden_size)\n self.fc2 = torch.nn.Linear(hidden_size, num_classes)\n self.relu = torch.nn.ReLU()\n def forward(self, x):\n\n x = x.view(-1, self.input_size)\n\n hidden = self.fc1(x)\n relu = self.relu(hidden)\n output = self.fc2(relu)\n return output\n# initialize your model\nmodel = FullyConnected(784,64,10).to(device)\n\n\nprint(model)\n\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr = 0.001)\n\nloss_data=np.zeros((10,15, 94))#store training loss matrices \ntrain_acc_data= np.zeros((10,15,94)) #store training accuracy matrices\nvalid_acc_data= np.zeros((10,15,94)) #store validation accuracy matrices\ntest_acc_data= np.zeros((10)) #store test accuracy values\nweights_data =np.zeros((10,64,784))#weights data for mlp_1\n\n#reset parameterds after eaach run code from: https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819\ndef weight_reset(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n \n \n \ndef train( model, loss_fn, optimizer,epoch,run):\n\n\n 
model.train()\n\n\n for batch, (X, y) in enumerate(train_dataloader):\n correct= 0\n correct_val=0\n X, y = X.to(device), y.to(device)\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n # correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n\n if batch % 10 == 0:\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n loss, current = loss.item(), batch \n train_acc =100*(correct/batch_size) #training accuracy for the given batch\n #store the loss and accuracy in numpy arrays\n curr_batch=int((current/10))\n\n loss_data[run][epoch][curr_batch]=loss\n train_acc_data[run][epoch][curr_batch]=train_acc\n\n #calculate validation accuracy\n\n\n model.eval()\n with torch.no_grad():\n for X, y in valid_dataloader:\n X, y = X.to(device), y.to(device)\n pred2 = model(X)\n correct_val += (pred2.argmax(1) == y).type(torch.float).sum().item()\n\n val_acc = 100*(correct_val/val_size) #training accuracy up to the current batch \n valid_acc_data[run][epoch][curr_batch]=val_acc\n print(f\"loss: {loss:>7f} training accuracy: {train_acc:>0.1f}% , validation accuracy= {val_acc:>0.1f}% [{current:>5d}/930]\")\n \ndef test( model, loss_fn,run):\n\n # num_batches = len(test_dataloader)\n \n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in test_dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n # test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n # test_loss /= num_batches\n correct /= test_size\n # get the parameters 784x128 layer as numpy array\n weights_data[run]= model.fc1.weight.data.numpy()\n test_acc_data[run]= 100*correct\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}% \\n\")\n\n \nruns=10#number of times to run program\nfor run in range(runs): \n epochs = 15 #train and test data for 15 epochs \n for t in range(epochs):\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train( model, loss_fn, optimizer,t,run)\n test( model, loss_fn,run)#test 1 one time for each run\n model.apply(weight_reset)\n print(\"Done!\")\n \n\n\n# print (train_acc_data)\ntrain_acc_data=train_acc_data.reshape(10,1410);#linearize curves\n# print(train_acc_data.shape)\ntrain_acc_data=train_acc_data.mean(0) #take average of curves for 10 runs \n# print(train_acc_data.shape)\n# print(train_acc_data)\n\nvalid_acc_data=valid_acc_data.reshape(10,1410);#linearize curves\nvalid_acc_data=valid_acc_data.mean(0) #take average of curves for 10 runs \n\n\n#average \nloss_data=loss_data.reshape(10,1410);#linearize curves\nloss_data=loss_data.mean(0) #take average of curves for 10 runs \n\nmax_acc = np.max(test_acc_data) #best test accuracy from all runs\nmax_acc_index=np.argmax(test_acc_data)\n\n\nmax_acc_weights=weights_data[max_acc_index] #weights of the first layer of best test accuracy\n\n# Creating a Dictionary\nresult1 = {'name': 'mlp_1', 'loss_curve': loss_data, 'train_acc_curve': train_acc_data, 'val_acc_curve': valid_acc_data, 'test_acc': max_acc, 'weights': max_acc_weights}\nresult2 = {'name': 'mlp_1', 'loss_curve': loss_data, 'train_acc_curve': train_acc_data, 'val_acc_curve': valid_acc_data, 'test_acc': max_acc, 'weights': max_acc_weights}\nresults = [result1,result2]\n#save it using pickle\nfile_to_write = open(\"output.pkl\", \"wb\")\npickle.dump(results, 
file_to_write)\nprint(result1['train_acc_curve'])\nprint(result1['val_acc_curve'])\nprint(result1['test_acc'])\npart2Plots(results, save_dir= r'/Users/gulceonder/Desktop', filename='part2Plots',show_plot=True)\ntorch.save(model.state_dict(), \"model.pth\")\nprint(\"Saved PyTorch Model State to model.pth\")\n\n","repo_name":"gulceonder/Machine-Learning-Projects","sub_path":"Neural Networks/mlp_1.py","file_name":"mlp_1.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"16447520748","text":"\"\"\"Initial migration\n\nRevision ID: 073096486411\nRevises: \nCreate Date: 2023-10-31 22:37:45.869796\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '073096486411'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('genres',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('movies',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=True),\n sa.Column('image', sa.String(), nullable=True),\n sa.Column('release_date', sa.Date(), nullable=True),\n sa.Column('description', sa.Text(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(), nullable=True),\n sa.Column('email', sa.String(), nullable=True),\n sa.Column('password', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('username')\n )\n op.create_table('movie_genre_association',\n sa.Column('movie_id', sa.Integer(), nullable=False),\n sa.Column('genre_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['genre_id'], ['genres.id'], ),\n sa.ForeignKeyConstraint(['movie_id'], ['movies.id'], ),\n sa.PrimaryKeyConstraint('movie_id', 'genre_id')\n )\n op.create_table('ratings',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('movie_id', sa.Integer(), nullable=False),\n sa.Column('rating', sa.Integer(), nullable=False),\n sa.Column('review', sa.Text(), nullable=True),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.ForeignKeyConstraint(['movie_id'], ['movies.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
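# A minimal sketch (not part of the original migration): revisions like the one above are
# usually applied with the Alembic CLI (`alembic upgrade head`); the equivalent Python API
# call, assuming an alembic.ini next to the migrations directory, is:
from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")      # config path is an assumption for illustration
command.upgrade(cfg, "head")     # runs upgrade() of every pending revision
command.downgrade(cfg, "-1")     # rolls back the most recent revision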
###\n op.drop_table('ratings')\n op.drop_table('movie_genre_association')\n op.drop_table('users')\n op.drop_table('movies')\n op.drop_table('genres')\n # ### end Alembic commands ###\n","repo_name":"ndwiga6769/Movie-review-app","sub_path":"server/migrations/versions/073096486411_initial_migration.py","file_name":"073096486411_initial_migration.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"21325994456","text":"from api import db\nfrom models import User, Deck\n\n# NOT REQUIRED TO RUN ANYMORE, USE ONLY AS TEMPLATE IF NEED TO CHANGE DB\n\n\ndef fix_users():\n users = User.query.all()\n for u in users:\n if u.favorites is None:\n u.favorites = []\n print(u.username)\n\n\ndef fix_decks():\n decks = Deck.query.all()\n for d in decks:\n if d.favorited is None:\n d.favorited = []\n print(d.deckid)\n\n\nif __name__ == \"__main__\":\n fix_users()\n fix_decks()\n db.session.commit()\n","repo_name":"smeea/vdb","sub_path":"backend/update_db.py","file_name":"update_db.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"97"} +{"seq_id":"28223509490","text":"import os\nfrom aqwebengine import AQWebEngineView, async_slot\nfrom aqwebengine.run import run\nfrom aqwebengine.qtx import QPushButton, QVBoxLayout, QWidget\n\n\nclass Main(QWidget):\n def __init__(self):\n super().__init__()\n self._l = QVBoxLayout()\n self.setLayout(self._l)\n\n self._w = AQWebEngineView(self)\n self._l.addWidget(self._w)\n\n self._b = QPushButton('hitme')\n self._l.addWidget(self._b)\n\n self._b.clicked.connect(async_slot(self.loadme))\n\n async def init(self):\n await self._w.init()\n\n async def loadme(self, *_):\n url = as_file_url('./example/sample-app/public/index.html')\n print(url)\n await self._w.load_async(url)\n\nasync def main():\n w = Main()\n await w.init()\n\n w.resize(800, 600)\n w.show()\n\ndef as_file_url(fname):\n p = os.path.abspath(fname).replace('\\\\', '/')\n if not p.startswith('/'):\n p = '/' + p\n return 'file://' + p\n\n\n\n\nrun(main(), app_arguments=['--single-process', '--disable-web-security', '--remote-debugging-port=9999', '--enable-logging'])\n","repo_name":"innodatalabs/aqwebengine","sub_path":"example/sample_app.py","file_name":"sample_app.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"13206597940","text":"# -*- coding: utf-8 -*-\n\n\n__all__ = ['k_shortest_paths']\n\nfrom heapq import heappush, heappop\nfrom itertools import count\n\nimport networkx as nx\n\ndef k_shortest_paths(G, source, target, k, weight='weight'):\n if source == target:\n return ([0], [[source]]) \n \n length, path = nx.single_source_dijkstra(G, source, target, weight=weight)\n # length, path = nx.bidirectional_dijkstra(G, source, target, weight=weight)\n # print('p1: ', path)\n\n if target not in length:\n raise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n \n lengths = [length[target]]\n paths = [path[target]]\n c = count() \n B = [] \n G_original = G.copy() \n \n for i in range(1, k):\n # print('i: ',i)\n for j in range(len(paths[-1]) - 1): \n spur_node = paths[-1][j]\n root_path = paths[-1][:j + 1]\n \n edges_removed = []\n for c_path in paths:\n if len(c_path) > j and root_path == c_path[:j + 1]:\n u = c_path[j]\n v = c_path[j + 1]\n if G.has_edge(u, v):\n edge_attr = G.edge[u][v]\n 
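# A minimal sketch (not part of the original example app): the as_file_url() helper above
# hand-builds a file:// URL; pathlib offers a similar conversion for absolute paths:
from pathlib import Path

def as_file_url_pathlib(fname: str) -> str:
    # resolve() makes the path absolute, as_uri() adds the file:// scheme and percent-encodes
    return Path(fname).resolve().as_uri()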
G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n \n for n in range(len(root_path) - 1):\n node = root_path[n]\n # out-edges\n for u, v, edge_attr in G.edges_iter(node, data=True):\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n if G.is_directed():\n # in-edges\n for u, v, edge_attr in G.in_edges_iter(node, data=True):\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n # spur_path_length, spur_path = nx.bidirectional_dijkstra(G, spur_node, target, weight=weight)\n spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)\n # print('p:',i, spur_path)\n if target in spur_path and spur_path[target]:\n total_path = root_path[:-1] + spur_path[target]\n total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target] \n heappush(B, (total_path_length, next(c), total_path))\n \n for e in edges_removed:\n # print ('Rem: ', e)\n u, v, edge_attr = e\n G.add_edge(u, v, edge_attr)\n \n if B:\n (l, _, p) = heappop(B) \n lengths.append(l)\n paths.append(p)\n else:\n break\n \n return (lengths, paths)\n\ndef get_path_length(G, path, weight='weight'):\n length = 0\n if len(path) > 1:\n for i in range(len(path) - 1):\n u = path[i]\n v = path[i + 1]\n \n length += G.edge[u][v].get(weight, 1)\n \n return length \n","repo_name":"JoaoP-Silva/reroutingStudies","sub_path":"k_shortest_paths.py","file_name":"k_shortest_paths.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"97"} +{"seq_id":"40251804219","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nmethods = ['direct_shared', 'unroll_cublass']\nlabels = []\ntimes = []\nisNormalized = False\nAreMetrics = True\n\nmetrics = ['sm_efficiency', 'achieved_occupancy', 'warp_execution_efficiency', 'inst_per_warp', 'gld_efficiency', 'gst_efficiency', 'shared_efficiency', 'shared_utilization',\n 'l2_utilization', 'global_hit_rate', 'tex_cache_hit_rate', 'tex_utilization', 'ipc', 'inst_issued', 'inst_executed', 'issue_slot_utilization', 'dram_utilization']\n\n\ndef isfloat(num):\n try:\n float(num)\n return True\n except ValueError:\n return False\n\n\nif not AreMetrics:\n for method in methods:\n height = []\n count = 0\n sum = 0\n with open(method + '_sum.csv', 'r') as csv_file:\n data = csv_file.readlines() # read data\n for record in data[1:]: # for every record\n vals = record.split(',') # split records into vals\n if vals[0] not in labels: # if label not added to list then add\n labels.append(vals[0])\n count += 1\n sum += float(vals[-2])\n height.append(float(vals[-2])) # append time\n height.append(sum / count)\n times.append(height)\n\n w = 1/(len(methods)+1) # width of the bar\n\n labels.append('Average')\n\n bar = np.arange(len(labels)) # places of the labels\n\n if not isNormalized:\n\n for i in range(0, len(times)):\n # print(str(len(bar)) + \" \" + str(len(times[i])))\n # plots abar on x axis\n # print(times[i])\n plt.bar(bar + w * i, times[i], w, label=methods[i])\n plt.ylabel(\"Time(ms)\") # ylabel\n\n else:\n base = times[0].copy()\n\n for i in range(0, len(times)):\n for j in range(0, len(times[i])):\n # print(times[i][j])\n times[i][j] = base[j] / times[i][j]\n # print()\n plt.bar(bar + w * i, times[i], w, label=methods[i])\n\n plt.ylabel(\"Speedup\") # ylabel\n #plt.setp(plt.gca(), ylim=(0, 7))\n\n plt.xlabel(\"Configurations (C, HW, K)\") # xlabel\n plt.title(\"Execution time of different configurations\") # title\n plt.xticks(bar + w * 
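# A minimal sketch (not part of the original module): k_shortest_paths() above implements
# Yen's algorithm on the old networkx 1.x API (G.edge, edges_iter, 3-argument add_edge), so
# under that assumption a call on a toy weighted digraph looks like:
import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([
    ("A", "B", 1), ("B", "C", 1), ("A", "C", 3), ("B", "D", 2), ("C", "D", 1),
])
lengths, paths = k_shortest_paths(G, "A", "D", 3)
# lengths is nondecreasing and paths[0] is a cheapest A->D path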
len(times)/2, labels, rotation='vertical') # xtrics\n plt.legend()\n plt.show()\n\n# else:\n# for metric in metrics:\n# height = []\n# count = 0\n# sum = 0\n# with open('metrics/'+metric+'_sum.csv', 'r') as input:\n# data = input.readlines()\n# for record in data[1:]:\n# vals = record.split(',')\n# if vals[0] not in labels: # if label not added to list then add\n# labels.append(vals[0])\n# for i in range(1, len(vals)):\n# if not isfloat(vals[i]):\n# vals[i] = 0\n# height.append(float(vals[i]))\n# times.append(height)\n\n# w = 1/(len(methods)+1) # width of the bar\n# bar = np.arange(len(labels))\n# for i in range(0, len(times)):\n# print(str(len(bar)) + \" \" + str(len(times[i])))\n# # plots abar on x axis\n# # print(times)\n# plt.bar(bar + w * i, times[i], w, label=methods[i])\n# plt.ylabel(\"Time(ms)\") # ylabel\n","repo_name":"PikaBeka/mbNet-tester","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"72277561918","text":"T = int(input()) #총 테스트 케이스 수\n\nfor t in range(T):\n st = input()[::-1] #거울에 비춰질 문자열\n answer = \"\"\n\n for s in st:\n if s == 'b':\n answer += 'd'\n elif s == 'd':\n answer += 'b'\n elif s == 'p':\n answer += 'q'\n elif s == 'q':\n answer += 'p'\n\n print(\"#\" + str(t+1), answer)\n","repo_name":"ShinhyeongPark/Algorithm","sub_path":"SWEA/10804.py","file_name":"10804.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"36360611619","text":"from channels.routing import route, include\nfrom gglobal.cms.consumers import phone_numbers_connect, phone_numbers_recive, phone_numbers_disconnect\nfrom gglobal.crm.consumers import ws_add, ws_disconnect,\\\n\t\t\t\t\t\t\t\tchat_connect, chat_message, chat_disconnect\n\n\nonlinestatus_channel_routing = [\n route(\"websocket.connect\", ws_add),\n route(\"websocket.disconnect\", ws_disconnect),\n]\n\nphonenumbers_channel_routing = [\n route(\"websocket.connect\", phone_numbers_connect),\n route(\"websocket.receive\", phone_numbers_recive),\n route(\"websocket.disconnect\", phone_numbers_disconnect)\n]\n\nchat_routing = [\n\troute(\"websocket.connect\", chat_connect),\n\troute(\"websocket.receive\", chat_message),\n\troute(\"websocket.disconnect\", chat_disconnect),\n]\n\nchannel_routing = [\n include(phonenumbers_channel_routing, path=r'^/phonenumbers/'),\n include(onlinestatus_channel_routing, path=r'^/onlinestatus/'),\n include(chat_routing, path=r'^/chat/'),\n\n]\n\n\n\n\n","repo_name":"vladimirmyshkovski/gglobal","sub_path":"config/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"6493431297","text":"#!/usr/bin/python\n#-*- coding:utf-8 -*-\nimport os\nimport CommonMethods\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport GeometryMethod\n\nconn = CommonMethods.DatabaseConn()\ncursor = conn.cursor()\n\ncount = 0\nstrSql = 'select gid,st_astext(geom) from wstation order by gid'\nrows = CommonMethods.GetDataFromTable(cursor,strSql)\nfor row in rows:\n print(count)\n count += 1\n gid = row[0]\n geom = row[1]\n coor = geom.replace('(','').replace(')','').replace('POINT','').split(' ')\n strSql = 'update wstation set x = %f,y=%f where gid = %d;'%(float(coor[0]),float(coor[1]),gid)\n cursor.execute(strSql)\n 
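# A minimal sketch (not part of the original solution): the if/elif ladder in the mirrored
# string problem above maps each mirrored letter; str.translate expresses the same mapping
# in one pass:
MIRROR = str.maketrans("bdpq", "dbqp")

def mirror(text: str) -> str:
    # reverse the string, then swap b<->d and p<->q
    return text[::-1].translate(MIRROR)

# mirror("bdq") == "pbd"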
conn.commit()","repo_name":"yangjnMapBox/GISPlatform","sub_path":"tools/pointAddCoor.py","file_name":"pointAddCoor.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"40942781796","text":"from collections import defaultdict\n\ndef vertical_traversal(root):\n\tm = defaultdict(list)\n\tm[0].append(root.val)\n\n\thd = defaultdict()\n\thd[root] = 0\n\n\tq = []\n\tq.append(root)\n\twhile q:\n\t\tele = q.pop(0)\n\n\t\tif ele.left:\n\t\t\thd[ele.left] = hd[ele]-1\n\t\t\tm[hd[ele.left]].append(ele.left.val)\n\t\t\tqueue.append(ele.left)\n\n\t\tif ele.right:\n\t\t\thd[ele.right] = hd[ele]+1\n\t\t\tm[hd[ele.right]].append(ele.right.val)\n\t\t\tqueue.append(ele.right)\n\n\tm = sorted(m.items(), key = lambda x:x[0])\n\tres = []\n\tfor i in m:\n\t\tres.append(i[1])\n\treturn res","repo_name":"ratedRA/python","sub_path":"DS/vertical2.py","file_name":"vertical2.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"33877340724","text":"import os\nimport os.path as osp\nimport numpy as np\n\nimport glob\nimport tqdm\nimport shutil\nimport pytz\nimport datetime\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom models.isicnet import ISICNet\nfrom datasets.isicdataset import ISICDataset\n\n## Define evaluation function\ndef _fast_hist(label_true, label_pred, n_class):\n hist = np.bincount(\n n_class * label_true.astype(int) +\n label_pred.astype(int), minlength=n_class ** 2).reshape(n_class, n_class)\n return hist\n\ndef label_accuracy_score(label_trues, label_preds, n_class=8):\n hist = np.zeros((n_class, n_class))\n hist += _fast_hist(label_trues, label_preds, n_class)\n acc = np.diag(hist).sum() / hist.sum()\n with np.errstate(divide='ignore', invalid='ignore'):\n precision = np.diag(hist) / hist.sum(axis=1)\n mean_precision = np.nanmean(precision)\n with np.errstate(divide='ignore', invalid='ignore'):\n recall = np.diag(hist) / hist.sum(axis=0)\n mean_recall = np.nanmean(recall)\n with np.errstate(divide='ignore', invalid='ignore'):\n iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iou = np.nanmean(iou)\n with np.errstate(divide='ignore', invalid='ignore'):\n f1 = (2 * np.diag(hist))/ (hist.sum(axis=1) + hist.sum(axis=0) + 2 * np.diag(hist))\n mean_f1 = np.nanmean(f1)\n return acc, mean_precision, mean_recall, mean_iou, mean_f1\n\n## Define directory of output\nhere = osp.dirname(osp.abspath(__file__))\nout_dir = osp.join(here, 'output')\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\nruns = sorted(glob.glob(os.path.join(out_dir, 'run_*')))\nrun_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0\nexperiment_dir = os.path.join(out_dir, 'run_{}'.format(str(run_id)))\nif not os.path.exists(experiment_dir):\n os.makedirs(experiment_dir)\n\n## Define (input) variables from Docker Container environment variables\nfhir_server = str(os.environ['FHIR_SERVER'])\nfhir_port = str(os.environ['FHIR_PORT'])\n# num_station = int(os.environ['NUM_STATION'])\n# sid = int(os.environ['SID'])\nbatch_size = int(os.environ['BATCH_SIZE'])\nnum_epoch = int(os.environ['NUM_EPOCH'])\nlr = float(os.environ['LR'])\nweight_decay = float(os.environ['WEIGTH_DECAY'])\nmodel_name = str(os.environ['MODEL_NAME'])\n\n## Define (output) file formats\nif not osp.exists(osp.join(experiment_dir, 'val_log.csv')):\n with open(osp.join(experiment_dir, 'val_log.csv'), 
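# A minimal sketch (not part of the original snippet): vertical_traversal() above pushes
# children onto an undefined name `queue` (its BFS queue is called `q`), so it fails for any
# tree with children. The same column-by-column sweep, corrected:
from collections import defaultdict, deque

def vertical_traversal_fixed(root):
    cols = defaultdict(list)              # horizontal distance -> node values, in BFS order
    q = deque([(root, 0)])
    while q:
        node, hd = q.popleft()
        cols[hd].append(node.val)
        if node.left:
            q.append((node.left, hd - 1))
        if node.right:
            q.append((node.right, hd + 1))
    return [cols[hd] for hd in sorted(cols)]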
'w') as f:\n header = ['epoch', 'Loss', 'Acc', 'Precision', 'Recall', 'Iou', 'F1Score', 'train/Loss', 'elapsed_time']\n header = map(str, header)\n f.write(','.join(header) + '\\n')\n print(\"Initial Log file\")\n\ncuda = torch.cuda.is_available()\ntorch.manual_seed(1337)\nif cuda:\n torch.cuda.manual_seed(1337)\n\n## Initial Model\nprint(\"Initial Model\")\nmodel = ISICNet(backbone=model_name)\nprint(\"Initial Model {}\".format(model_name))\nif cuda:\n print(\"Cuda:\", cuda)\n model = model.cuda()\n\n## Initial Datasets of train and val on station 1, 2, 3 and test\nkwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}\nprint(\"Initial Training Dataset\")\ntrain_dataloader = torch.utils.data.DataLoader(ISICDataset(fhir_server, fhir_port, split='train'), batch_size=batch_size, shuffle=True, **kwargs)\nprint(\"Initial Val Dataset\")\nval_dataloader = torch.utils.data.DataLoader(ISICDataset(fhir_server, fhir_port, split='val'), batch_size=batch_size, shuffle=False, **kwargs)\n## Initial criterion (Cross Entropy Loss)\nprint(\"Initial Loss function\")\ncriterion = nn.CrossEntropyLoss()\n## Initial Optimizers for station\nprint(\"Initial Optimizer\")\noptim = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\n## Load model from previous train\nif run_id > 0:\n prev_experiment_dir = osp.join(out_dir, 'run_{}'.format(str(run_id - 1)))\n if osp.exists(osp.join(prev_experiment_dir, 'best_model.pth.tar')):\n prev_best_model = torch.load(osp.join(prev_experiment_dir, 'best_model.pth.tar'))\n model.load_state_dict(prev_best_model['model_state_dict'])\n optim.load_state_dict(prev_best_model['optim_state_dict'])\n shutil.copy(osp.join(prev_experiment_dir, 'best_model.pth.tar'),\n osp.join(experiment_dir, 'best_model.pth.tar'))\n print(\"Model loaded from previous train.\")\n else:\n print(\"No previous best model found!\")\nelse:\n torch.save({\n 'epoch': 0,\n 'optim_state_dict': optim.state_dict(),\n 'model_state_dict': model.state_dict(),\n 'best_acc': 0.0,\n }, osp.join(experiment_dir, 'best_model.pth.tar'))\n\ntimestamp_start = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))\nbest_acc = 0.0\n## Run the training processing on the station\nfor epoch in range(num_epoch):\n model.train()\n train_loss = 0.0\n for batch_idx, sample in tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader), desc='Station Train epoch=%d' % epoch, ncols=80, leave=False):\n assert model.training\n img, lbl = sample['image'], sample['label']\n if cuda:\n img, lbl = img.cuda(), lbl.cuda()\n img, lbl = Variable(img), Variable(lbl)\n optim.zero_grad()\n pred = model(img)\n loss = criterion(pred, lbl)\n train_loss = train_loss + loss.data.item()\n loss.backward()\n optim.step()\n\n train_loss = train_loss / len(train_dataloader)\n print(\"Train epoch {} finished with average train loss of {}.\".format(epoch, train_loss))\n\n model.eval()\n val_loss = 0.0\n label_trues, label_preds = [], []\n for batch_idx, sample in tqdm.tqdm(enumerate(val_dataloader), total=len(val_dataloader), desc='Station Val epoch=%d' % epoch, ncols=80, leave=False):\n img, lbl = sample['image'], sample['label']\n if cuda:\n img, lbl = img.cuda(), lbl.cuda()\n img, lbl = Variable(img), Variable(lbl)\n with torch.no_grad():\n pred = model(img)\n loss = criterion(pred, lbl)\n val_loss = val_loss + loss.data.item()\n lbl = lbl.data.cpu().numpy()\n pred = pred.data.max(1)[1].cpu().numpy()\n label_trues = np.concatenate((label_trues, lbl), axis=0)\n label_preds = np.concatenate((label_preds, pred), axis=0)\n val_loss 
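# A minimal sketch (not part of the original training script): in label_accuracy_score()
# defined earlier, hist[t, p] counts samples with true class t predicted as p, so under the
# usual definitions diag/row-sum is per-class recall and diag/column-sum is per-class
# precision (the two names above are swapped), and F1 is 2*diag / (row-sum + column-sum)
# without the extra 2*diag in the denominator. Compactly:
import numpy as np

def per_class_metrics(hist):
    tp = np.diag(hist)
    with np.errstate(divide="ignore", invalid="ignore"):
        recall = tp / hist.sum(axis=1)        # rows: one total per true class
        precision = tp / hist.sum(axis=0)     # columns: one total per predicted class
        f1 = 2 * tp / (hist.sum(axis=1) + hist.sum(axis=0))
    return precision, recall, f1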
= val_loss / len(val_dataloader)\n acc, mean_precision, mean_recall, mean_iou, mean_f1 = label_accuracy_score(label_trues, label_preds)\n with open(osp.join(experiment_dir, 'val_log.csv'), 'a') as f:\n elapsed_time = (datetime.datetime.now(pytz.timezone('Asia/Tokyo')) - timestamp_start).total_seconds()\n log = [epoch, val_loss, acc, mean_precision, mean_recall, mean_iou, mean_f1, train_loss, elapsed_time]\n log = map(str, log)\n f.write(','.join(log) + '\\n')\n\n is_best = acc > best_acc\n if is_best:\n best_acc = acc\n torch.save({\n 'epoch': epoch,\n 'optim_state_dict': optim.state_dict(),\n 'model_state_dict': model.state_dict(),\n 'best_acc': best_acc,\n }, osp.join(experiment_dir, 'checkpoint.pth.tar'))\n if is_best:\n shutil.copy(osp.join(experiment_dir, 'checkpoint.pth.tar'), osp.join(experiment_dir, 'best_model.pth.tar'))\n print(\"Station Val epoch {} finished with loss of {}, acc of {}, precision of {}, recall of {}, iou of {}, f1-score of {}.\".format(epoch, val_loss, acc, mean_precision, mean_recall, mean_iou, mean_f1))\nprint(\"Finished training process\")\n\n\n\n\n\n\n\n\n\n","repo_name":"medizininformatik-initiative/usecase-cord-support","sub_path":"PHT_Beispiele/Usecases/ISIC2019/Code/image_classification/isic2019/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"97"} +{"seq_id":"74623240957","text":"from fonts import meteo as font\nimport color\n\nclass TeamIndicator:\n def __init__(self, display, color, name, x, y, width, height):\n self._tft = display\n self.name = name\n self.color = color\n self._x = x\n self._y = y\n self._width = width\n self._height = height\n self.value = 0\n\n def render(self):\n x = self._x\n self._tft.fill_rect(x, self._y, 30, 30, self.color)\n self._tft.draw(font, self.name, self._x + 40, int(self._y + (font.HEIGHT / 2)), color.WHITE)\n\n text_len = self._tft.draw_len(font, str(self.value))\n self._tft.draw(font, str(self.value), self._x + self._width - text_len - 10, int(self._y + (font.HEIGHT / 2)), color.WHITE)\n","repo_name":"area3001/mpy_blaster","sub_path":"team_indicator.py","file_name":"team_indicator.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"42932082474","text":"import interleaving as il\nimport numpy as np\nimport pytest\n\nclass TestMethods(object):\n\n @pytest.yield_fixture(autouse=True)\n def fix_seed(self):\n np.random.seed(0)\n\n def assert_almost_equal(self, a, b, error_rate=0.01):\n assert abs(a-b) < error_rate\n\n def interleave(self, method, lists, k, ideals, num=100):\n results = []\n for i in range(num):\n res = method(lists, max_length=k).interleave()\n results.append(tuple(res))\n results = set(results)\n possible_results = set([tuple(i) for i in ideals])\n assert results == possible_results\n\n def evaluate(self, method, ranking, clicks, result):\n res = method.evaluate(ranking, clicks)\n assert set(res) == set(result)\n\n","repo_name":"mpkato/interleaving","sub_path":"tests/test_methods.py","file_name":"test_methods.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"97"} +{"seq_id":"17828069091","text":"import functools\nfrom Monomial import Monomial, like_terms # for testing\nfrom MonomialOrder import MonomialOrder # also for testing\nfrom copy import copy\n\n\nclass Polynomial:\n\n\n def __init__(self, 
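# A minimal sketch (not part of the original training script): the checkpoint the ISIC
# script above writes is a plain dict, so resuming mirrors the save call -- assuming the
# same model/optimizer construction used earlier in that file:
import torch

ckpt = torch.load("best_model.pth.tar", map_location="cpu")
model.load_state_dict(ckpt["model_state_dict"])
optim.load_state_dict(ckpt["optim_state_dict"])
start_epoch = ckpt["epoch"] + 1    # continue after the last recorded epoch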
arg):\n \"\"\"\n Initialization always combines like terms\n \"\"\"\n # hacky way of doing constructor overloading like in Monomial class\n if isinstance(arg, str):\n self.string_init(arg)\n else:\n self.list_init(arg)\n\n\n def list_init(self, monomials):\n \"\"\"\n Initialize from a list of Monomials\n :param monomials: List[Monomial]\n \"\"\"\n self.terms = combine_terms(monomials)\n \n def string_init(self, str_representation):\n \"\"\"\n Initialize a polynomial from a string input\n :param str_representation: String with no spaces\n \"\"\"\n monomials = []\n # so that we can split on \"+\"\n str_representation = str_representation.replace(\"-\",\"+-\")\n terms = [term for term in str_representation.split(\"+\") if term != \"\"]\n for term in terms:\n monomial = Monomial(term)\n monomials.append(monomial)\n self.terms = combine_terms(monomials)\n \n\n def get_terms(self):\n \"\"\"\n :return: copy of the list of monomial terms\n \"\"\"\n return copy(self.terms)\n\n def reorder(self, monomial_order):\n \"\"\"\n Sort the terms based on the monomial order\n :param monomial_order: MonomialOrder\n \"\"\"\n self.terms = sorted(self.terms, key=functools.cmp_to_key(monomial_order.compare), reverse=True)\n\n\n def leading_term(self, monomial_order):\n \"\"\"\n :return: returns the leading term, a Monomial\n \"\"\"\n self.reorder(monomial_order)\n return self.get_terms()[0]\n\n\n def ordered_str(self, monomial_order):\n \"\"\"\n Orders the monomial terms, and the variables within the monomial terms\n :param monomial_order: MonomialOrder\n :return: String\n \"\"\"\n self.reorder(monomial_order)\n ret = \" + \".join([term.ordered_str(monomial_order.get_variable_order()) for term in self.terms])\n ret = ret.replace(\"+ -\", \"- \")\n return ret\n \n def __repr__(self):\n \"\"\"\n Useful default string for debugging. 
Not robust given other variable choices\n :return: String\n \"\"\"\n # prints using lex order, xyz\n # TODO: handle variables other than xyz\n default_order = MonomialOrder('xyz','lex')\n return self.ordered_str(default_order)\n\n def __eq__(self, other):\n \"\"\"\n Order does not matter when determining equality\n :param other: Polynomial\n :return: boolean\n \"\"\"\n self_terms = set(self.terms)\n other_terms = set(other.terms)\n return self_terms == other_terms\n \n def __add__(self, other):\n \"\"\"\n Add two Polynomials\n :param other: Polynomial\n :return: the sum\n \"\"\"\n return Polynomial(self.get_terms() + other.get_terms())\n \n def __sub__(self, other):\n \"\"\"\n Subtract two Polynomials\n :param other: Polynomial\n :return: the difference\n \"\"\"\n negative_other = other * Polynomial('-1')\n return self + negative_other\n \n def __mul__(self, other):\n \"\"\"\n Multiple two Polynomials\n :param other: Polynomial\n :return: the product\n \"\"\"\n prod_terms = []\n for term1 in self.get_terms():\n for term2 in other.get_terms():\n prod_term = term1 * term2\n prod_terms.append(prod_term)\n return Polynomial(prod_terms)\n\ndef combine_terms(terms):\n \"\"\"\n Combines like terms\n :param terms: List[Monomial]\n :return: a new list of Monomials, with terms combined\n \"\"\"\n new_terms = []\n combined = []\n\n for i, m1 in enumerate(terms):\n if m1 not in combined:\n term_group = [m1]\n for m2 in terms[i+1:]:\n if m2 not in combined:\n if like_terms(m1, m2):\n term_group.append(m2)\n combined.append(m2)\n new_coefficient = sum([m.coefficient for m in term_group])\n new_vars = m1.get_vars()\n new_monomial = Monomial(new_vars, new_coefficient)\n new_terms.append(new_monomial)\n\n if new_terms != [Monomial('0')]:\n new_terms = [term for term in new_terms if term != Monomial('0')]\n\n return new_terms\n\n\nif __name__ == \"__main__\":\n # string = \"5*x^5*y^4*z\"\n # string2 = \"y^2*z^9\"\n # string3 = \"x^3*z^8\"\n # string4 = \"2\"\n # string5 = \"0\"\n\n # m = Monomial(string)\n # m2 = Monomial(string2)\n # m3 = Monomial(string3)\n # m4 = Monomial(string4)\n # m5 = Monomial(string5)\n # p = Polynomial([m, m2, m3, m4, m5])\n\n # lt = p.leading_term(order)\n # print(lt.ordered_str(\"xyz\"))\n\n # m1 = Monomial(\"5.3*x^2*y^2\")\n # m2 = Monomial(\"-3*x^2*y^2\")\n # m3 = Monomial({'x':1,'y':1},1)\n # p = Polynomial([m1,m2,m3])\n \n # order = MonomialOrder(\"xyz\", \"lex\")\n # print(p.ordered_str(order))\n\n # p1 = Polynomial([Monomial('2*x')])\n # p2 = Polynomial([Monomial('3*y^2')])\n # [print(m) for m in poly_multiply(p1,p2).get_terms()]\n # [print(m) for m in poly_multiply(p2,p1).get_terms()]\n # [print(m) for m in poly_add(p1,p2).get_terms()]\n\n p = Polynomial('x^2*y+x*y^2+x-1.0*x+y^2+0.0+0.0+0.0+0.0+0.0')\n print(p.ordered_str(MonomialOrder('zyx','lex')))","repo_name":"nrholley/poly-long-division","sub_path":"Polynomial.py","file_name":"Polynomial.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"74096717437","text":"import os\nimport hashlib # standard hash library\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import ttk\nimport threading\nimport subprocess\nimport matplotlib.pyplot as plt\nimport graphviz\n\n\n# branch instructions\nbranch_inst = [\"beq\", \"bne\", \"blt\", \"bltu\", \"bge\", \"bgeu\", \"beqz\", \"bnez\", \"bltz\", \"blez\", \"bgtz\", \"bgez\", \"bgt\", \"bgtu\", \"ble\", \"bleu\"]\n# jump instruction\nunconditional_jump_inst = 
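# A minimal sketch (not part of the original module): combine_terms() in the polynomial
# code above groups like terms with a nested scan; keying a dict on the variable map does
# the same combination in a single pass, assuming the Monomial(vars, coefficient)
# constructor and the get_vars()/coefficient members used above:
from collections import defaultdict

def combine_terms_by_key(terms):
    sums = defaultdict(float)     # frozenset of (variable, power) pairs -> coefficient sum
    for m in terms:
        sums[frozenset(m.get_vars().items())] += m.coefficient
    return [Monomial(dict(key), coeff) for key, coeff in sums.items() if coeff != 0]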
[\"jal\", \"j\"]\nindirect_jump_inst = [\"jr\", \"jalr\"]\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n#Change the current working directory to the directory of the script\nos.chdir(current_dir)\n\ndef main(objdump_file, Hash_algorithm, Hash_value_length, program_name):\n output_directory = os.path.join(os.path.dirname(os.getcwd()), 'output_files')\n # Check if the output directory exists, if not, create it\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n \n function_line_ranges, function_addr_ranges, function_instr = get_func_information(objdump_file)\n \n ### find the function to visit \n function_call_instr = {}\n # extra_func_name = extract_function_before_xx(objdump_file, '<__to_main>')\n to_visit_functions, visited_functions_id, visited_functions, function_call_instr \\\n = find_to_visit_function(objdump_file, function_instr,function_addr_ranges,\\\n '<__start>',function_call_instr, visited_functions = None,visited_functions_id=None)\n \n all_instr, all_control_transfer_instr_addr, sorted_functions_with_jump_instr_addr = \\\n get_all_control_transfer_instr(objdump_file, function_addr_ranges,visited_functions)\n\n ret_instr_addr, function_have_ret_instr = find_ret_instruction(visited_functions, function_addr_ranges, function_instr)\n \n function_call_relationship = get_function_call_relationship(function_call_instr, function_addr_ranges, output_directory, program_name)\n \n return_target = get_return_relationship(function_call_relationship, ret_instr_addr, function_call_instr, \\\n all_instr, function_addr_ranges)\n \n used_function_instr = extract_used_function_instr(function_instr, visited_functions)\n \n address, machine_code, mnemonic, operands = get_func_machine_code(used_function_instr)\n \n end_addr_list, branch_or_jump_target_addr, \\\n branch_taken_start_addr, all_taken_target_addr, order_start_addr_list= \\\n get_the_addr_information_for_basic_block(address, mnemonic, operands, function_addr_ranges)\n \n \n basic_block = create_basic_blocks_in_order(order_start_addr_list, end_addr_list, used_function_instr, function_addr_ranges,\\\n ret_instr_addr,return_target)\n \n basic_block = create_basic_blocks_start_with_taken_target(all_taken_target_addr, basic_block, order_start_addr_list, used_function_instr)\n \n sorted_basic_blocks = sort_basic_blocks(basic_block)\n \n\n export_results(function_addr_ranges, program_name + '_function_addr.txt',\n all_instr, sorted_functions_with_jump_instr_addr, program_name + '_forward_transfers.txt', \\\n program_name + '_control_transfer.bin',\\\n program_name + '_basic_block.txt', sorted_basic_blocks,\n program_name + '_bin_basic_block_inf.txt', program_name + '_hex_basic_block_inf.txt',\n Hash_algorithm, Hash_value_length, output_directory, program_name)\n \n generate_CFG(basic_block, program_name, output_directory)\n \n # generate_main_CFG(basic_block, program_name, output_directory, function_addr_ranges)\n\n\n\ndef get_func_machine_code(input_data):\n '''\n Extracts machine code information from the input data.\n\n Args:\n input_data (list): Used function instructions.\n\n Returns:\n tuple: A tuple containing lists of addresses, machine codes, mnemonics, and operands.\n\n '''\n # Initialize lists to store the machine code information\n address = []\n machine_code = []\n mnemonic = []\n operands = []\n\n # Iterate over the input data\n for line in input_data:\n if len(line) == 1:\n break\n\n # Split the line into tokens\n tokens = line.split()\n\n # Extract the address, machine code, mnemonic, and 
operands\n if len(tokens) == 3: \n address.append(tokens[0][:-1]) \n machine_code.append(tokens[1])\n mnemonic.append(tokens[2])\n operands.append('')\n else:\n address.append(tokens[0][:-1]) \n machine_code.append(tokens[1])\n mnemonic.append(tokens[2])\n operands.append(tokens[3])\n\n # Return the extracted machine code information as a tuple of lists\n return address, machine_code, mnemonic, operands\n\ndef get_func_information(objdump_file):\n '''\n Extract function information from the objdump file.\n\n Args:\n objdump_file (str): Path to the objdump file.\n\n Returns:\n tuple: A tuple containing dictionaries of function line ranges, function address ranges,\n and function instructions.\n\n '''\n # Read the objdump file\n with open(objdump_file, 'r') as f:\n lines = f.readlines()\n # Initialize variables\n function_line_ranges = {}\n function_addr_ranges = {}\n function_instr = {}\n start_line = 0\n in_function = False\n function_name = \"\"\n # Process each line in the file\n for i, line in enumerate(lines):\n # Check if the line indicates the start of a function\n if line.endswith(\">:\\n\"): \n in_function = True\n function_name = line.split()[-1][:-1] # Extract the function name\n start_line = i + 1\n func_instr = [] # Initialize a list to store the function instructions\n # Check if the line indicates the end of a function\n elif line.startswith(\"\\n\") or \"...\" in line: \n if in_function:\n in_function = False\n end_line = i - 1\n # Get the start and end addresses based on line numbers\n start_address = lines[start_line].split()[0][:-1]\n end_address = lines[end_line].split()[0][:-1]\n # Store the function information in the dictionaries\n function_line_ranges[function_name] = (start_line, end_line)\n function_addr_ranges[function_name] = (start_address, end_address)\n function_instr[function_name] = func_instr\n # Check if the line is an instruction within a function\n else:\n if in_function:\n instr = line.strip()\n func_instr.append(instr)\n \n return function_line_ranges, function_addr_ranges, function_instr\n\ndef write_functions_information(function_addr_ranges, output_file, output_directory):\n func_info_path = os.path.join(output_directory, output_file)\n with open(func_info_path, 'w') as f:\n for func_name, func_range in function_addr_ranges.items():\n f.write(func_name + ':' + '\\n' + '\\tstart_addr:' + ' ' + str(func_range[0]) \\\n +'\\n' + '\\tend_addr:' + ' ' + str(func_range[1]) + '\\n')\n\n \ndef find_to_visit_function(objdump_file, function_instr, function_addr_ranges, func_name, function_call_instr,\\\n visited_functions = None, visited_functions_id=None ):\n '''\n Find functions to visit based on the function's instructions and addresses.\n\n Args:\n objdump_file (str): Path to the objdump file.\n function_instr (dict): Dictionary containing the instructions of each function.\n function_addr_ranges (dict): Dictionary containing the address ranges of each function.\n func_name (str): Name of the current function.\n function_call_instr (dict): Dictionary to store the call instructions for each function.\n visited_functions (set, optional): Set of visited functions. Defaults to None.\n visited_functions_id (bool, optional): Identifier for whether visited_functions has been initialized. 
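# A minimal sketch (not part of the original tool): get_func_machine_code() above splits
# whitespace-tokenised objdump lines into address / machine code / mnemonic / operands; on
# a typical RISC-V disassembly line (this concrete line is only an illustration) the split is:
line = "101b4:\tfe010113\taddi\tsp,sp,-32"
tokens = line.split()
addr, code, mnem, ops = tokens[0][:-1], tokens[1], tokens[2], tokens[3]
# addr == "101b4", code == "fe010113", mnem == "addi", ops == "sp,sp,-32"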
Defaults to None.\n\n Returns:\n tuple: A tuple containing the set of functions to visit, the visited_functions_id flag,\n the visited functions, and the function_call_instr dictionary.\n\n '''\n \n # Initialize visited_functions set if not already initialized\n if visited_functions_id is None:\n visited_functions = set()\n visited_functions_id = True\n # Read objdump file\n with open(objdump_file, 'r') as file:\n lines = file.readlines()\n\n func_addr_range = function_addr_ranges[func_name] \n call_instrs = []\n \n # Search for called functions in the function range\n to_visit_functions = set()\n for line in function_instr[func_name]:\n if len(function_instr[func_name]) == 1:\n if line.split()[2] == 'jal' or line.split()[2] == 'j' :\n operand = line.split()[3]\n if ',' in operand:\n jump_target = operand.split(',')[1]\n if int(jump_target,16) > int(func_addr_range[1],16) or int(jump_target,16) < int(func_addr_range[0],16):\n call_instrs.append(line)\n elif ',' not in operand:\n jump_target = operand\n if int(jump_target,16) > int(func_addr_range[1],16) or int(jump_target,16) < int(func_addr_range[0],16):\n call_instrs.append(line)\n \n for to_visit_func_name, func_addr_range in function_addr_ranges.items():\n if int(jump_target,16) >= int(func_addr_range[0],16) and int(jump_target,16) <= int(func_addr_range[1],16):\n to_visit_functions.add(to_visit_func_name)\n called_func_name = func_name\n break\n \n # branch instr \n elif any(all(instr_char in line.split()[2] for instr_char in instr) for instr in branch_inst):\n operand = line.split()[3]\n jump_target = operand.split(',')[-1]\n \n if line == function_instr[func_name][-1]:# Check if the last instruction is a branch instruction\n for b_next_func_name in function_addr_ranges.keys():\n current_func_addr_range = function_addr_ranges[func_name]\n next_func_addr_range = function_addr_ranges[b_next_func_name]\n if b_next_func_name != func_name and int(next_func_addr_range[0],16) > int(current_func_addr_range[1],16):\n to_visit_functions.add(b_next_func_name)\n called_func_name = func_name\n break\n for to_visit_func_name, func_addr_range in function_addr_ranges.items():\n if int(jump_target,16) >= int(func_addr_range[0],16) and int(jump_target,16) <= int(func_addr_range[1],16):\n to_visit_functions.add(to_visit_func_name)\n called_func_name = func_name\n break\n\n else:\n if line.split()[2] == 'jal' or line.split()[2] == 'j' :\n operand = line.split()[3]\n if ',' in operand:\n jump_target = operand.split(',')[-1]\n if int(jump_target,16) > int(func_addr_range[1],16) or int(jump_target,16) < int(func_addr_range[0],16):\n call_instrs.append(line)\n elif ',' not in operand:\n jump_target = operand\n if int(jump_target,16) > int(func_addr_range[1],16) or int(jump_target,16) < int(func_addr_range[0],16):\n call_instrs.append(line)\n\n for to_visit_func_name, func_addr_range in function_addr_ranges.items():\n if int(jump_target,16) >= int(func_addr_range[0],16) and int(jump_target,16) <= int(func_addr_range[1],16):\n to_visit_functions.add(to_visit_func_name)\n called_func_name = func_name\n break\n # branch instr\n elif any(all(instr_char in line.split()[2] for instr_char in instr) for instr in branch_inst):\n operand = line.split()[3]\n jump_target = operand.split(',')[-1]\n \n if line == function_instr[func_name][-1]:# # Check if the last instruction is a branch instruction\n for b_next_func_name in function_addr_ranges.keys():\n current_func_addr_range = function_addr_ranges[func_name]\n next_func_addr_range = 
function_addr_ranges[b_next_func_name]\n if b_next_func_name != func_name and int(next_func_addr_range[0],16) > int(current_func_addr_range[1],16):\n to_visit_functions.add(b_next_func_name)\n called_func_name = func_name\n break\n for to_visit_func_name, func_addr_range in function_addr_ranges.items():\n if int(jump_target,16) >= int(func_addr_range[0],16) and int(jump_target,16) <= int(func_addr_range[1],16):\n to_visit_functions.add(to_visit_func_name)\n called_func_name = func_name\n break\n\n function_call_instr[func_name] = call_instrs\n # If no called functions found, add the next sequential function as to visit\n if not to_visit_functions:\n found = False\n for next_func_name in function_addr_ranges.keys():\n if found:\n to_visit_functions.add(next_func_name)\n called_func_name = func_name\n break\n if next_func_name == func_name:\n found = True\n \n # Recursively search for called functions in the called functions\n for been_called_func_name in to_visit_functions:\n if been_called_func_name not in visited_functions:\n visited_functions.add(called_func_name)\n visited_functions.add(been_called_func_name)\n find_to_visit_function(objdump_file, function_instr, function_addr_ranges, been_called_func_name, \\\n function_call_instr,visited_functions, visited_functions_id)\n \n\n visited_functions = sorted(visited_functions, key=lambda func_name: int(function_addr_ranges[func_name][0], 16))\n\n \n return to_visit_functions, visited_functions_id, visited_functions, function_call_instr \n\n\ndef find_ret_instruction(visited_functions, function_addr_ranges, function_instr):\n '''\n Find return instructions within visited functions.\n\n Args:\n visited_functions (list): List of visited function names.\n function_addr_ranges (dict): Dictionary mapping function names to their address ranges.\n function_instr (dict): Dictionary mapping function names to their instructions.\n\n Returns:\n tuple: A tuple containing a dictionary of return instruction addresses and a list of functions\n that have return instructions.\n\n '''\n \n # Initialize variables\n ret_instr_addr = {}\n function_have_ret_instr = []\n\n # Process each visited function\n for func_name in visited_functions:\n start_addr, end_addr = function_addr_ranges[func_name]\n instrs = function_instr[func_name]\n\n # Search for return instructions within the function's address range\n for line in instrs:\n tokens = line.split()\n instr_addr = tokens[0][:-1]\n mnemonic = tokens[2]\n\n # Check if the instruction is a return instruction\n if int(instr_addr, 16) >= int(start_addr, 16) and int(instr_addr, 16) <= int(end_addr, 16) \\\n and mnemonic == 'ret':\n # Store the return instruction address in the dictionary\n if func_name in ret_instr_addr:\n ret_instr_addr[func_name].append(instr_addr)\n else:\n ret_instr_addr[func_name] = [instr_addr]\n # Add the function to the list of functions with return instructions\n function_have_ret_instr.append(func_name)\n\n # Return the dictionary of return instruction addresses and the list of functions with return instructions\n return ret_instr_addr, function_have_ret_instr\n\ndef get_function_call_relationship(function_call_instr, function_addr_ranges, output_directory, program_name):\n '''\n Get the function call relationship between functions.\n\n Args:\n function_call_instr (dict): Dictionary mapping function names to their function call instructions.\n function_addr_ranges (dict): Dictionary mapping function names to their address ranges.\n\n Returns:\n dict: Dictionary representing the function call 
relationships.\n\n '''\n \n # Initialize dictionary to store the function call relationship\n function_call_relationship = {}\n \n # Iterate over the function call instructions\n for caller_func_name, call_instrs in function_call_instr.items():\n for call_instr in call_instrs:\n tokens = call_instr.split()\n instr_addr = tokens[0][:-1]\n mnemonic = tokens[2]\n operand = tokens[3]\n if ',' in operand:\n jump_target = operand.split(',')[-1]\n else:\n jump_target = operand\n \n # Iterate over the function address ranges\n for callee_func_name, address in function_addr_ranges.items():\n func_start_addr = address[0]\n func_end_addr = address[1]\n \n # Check if the jump target matches the start address of the callee function\n if mnemonic == 'jal':\n if jump_target in func_start_addr:\n if caller_func_name in function_call_relationship:\n function_call_relationship[caller_func_name].append(callee_func_name)\n else:\n function_call_relationship[caller_func_name] = [callee_func_name]\n break\n elif mnemonic == 'j':\n if jump_target in func_start_addr:\n if caller_func_name in function_call_relationship:\n function_call_relationship[caller_func_name].append(callee_func_name)\n else:\n function_call_relationship[caller_func_name] = [callee_func_name + ' *']\n break \n\n # Remove duplicates from the function call relationship\n for caller_func_name in function_call_relationship:\n function_call_relationship[caller_func_name] = list(set(function_call_relationship[caller_func_name]))\n # Sort the function call relationship based on the start address of the caller functions\n function_call_relationship = {k: v for k, v in sorted(function_call_relationship.items(), key=lambda item: int(function_addr_ranges[item[0]][0], 16))}\n \n G = graphviz.Digraph(format='svg')\n\n # Add nodes and edges to the graph\n for caller_func_name, callee_func_names in function_call_relationship.items():\n G.node(caller_func_name)\n for callee_func_name in callee_func_names:\n G.node(callee_func_name)\n G.edge(caller_func_name, callee_func_name)\n\n # Set the output file path\n output_file = os.path.join(output_directory, f'{program_name}_function_call_relationship')\n\n # Render the graph and save it to a file\n G.render(filename=output_file, cleanup=True, view=False)\n \n return function_call_relationship\n\ndef get_return_relationship(function_call_relationship, ret_instr_addr, function_call_instr, all_instr,function_addr_ranges):\n '''\n Get the return relationship between functions and the corresponding return targets.\n\n Args:\n function_call_relationship (dict): Dictionary representing the function call relationships.\n ret_instr_addr (dict): Dictionary mapping function names to their return instruction addresses.\n function_call_instr (dict): Dictionary mapping function names to their function call instructions.\n all_instr (list): List of all instructions.\n function_addr_ranges (dict): Dictionary mapping function names to their address ranges.\n\n Returns:\n dict: Dictionary representing the return targets for each function.\n\n '''\n \n # Initialize dictionaries to store the return relationship and return targets\n return_relationship = {}\n return_target = {}\n \n # Iterate over the function call relationship\n for caller_func_name, callee_func_names in function_call_relationship.items():\n for func_name in callee_func_names:\n try:\n callee_func_name = func_name.split()[0]\n # The function where the ret instruction is located is the jump target function of the j instruction\n if len(func_name.split()) != 1 and 
func_name.split()[1] == '*':\n last_func = [key for key, names in function_call_relationship.items() if caller_func_name.strip('<>').strip() in [name.strip('<>').strip() for name in names]]\n last_func_str = ' '.join(last_func)\n if callee_func_name in ret_instr_addr.keys():\n if callee_func_name in return_relationship:\n return_relationship[callee_func_name].append(last_func_str + ' ' + caller_func_name)\n else:\n return_relationship[callee_func_name] = [last_func_str + ' ' + caller_func_name]\n \n else:\n if callee_func_name in ret_instr_addr.keys():\n if callee_func_name in return_relationship:\n return_relationship[callee_func_name].append(caller_func_name)\n else:\n return_relationship[callee_func_name] = [caller_func_name]\n\n except KeyError:\n print(f\"KeyError: {callee_func_name} not found in ret_instr_addr\")\n \n # Iterate over the return relationship to find return targets\n for ret_key, ret_funcs in return_relationship.items():\n for func in ret_funcs:\n func_n = func.split()[0]\n if func in function_call_instr.keys() and ' ' not in func:\n for jal_instr in function_call_instr[func_n]:\n tokens = jal_instr.split()\n instr_addr = tokens[0][:-1]\n instr_target_func = tokens[-1]\n if instr_target_func == ret_key:\n for i in range(len(all_instr)):\n if int(all_instr[i].split()[0][:-1],16) == int(instr_addr,16):\n for func_name, addr in function_addr_ranges.items():\n if int(all_instr[i+1].split()[0][:-1],16) >= int(addr[0],16) and \\\n int(all_instr[i+1].split()[0][:-1],16) <= int(addr[1],16):\n if ret_key in return_target:\n return_target[ret_key].append(func_name + ' '+all_instr[i+1].split()[0][:-1])\n else:\n return_target[ret_key] = [func_name + ' '+all_instr[i+1].split()[0][:-1]]\n break\n else:\n continue\n else:\n continue\n elif func_n in function_call_instr.keys() and ' ' in func:\n func_n2 = func.split()[-1]\n for jal_instr in function_call_instr[func_n]:\n tokens = jal_instr.split()\n instr_addr = tokens[0][:-1]\n instr_target_func = tokens[-1]\n if instr_target_func == func_n2:\n for i in range(len(all_instr)):\n if int(all_instr[i].split()[0][:-1],16) == int(instr_addr,16):\n for func_name, addr in function_addr_ranges.items():\n if int(all_instr[i+1].split()[0][:-1],16) >= int(addr[0],16) and \\\n int(all_instr[i+1].split()[0][:-1],16) <= int(addr[1],16):\n if ret_key in return_target:\n return_target[ret_key].append(func_name + ' '+all_instr[i+1].split()[0][:-1])\n else:\n return_target[ret_key] = [func_name + ' '+all_instr[i+1].split()[0][:-1]]\n break\n else:\n continue\n else:\n continue\n \n return return_target\n\n\ndef get_all_control_transfer_instr(objdump_file, function_addr_ranges,visited_functions):\n '''\n Get all control transfer instructions within the given objdump file.\n\n Args:\n objdump_file (str): Path to the objdump file.\n function_addr_ranges (dict): Dictionary mapping function names to their address ranges.\n visited_functions (list): List of visited function names.\n\n Returns:\n tuple: A tuple containing the list of all instructions, a list of all control transfer instruction addresses,\n and a dictionary mapping visited function names to their control transfer instruction addresses.\n\n '''\n # Initialize lists and dictionaries\n all_instr = []\n address = []\n machine_code = []\n mnemonic = []\n operands = []\n all_control_transfer_instr_addr = [] \n \n # Read the objdump file and extract instructions\n with open(objdump_file,'r') as file:\n for line in file:\n if line.startswith(\" \"):\n all_instr.append(line.strip())\n \n # Process each 
instruction and extract relevant information \n for i in range(len(all_instr)):\n tokens = all_instr[i].split()\n if len(tokens) == 3: \n address.append(tokens[0][:-1]) \n machine_code.append(tokens[1])\n mnemonic.append(tokens[2])\n operands.append('')\n else:\n address.append(tokens[0][:-1]) \n machine_code.append(tokens[1])\n mnemonic.append(tokens[2])\n operands.append(tokens[3])\n \n # Find all control transfer instruction addresses\n for i in range(len(mnemonic)):\n if mnemonic[i] in branch_inst:\n operand = operands[i].split(',')\n all_control_transfer_instr_addr.append(address[i] + ',' + operand[-1])\n \n elif mnemonic[i] == 'jal':\n operand = operands[i].split(',')\n all_control_transfer_instr_addr.append(address[i] + ',' + operand[-1]) \n \n elif mnemonic[i] == 'j':\n if ',' in operands[i]:\n operand = operands[i].split(',')\n all_control_transfer_instr_addr.append(address[i] + ',' + operand[-1])\n elif ',' not in operands[i]:\n operand = operands[i].split() \n all_control_transfer_instr_addr.append(address[i] + ',' + operand[-1]) \n \n # Create a dictionary to store control transfer instruction addresses for each visited function\n functions_with_jump_instr_addr = {func_name: [] for func_name in visited_functions}\n \n # Assign control transfer instruction addresses to their corresponding functions\n for i in range(len(all_control_transfer_instr_addr)):\n for func_name in visited_functions:\n func_addr_range = function_addr_ranges[func_name]\n if int(all_control_transfer_instr_addr[i].split(',')[0],16) >= int(func_addr_range[0],16) and \\\n int(all_control_transfer_instr_addr[i].split(',')[0],16) <= int(func_addr_range[1],16):\n functions_with_jump_instr_addr[func_name].append(all_control_transfer_instr_addr[i])\n break\n \n # Sort the dictionary based on the starting address of each function\n sorted_functions_with_jump_instr_addr = {k: v for k, v in sorted(functions_with_jump_instr_addr.items(), \\\n key=lambda item: int(function_addr_ranges[item[0]][0], 16))}\n \n return all_instr, all_control_transfer_instr_addr, sorted_functions_with_jump_instr_addr\n\ndef write_in_may_used_control_transfer_instr(all_instr, functions_with_jump_instr_addr, output_file1, output_file2 ,\\\n output_directory, program_name):\n ct_path = os.path.join(output_directory, output_file1)\n bin_path = os.path.join(output_directory, output_file2)\n \n trans_count = 0\n \n with open (ct_path,'w',encoding='utf-8') as file1, open(bin_path,'wb') as file2:\n for func_name in functions_with_jump_instr_addr:\n file1.write('\\n' + func_name + ':\\n'+'\\n')\n \n for line in functions_with_jump_instr_addr[func_name]:\n addr, taken_target = line.split(',')\n target_line_num = None\n \n int_addr = int(addr, 16)\n int_target = int(taken_target, 16)\n bin_addr = bin(int_addr)[2:].zfill(16)\n bin_target = bin(int_target)[2:].zfill(16)\n addr_bytes = bin_addr.encode('utf-8')\n target_bytes = bin_target.encode('utf-8')\n file2.write(addr_bytes + target_bytes + b'\\n')\n \n trans_count += 1\n\n for line_num , instr in enumerate(all_instr):\n if instr.startswith(taken_target):\n target_line_num = line_num\n break\n \n if target_line_num is not None:\n jump_instr_line_num = None\n for line_num , instr in enumerate(all_instr):\n if instr.startswith(addr):\n jump_instr_line_num = line_num\n break \n if jump_instr_line_num is not None:\n file1.write('j/b_instr: '+all_instr[jump_instr_line_num] + '\\n')\n file1.write('t_instr: '+all_instr[target_line_num] + '\\n')\n file1.write('\\n')\n \n with 
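# A minimal sketch (not part of the original tool): the writer above encodes each
# address/taken-target pair as two 16-bit binary strings; for one hex address the
# conversion is simply:
addr = "101b4"                      # placeholder instruction address
bits = bin(int(addr, 16))[2:].zfill(16)
# int("101b4", 16) == 65972, so bits has 17 characters here: zfill() only pads, it never
# truncates, so addresses above 0xffff widen the emitted field beyond 16 bits.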
open(ct_path,'r+',encoding='utf-8') as file1, open(bin_path, 'r+', encoding='utf-8') as file2:\n content1 = file1.read()\n file1.seek(0,0)\n file1.write(\"trans_num: \" + str(trans_count) + '\\n' + content1)\n \n content2 = file2.read()\n file2.seek(0,0)\n bin_trans_count = bin(trans_count)[2:].zfill(16)\n file2.write(str(bin_trans_count) + '\\n' + content2)\n\n \n \n instruction_count = []\n function_names = []\n\n for function_name, jump_instructions in functions_with_jump_instr_addr.items():\n instruction_count.append(len(jump_instructions))\n function_names.append(function_name)\n\n # Set the figure size and spacing\n fig, ax = plt.subplots(figsize=(12, 6))\n plt.subplots_adjust(bottom=0.3)\n\n # Plot the bar chart\n bars = plt.bar(function_names, instruction_count)\n plt.xlabel('Function Name')\n plt.ylabel('Transfer Instructions')\n plt.title(program_name + ' Transfers per Function (Forward)')\n\n # Rotate the x-axis labels to prevent overlap\n plt.xticks(rotation=90)\n\n # Add data labels to the bars\n for bar in bars:\n height = bar.get_height()\n ax.text(bar.get_x() + bar.get_width() / 2, height, str(int(height)), ha='center', va='bottom')\n\n # Save the figure to a file\n plt.savefig(os.path.join(output_directory, program_name + '_forward_transfers_per_function.svg'))\n\n\ndef extract_used_function_instr(function_instr, visited_functions):\n used_function_instr = []\n \n for func_name, instr_list in function_instr.items():\n if func_name in visited_functions:\n used_function_instr.extend(instr_list)\n \n used_function_instr.sort(key=lambda x: int(x.split(':')[0], 16))\n # with open(output_file,'w',encoding='utf-8') as file :\n # for instr in used_function_instr:\n # file.write(instr + '\\n')\n \n return used_function_instr\n\ndef get_the_addr_information_for_basic_block(address, mnemonic, operands, function_addr_ranges):\n '''\n Get address information for basic block.\n\n Args:\n address (list): List of instruction addresses.\n mnemonic (list): List of instruction mnemonics.\n operands (list): List of instruction operands.\n function_addr_ranges (dict): Dictionary mapping function names to their address ranges.\n\n Returns:\n tuple: A tuple containing the list of end addresses for each function, a list of branch or jump target addresses,\n a list of start addresses for branches that are taken, a list of all taken target addresses,\n and a list of start addresses in order.\n\n '''\n # Initialize lists and variables\n branch_or_jump_target_addr = []\n order_start_addr_list = []\n end_addr_list = []\n func_end_addr_list = []\n branch_taken_start_addr = []\n branch_i = 0\n all_taken_target_addr = [] # All taken_target address\n \n # Extract function end addresses from function address ranges\n for func, addresses in function_addr_ranges.items():\n func_end_addr = addresses[-1]\n func_end_addr_list.append(func_end_addr)\n \n # Process each instruction and extract address information\n for i in range(len(mnemonic)):\n if i == 0:\n order_start_addr_list.append(address[i])\n \n # function without transfer instruction \n if address[i] in func_end_addr_list and i+1 <= len(mnemonic):\n end_addr_list.append(address[i])\n if i+1 < len(mnemonic):\n order_start_addr_list.append(address[i+1])\n # all_taken_target_addr.append(address[i] + ',' + address[i+1])\n elif i+1 == len(mnemonic):\n continue\n\n # Deal with branch instructions\n if mnemonic[i] in branch_inst and i+1 < len(mnemonic):\n branch_i += 1\n end_addr_list.append(address[i])\n order_start_addr_list.append(address[i+1])\n 
branch_or_jump_target_addr.append(address[i+1]+' bnt' + ' ' + str(branch_i))\n operand = operands[i].split(',')\n branch_taken_start_addr.append(operand[-1])\n branch_or_jump_target_addr.append(operand[-1]+' bt' + ' ' + str(branch_i))\n all_taken_target_addr.append(address[i] + ',' + operand[-1])\n\n # Deal with direct jump instructions\n for i in range(len(mnemonic)):\n if (mnemonic[i] == 'ret'):\n end_addr_list.append(address[i])\n if i+1 < len(mnemonic) and mnemonic[i+1]:\n order_start_addr_list.append(address[i+1])\n \n elif (mnemonic[i] == 'jal'):\n end_addr_list.append(address[i])\n operand = operands[i].split(',')\n branch_or_jump_target_addr.append(operand[-1]+' jal')\n all_taken_target_addr.append(address[i] + ',' + operand[-1])\n if i+1 < len(mnemonic):\n order_start_addr_list.append(address[i+1])\n \n elif (mnemonic[i] == 'j'):\n end_addr_list.append(address[i])\n if i+1 < len(mnemonic):\n order_start_addr_list.append(address[i+1])\n\n if ',' in operands[i]:\n operand = operands[i].split(',')\n branch_or_jump_target_addr.append(operand[0]+' j')\n all_taken_target_addr.append(address[i] + ',' + operand[-1])\n \n elif ',' not in operands[i]:\n operand = operands[i]\n branch_or_jump_target_addr.append(operand+' j') \n all_taken_target_addr.append(address[i] + ',' + operand)\n\n # Deal with indirect jump instructions\n for i in range(len(mnemonic)):\n if (mnemonic[i] == 'jalr'):\n end_addr_list.append(address[i])\n if i+1 < len(mnemonic):\n order_start_addr_list.append(address[i+1])\n branch_or_jump_target_addr.append('ffff'+' jalr')\n \n elif (mnemonic[i] == 'jr'):\n end_addr_list.append(address[i])\n if i+1 < len(mnemonic): \n order_start_addr_list.append(address[i+1])\n branch_or_jump_target_addr.append('ffff'+' jr')\n \n # Sort the lists\n all_taken_target_addr = sorted(all_taken_target_addr, key=lambda x: int(x.split(',')[0], 16))\n order_start_addr_list = sorted(list(set(order_start_addr_list)),key=lambda x: int(x, 16))\n end_addr_list = sorted(list(set(end_addr_list)),key=lambda x: int(x, 16))\n \n return end_addr_list, branch_or_jump_target_addr, branch_taken_start_addr, \\\n all_taken_target_addr, order_start_addr_list\n\nclass BasicBlock:\n def __init__(self, name, func, start, end, length, taken_target, not_taken_target, \\\n start_instr, end_instr, taken_target_instr, not_taken_target_instr, instr):\n self.name = name\n self.func = func\n self.start = start\n self.end = end\n self.length = length\n self.taken_target = taken_target\n self.not_taken_target = not_taken_target\n self.start_instr = start_instr\n self.end_instr = end_instr\n self.taken_target_instr = taken_target_instr\n self.not_taken_target_instr = not_taken_target_instr\n self.instr = instr \n\ndef calculate_block_length(start_addr, end_addr, used_function_instr):\n start_line = get_line_number(start_addr, used_function_instr)\n end_line = get_line_number(end_addr, used_function_instr)\n block_length = end_line - start_line + 1\n return block_length\n\ndef get_line_number(addr, used_function_instr):\n for i, instr in enumerate(used_function_instr):\n if instr.startswith(addr + \":\"):\n return i\n\n \ndef create_basic_blocks_in_order(order_start_addr_list, end_addr_list, used_function_instr, function_addr_ranges,\\\n ret_instr_addr, return_target):\n '''\n Create basic blocks in order based on the provided address information.\n\n Args:\n order_start_addr_list (list): List of start addresses for basic blocks in order.\n end_addr_list (list): List of end addresses for basic blocks.\n used_function_instr 
(list): List of used function instructions.\n function_addr_ranges (dict): Dictionary mapping function names to their address ranges.\n ret_instr_addr (dict): Dictionary mapping function names to their return instruction addresses.\n return_target (dict): Dictionary mapping function names to their return targets.\n\n Returns:\n list: List of created basic blocks.\n\n '''\n # Initialize a list to store the basic blocks\n basic_block = []\n # Create a BasicBlock object for each start address\n for i in range(len(order_start_addr_list)):\n basic_block.append(BasicBlock(0, '', 0, 0, 0, '', '', '', '', '', '', ''))\n \n # Populate the basic block objects with information\n for i in range(len(order_start_addr_list)):\n basic_block[i].name = i\n basic_block[i].start = order_start_addr_list[i]\n basic_block[i].end = end_addr_list[i]\n basic_block[i].length = calculate_block_length(basic_block[i].start, basic_block[i].end, used_function_instr)\n \n # Get the function name for each basic block\n func_name_l = [key for key ,addr in function_addr_ranges.items() if \\\n int(basic_block[i].start,16) >= int(addr[0],16) and int(basic_block[i].start,16) <= int(addr[1],16)]\n func_name = func_name_l[0]\n basic_block[i].func = func_name\n \n # Find the start and end instructions for each basic block \n block_instr_list = []\n for line in used_function_instr:\n if int(order_start_addr_list[i],16) == int(line[:line.index(':')],16):\n basic_block[i].start_instr = line\n if int(order_start_addr_list[i],16) <= int(line[:line.index(':')],16) <= int(end_addr_list[i],16):\n block_instr_list.append(line)\n if int(end_addr_list[i],16) == int(line[:line.index(':')],16):\n basic_block[i].end_instr = line\n break\n basic_block[i].instr = block_instr_list\n \n # Determine the taken and not-taken targets for each basic block \n tokens = basic_block[i].end_instr.split()\n if len(tokens) == 3: \n mnemonic = tokens[2]\n operands = ''\n else:\n mnemonic = tokens[2]\n operands = tokens[3]\n \n if mnemonic in branch_inst:\n operand = operands.split(',')\n basic_block[i].taken_target = operand[-1]\n \n elif mnemonic in unconditional_jump_inst:\n if ',' in operands:\n operand = operands.split(',')\n basic_block[i].taken_target = operand[-1]\n elif ',' not in operands:\n operand = operands.split()\n basic_block[i].taken_target = operand[-1]\n \n # Deal with indirect jump's target\n elif mnemonic in indirect_jump_inst:\n basic_block[i].taken_target = 'register: ' + operands\n # basic_block[i].taken_target = 'FFFF'\n basic_block[i].taken_target_instr = 'FFFFFFFF'\n\n \n # Deal with 'ret' target\n elif mnemonic == 'ret':\n for func, addresses in ret_instr_addr.items():\n for ret_addr in addresses:\n if int(basic_block[i].end,16) == int(ret_addr,16):\n if func in return_target.keys():\n basic_block[i].taken_target = return_target[func]\n break\n else:\n continue\n \n #branch not taken target \n if i+1 < len(order_start_addr_list) and mnemonic in branch_inst:\n basic_block[i].not_taken_target = order_start_addr_list[i+1]\n \n # Find the taken target and not taken target instructions\n for line in used_function_instr:\n if basic_block[i].taken_target == line[:line.index(':')]:\n basic_block[i].taken_target_instr = line\n if mnemonic in branch_inst and basic_block[i].not_taken_target == line[:line.index(':')]:\n basic_block[i].not_taken_target_instr = line \n \n return basic_block \n\ndef create_basic_blocks_start_with_taken_target(all_taken_target_addr, basic_block, order_start_addr_list, used_function_instr):\n '''\n The 
create_basic_blocks_start_with_taken_target function takes in several parameters including a list of all taken target addresses, \\\n a list of existing basic blocks, a list of starting addresses, and a list of used function instructions. \n The function creates new basic blocks that start with a taken target address and adds them to the existing list of basic blocks. \n The function returns the updated list of basic blocks.\n '''\n \n # Iterate through all_taken_target_addr to handle new basic block creation\n for addr_pair in all_taken_target_addr:\n jump_addr, target_addr = addr_pair.split(\",\")\n \n # Check if target address is not already in order_start_addr_list\n if target_addr not in order_start_addr_list:\n # Check if a basic block with the same start address exists\n existing_bb_with_start = next((bb for bb in basic_block if bb.start == target_addr), None)\n \n if existing_bb_with_start:\n in_bb = next((bb for bb in basic_block if int(bb.start,16) <= int(target_addr,16) <= int(bb.end,16)), None)\n # Update the existing basic block's end address\n existing_bb_with_start.end = in_bb.end\n existing_bb_with_start.length = calculate_block_length(existing_bb_with_start.start, existing_bb_with_start.end, used_function_instr)\n else:\n # Find the basic block that contains the target address\n for bb in basic_block:\n if int(bb.start,16) <= int(target_addr,16) <= int(bb.end,16):\n # Create a new basic block with the target address as start and bb.end as end\n new_bb_name = str(len(basic_block)) + ' start_with_taken_target'\n new_bb_start = target_addr\n new_bb_end = bb.end\n new_bb_length = calculate_block_length(new_bb_start, new_bb_end, used_function_instr)\n new_bb_func = bb.func\n block_instr_list = []\n for instr in used_function_instr:\n if int(new_bb_start,16) == int(instr[:instr.index(':')],16):\n new_bb_start_instr = instr\n if int(new_bb_start,16) <= int(instr[:instr.index(':')],16) <= int(new_bb_end,16):\n block_instr_list.append(instr)\n if int(new_bb_end,16) == int(instr[:instr.index(':')],16):\n new_bb_end_instr = instr\n if int(new_bb_end,16) == int(bb.end,16):\n new_bb_taken_target = bb.taken_target\n new_bb_not_taken_target = bb.not_taken_target\n new_bb_taken_target_instr = bb.taken_target_instr\n new_bb_not_taken_target_instr = bb.not_taken_target_instr\n\n new_bb_instr = block_instr_list\n \n new_bb = BasicBlock(new_bb_name, new_bb_func, new_bb_start, new_bb_end,\\\n new_bb_length, new_bb_taken_target, new_bb_not_taken_target, new_bb_start_instr, \\\n new_bb_end_instr, new_bb_taken_target_instr, new_bb_not_taken_target_instr, new_bb_instr)\n basic_block.append(new_bb)\n break\n\n return basic_block\n\n\ndef sort_basic_blocks(basic_block):\n sorted_basic_blocks = sorted(basic_block, key=lambda bb: int(bb.start, 16))\n print(\"basic blocks' num: \"+str(len(sorted_basic_blocks)))\n return sorted_basic_blocks\n\ndef write_basic_blocks_to_file(file_name, basic_block, output_directory):\n basic_block_path = os.path.join(output_directory, file_name)\n with open(basic_block_path, 'w', encoding='utf-8') as file:\n for bb in basic_block:\n file.write(f'Basic_block Name: {bb.name}\\n')\n file.write(f'In Function: {bb.func}\\n')\n file.write(f'Start address: {bb.start}\\n')\n file.write(f'End address: {bb.end}\\n')\n file.write(f'Start instruction: \\n\\t{bb.start_instr.strip()}\\n')\n file.write(f'End instruction: \\n\\t{bb.end_instr.strip()}\\n')\n file.write(f'Length: {bb.length}\\n')\n file.write(f'Taken_Target address: {bb.taken_target}\\n')\n file.write(f'Taken_Target 
instruction: \\n\\t{bb.taken_target_instr.strip()}\\n')\n file.write(f'Not_Taken_Target address: {bb.not_taken_target}\\n')\n file.write(f'Not_Taken_Target instruction: \\n\\t{bb.not_taken_target_instr.strip()}\\n')\n file.write('Instruction: '+'\\n')\n for line in bb.instr:\n file.write(f'\\t{line.strip()}\\n')\n file.write('\\n\\n')\n\ndef generate_CFG(basic_block, program_name, output_directory):\n # Create a new Graphviz graph\n graph1 = graphviz.Digraph(format='svg')\n \n # Create a mapping of basic block names to their respective nodes\n bb_nodes = {}\n\n # Add nodes to the graph\n for bb in basic_block:\n label = f'Basic_block Name: {bb.name}\\nIn Function: {bb.func}\\nStart address: {bb.start}\\nEnd address: {bb.end}\\nLength: {bb.length}\\nTaken_Target: {bb.taken_target}'\n if bb.not_taken_target is not None:\n label += f'\\nNot_Taken_Target address: {bb.not_taken_target}'\n\n node_name = str(bb.name)\n graph1.node(node_name, label=label, shape='box')\n bb_nodes[bb.name] = node_name \n\n # Add edges to the graph\n for i, bb in enumerate (basic_block):\n if bb.taken_target != '':\n if isinstance(bb.taken_target, list):\n for target_str in bb.taken_target:\n target = target_str.split()[1]\n for b_num, node_name in bb_nodes.items():\n if isinstance(b_num, str):\n num = int(b_num.split()[0])\n if target == basic_block[num].start:\n graph1.edge(bb_nodes[bb.name], node_name)\n else:\n if target == basic_block[b_num].start:\n graph1.edge(bb_nodes[bb.name], node_name)\n\n else:\n for b_num, node_name in bb_nodes.items():\n if isinstance(b_num, str):\n num = int(b_num.split()[0])\n if bb.taken_target == basic_block[num].start:\n graph1.edge(bb_nodes[bb.name], node_name)\n else:\n if bb.taken_target == basic_block[b_num].start:\n graph1.edge(bb_nodes[bb.name], node_name)\n \n elif bb.taken_target == '' and i+1 < len(basic_block):\n next_bb = basic_block[i+1]\n graph1.edge(bb_nodes[bb.name], bb_nodes[next_bb.name])\n\n if bb.not_taken_target != '':\n for b_num, node_name in bb_nodes.items():\n if isinstance(b_num, str):\n num = int(b_num.split()[0])\n if bb.not_taken_target == basic_block[num].start:\n graph1.edge(bb_nodes[bb.name], node_name,style='dashed',color = 'red')\n else:\n if bb.not_taken_target == basic_block[b_num].start:\n graph1.edge(bb_nodes[bb.name], node_name, style='dashed',color = 'red')\n \n # Set the output file path\n output_file = os.path.join(output_directory, f'{program_name}_CFG')\n\n # Render the graph and save it to a file\n graph1.render(filename=output_file, cleanup=True, view=False) \n\ndef convert_to_binary(basic_block, output_file_name, Hash_algorithm, value_length, output_directory):\n '''\n The convert_to_binary function takes in a list of basic blocks and an output file name. 
\n The function converts the machine code instructions in each basic block to binary format and writes the binary instructions \\\n and a hash value to the output file.\n '''\n binary_file_path = os.path.join(output_directory, output_file_name)\n \n with open (binary_file_path,'w',encoding='utf-8') as file:\n for i in range (len(basic_block)):\n bb_instr = basic_block[i].instr\n address, machine_code, mnemonic, operands = get_func_machine_code(bb_instr)\n \n # Get binary_machine_code \n binary_machine_code = []\n for j in range (len(machine_code)):\n int_machine_code = int(machine_code[j], 16)\n bin_machine_code = bin(int_machine_code)[2:].zfill(32)\n binary_machine_code.append(bin_machine_code)\n\n # Get binary_address\n binary_address = []\n for m in range (len(address)):\n int_address = int(address[m], 16)\n bin_address = bin(int_address)[2:].zfill(16)\n binary_address.append(bin_address)\n \n # Get hash value\n hash_value = calculate_hash_value(binary_machine_code, Hash_algorithm, value_length)\n\n # Write to file\n file.write(f'Basic_block: {basic_block[i].name}\\n')\n file.write(f'bin_basic_block_instructions: \\n')\n \n for i in range(len(binary_address)):\n file.write(f'\\t{binary_address[i]}: {binary_machine_code[i]}\\n')\n\n file.write(f'hash_value: \\n\\t{hash_value}\\n')\n file.write('\\n')\n\ndef convert_to_hex(basic_block, output_file, Hash_algorithm, value_length, output_directory):\n '''\n The convert_to_hex function takes in a list of basic blocks and an output file name. \n The function converts the machine code instructions in each basic block to hexadecimal format and\\\n writes the hexadecimal instructions and a hash value to the output file.\n '''\n hex_file_path = os.path.join(output_directory, output_file)\n \n with open(hex_file_path,'w',encoding='utf-8') as file:\n for i in range (len(basic_block)):\n bb_instr = basic_block[i].instr\n address, machine_code, mnemonic, operands = get_func_machine_code(bb_instr)\n \n # Get hash value\n hash_value = calculate_hash_value(machine_code, Hash_algorithm, value_length)\n\n # Write to file\n file.write(f'Basic_block: {basic_block[i].name}\\n')\n file.write(f'bin_basic_block_instructions: \\n')\n \n for i in range(len(address)):\n file.write(f'\\t{address[i]}: {machine_code[i]}\\n')\n\n file.write(f'hash_value: \\n\\t{hash_value}\\n')\n file.write('\\n')\n \ndef calculate_hash_value(data, algorithm, value_length):\n binary_data = ''.join(data)\n binary_data = binary_data.encode('utf-8')\n\n # Create hash object based on selected algorithm\n if algorithm == 'SHA-256':\n hash_type = hashlib.sha256()\n elif algorithm == 'MD5':\n hash_type = hashlib.md5()\n elif algorithm == 'SHA-1':\n hash_type = hashlib.sha1()\n elif algorithm == 'SHA-512':\n hash_type = hashlib.sha512()\n # Custom hash algorithm\n # elif algorithm == \" \":\n # hash_type = xxx.xxx()\n\n # Calculate hash value\n hash_type.update(binary_data)\n\n # Get hexdigest of hash value\n hash_value = hash_type.hexdigest()\n \n # Get the hash value of the specified number of digits\n hash_value_spl = hash_value[:int(value_length)]\n\n return hash_value_spl\n\ndef export_results(function_addr_ranges, function_information_file,\\\n all_instr, functions_with_jump_instr_addr, control_transfer_file,bin_file,\\\n basicblock_file_name, basic_block,\\\n block_binary_file_name, hex_file_name, Hash_algorithm, value_length, output_directory, program_name):\n \n # write_functions_information(function_addr_ranges, function_information_file, output_directory)\n \n 
write_in_may_used_control_transfer_instr(all_instr, functions_with_jump_instr_addr, control_transfer_file, \\\n bin_file, output_directory, program_name)\n \n write_basic_blocks_to_file(basicblock_file_name, basic_block, output_directory)\n \n convert_to_binary(basic_block, block_binary_file_name, Hash_algorithm, value_length, output_directory)\n \n convert_to_hex(basic_block, hex_file_name, Hash_algorithm, value_length, output_directory)\n \n# main(objdump_file) \n \n## UI\n\ndef judge_file_type(input_file_path):\n type = None\n with open(input_file_path, 'r') as file:\n lines = file.readlines()\n for line in lines[:15]:\n if line.startswith('#'):\n type = 1\n return type\n\nclass CFIEE_UI:\n def __init__(self, master):\n\n self.master = master\n master.title(\"CFIEE: A Critical Metadata Extraction Engine for RISC-V CFI Scheme\")\n # master.geometry(\"800x600\") \n \n # Column 1: Select .elf file and Disassemble\n elf_file_frame = tk.Frame(master)\n elf_file_frame.grid(row=0, column=0, padx=30, pady=30, sticky=\"nw\")\n elf_file_label = tk.Label(elf_file_frame, text=\"STEP1: Disassemble ELF File\", font=(\"Arial\", 10, \"bold\"))\n elf_file_label.pack(side=tk.TOP, anchor=\"n\", pady=20)\n\n # File Selection Row\n file_select_frame = tk.Frame(elf_file_frame)\n file_select_frame.pack(side=tk.TOP, anchor=\"n\")\n self.elf_file_select_button = tk.Button(file_select_frame, text=\"Select ELF file\", command=self.select_elf_file, padx=10, pady=5, bd=1, relief=\"raised\")\n self.elf_file_select_button.pack(side=tk.TOP, padx=10, anchor=\"n\")\n self.elf_file_path_var = tk.StringVar()\n self.elf_file_path_label = tk.Label(file_select_frame, textvariable=self.elf_file_path_var, wraplength=150, anchor=\"w\", bg=\"white\", bd=1,\\\n relief=\"groove\", padx=5)\n self.elf_file_path_label.pack(side=tk.TOP, fill=tk.X, anchor=\"n\", padx=10, pady=20)\n\n\n # Disassemble Row\n disassemble_frame = tk.Frame(elf_file_frame)\n disassemble_frame.pack(side=tk.TOP, anchor=\"n\", pady=10)\n self.disassemble_button = tk.Button(disassemble_frame, text=\"Disassemble\", command=self.disassemble_program, font=(\"Arial\", 10, \"bold\"), bg=\"lightgray\",\\\n padx=10, pady=5, bd=1, relief=\"raised\", state=tk.DISABLED)\n self.disassemble_button.pack(side=tk.TOP, anchor=\"n\")\n self.disassemble_label = tk.Label(disassemble_frame, wraplength=200, anchor=\"w\", font=(\"Arial\", 10), justify=tk.LEFT)\n self.disassemble_label.pack(side=tk.TOP, padx=10, fill=tk.X, expand=True)\n\n # Browse file section\n browse_frame = tk.Frame(elf_file_frame)\n browse_frame.pack(side=tk.TOP, anchor=\"n\", padx=10, pady=10)\n\n self.browse_section_label = tk.Label(browse_frame, text=\"STEP2: Select disassembly file(.txt)\", font=(\"Arial\", 10, \"bold\"))\n self.browse_section_label.pack(side=tk.TOP, anchor=\"n\", pady=10)\n\n self.browse_button = tk.Button(browse_frame, text=\"Browse File\", command=self.browse_file, padx=10, pady=5, bd=1, relief=\"raised\")\n self.browse_button.pack(side=tk.TOP, anchor=\"n\", padx=10, pady=10)\n\n self.file_path_var = tk.StringVar()\n self.file_path_label = tk.Label(browse_frame, textvariable=self.file_path_var, wraplength=150, anchor=\"w\", bg=\"white\", bd=1, relief=\"groove\", padx=5)\n self.file_path_label.pack(side=tk.TOP, fill=tk.X, anchor=\"n\", padx=10, pady=10)\n\n self.browse_label = tk.Label(browse_frame, wraplength=200, anchor=\"w\", font=(\"Arial\", 8), justify=tk.LEFT)\n self.browse_label.pack(side=tk.TOP, anchor=\"n\", pady=5)\n\n # Column 2: Preprocess, Analyze, Hash algorithm selection, and 
Data length selection\n section2_frame = tk.Frame(master)\n section2_frame.grid(row=0, column=2, padx=20, pady=30, sticky=\"nw\")\n # Preprocess section\n preprocess_frame = tk.Frame(section2_frame)\n preprocess_frame.pack(side=tk.TOP, anchor=\"n\", padx=10, pady=10)\n\n self.preprocess_section_label = tk.Label(preprocess_frame, text=\"STEP3: Data Preprocess\", font=(\"Arial\", 10, \"bold\"))\n self.preprocess_section_label.pack(side=tk.TOP, anchor=\"n\", padx= 10, pady=10)\n\n self.preprocess_button = tk.Button(preprocess_frame, text=\"Preprocess\", command=self.rewrite_file, state=tk.DISABLED, bg=\"lightgray\", \\\n padx=10, pady=5, bd=1, relief=\"raised\")\n self.preprocess_button.pack(side=tk.TOP, padx=10, pady=10, anchor=\"n\")\n\n self.rewrite_label = tk.Label(preprocess_frame, wraplength=200, anchor=\"center\", font=(\"Arial\", 8), justify=tk.CENTER)\n self.rewrite_label.pack(side=tk.TOP, fill=tk.X, pady=5)\n\n self.rewrite_file_path_var = tk.StringVar()\n\n # Analyze section\n analyze_frame = tk.Frame(section2_frame)\n analyze_frame.pack(side=tk.TOP, anchor='n', padx=10, pady=10)\n\n self.analyze_section_label = tk.Label(analyze_frame, text=\"STEP4: File Analyze\", font=(\"Arial\", 10, \"bold\"))\n self.analyze_section_label.pack(side=tk.TOP, anchor=\"n\", pady=10)\n\n self.analyze_label = tk.Label(analyze_frame, wraplength=200, anchor=\"center\", font=(\"Arial\", 8), justify=tk.CENTER)\n self.analyze_label.pack(side=tk.BOTTOM, fill=tk.X, padx=10, pady=5)\n\n hash_algorithm_frame = tk.Frame(analyze_frame)\n hash_algorithm_frame.pack(side=tk.TOP, anchor=\"w\", padx=10, pady=5)\n \n hash_algorithm_label = tk.Label(hash_algorithm_frame, text=\"Hash:\", font=(\"Arial\", 10))\n hash_algorithm_label.pack(side=tk.LEFT, anchor=\"w\",padx=10,pady = 5)\n\n self.hash_algorithm_var = tk.StringVar()\n hash_algorithm_options = [\"MD5\", \"SHA-1\", \"SHA-256\", \"SHA-512\"]\n self.hash_algorithm_menu = tk.OptionMenu(hash_algorithm_frame, self.hash_algorithm_var, *hash_algorithm_options)\n self.hash_algorithm_menu.pack(side=tk.LEFT, anchor=\"w\", padx = 10, pady = 5)\n\n data_length_frame = tk.Frame(analyze_frame)\n data_length_frame.pack(side=tk.TOP, anchor=\"w\", padx=10, pady=5)\n \n data_length_label = tk.Label(data_length_frame, text=\"Data Length:\", font=(\"Arial\", 10))\n data_length_label.pack(side=tk.LEFT, anchor=\"w\", padx=10, pady=5)\n\n self.data_length_var = tk.StringVar()\n data_length_options = [\"8\", \"16\", \"32\", \"Custom\"]\n self.data_length_menu = tk.OptionMenu(data_length_frame, self.data_length_var, *data_length_options)\n self.data_length_menu.pack(side=tk.LEFT, anchor=\"w\", padx=10, pady=5)\n \n custom_length_frame = tk.Frame(analyze_frame)\n custom_length_frame.pack(side=tk.TOP, anchor=\"w\", padx=10, pady=5)\n \n custom_length_label = tk.Label(custom_length_frame, text=\"Custom Length:\", font=(\"Arial\", 10))\n custom_length_label.pack(side=tk.LEFT, anchor=\"w\", padx=10, pady=5)\n\n self.custom_length_var = tk.StringVar()\n custom_length_entry = tk.Entry(custom_length_frame, textvariable=self.custom_length_var)\n custom_length_entry.pack(side=tk.LEFT, anchor=\"w\", padx=10, pady=5)\n # custom_length_entry.config(state=tk.DISABLED)\n \n self.analyze_button = tk.Button(analyze_frame, text=\"Analyze\", command=self.analyze_program, state=tk.DISABLED, bg=\"lightgray\", \\\n padx=10, pady=5, bd=1, relief=\"raised\")\n self.analyze_button.pack(side=tk.TOP, padx=10, pady=20, anchor=\"n\")\n \n # Progress bar \n self.progress_bar = ttk.Progressbar(analyze_frame, 
length=270, mode='determinate', orient=tk.HORIZONTAL)\n self.progress_bar.pack(side=tk.TOP, anchor=\"n\", padx=10, pady=20)\n # self.progress_bar.grid(row=1, column=2, padx=10, pady=20, columnspan=1, sticky=\"n\")\n \n # Column 3: Result output\n section3_frame = tk.Frame(master)\n section3_frame.grid(row=0, column=4, padx=10, pady=120, sticky=\"nw\")\n\n self.output_section_label = tk.Label(section3_frame, text=\"STEP5: Output Files\", font=(\"Arial\", 10, \"bold\"))\n self.output_section_label.pack(side=tk.TOP, anchor=\"n\", pady=20)\n\n left_frame = tk.Frame(section3_frame)\n left_frame.pack(side=tk.LEFT, padx=10)\n\n right_frame = tk.Frame(section3_frame)\n right_frame.pack(side=tk.LEFT, padx=30)\n \n button_width = 15\n\n basic_block_info_button = tk.Button(left_frame, text=\"Basic Block Info\", command=self.show_basic_block_info, width=button_width)\n basic_block_info_button.pack(side=tk.TOP, pady=15)\n \n bin_bb_button = tk.Button(left_frame, text=\"Binary Basic Block\", command=self.show_bin_bb, width=button_width)\n bin_bb_button.pack(side=tk.TOP, pady=15)\n\n hex_bb_button = tk.Button(left_frame, text=\"Hex Basic Block\", command=self.show_hex_bb, width=button_width)\n hex_bb_button.pack(side=tk.TOP, pady=15)\n\n binary_data_button = tk.Button(left_frame, text=\"Binary Data\", command=self.show_binary_data, width=button_width)\n binary_data_button.pack(side=tk.TOP, pady=15)\n\n transfers_info_button = tk.Button(right_frame, text=\"Transfers Info\", command=self.show_transfers_info, width=button_width)\n transfers_info_button.pack(side=tk.TOP, pady=15)\n\n cfg_button = tk.Button(right_frame, text=\"CFG\", command=self.show_cfg, width=button_width)\n cfg_button.pack(side=tk.TOP, pady=15)\n\n transfers_number_button = tk.Button(right_frame, text=\"Transfers Number\", command=self.show_transfers_number, width=button_width)\n transfers_number_button.pack(side=tk.TOP, pady=15)\n\n function_call_button = tk.Button(right_frame, text=\"Function Call\", command=self.show_function_call, width=button_width)\n function_call_button.pack(side=tk.TOP, pady=15)\n\n # Add padding between columns\n separator1 = ttk.Separator(master, orient='vertical')\n separator1.grid(row=0, column=1, sticky=\"ns\", padx=10, pady=30)\n\n separator2 = ttk.Separator(master, orient='vertical')\n separator2.grid(row=0, column=3, sticky=\"ns\", padx=10, pady=30) \n\n # Help button\n self.help_button = tk.Button(elf_file_frame, text=\"Help\", command=self.show_help, padx=10, pady=5, bd=1, relief=\"raised\")\n self.help_button.pack(side=tk.TOP, anchor=\"n\", padx=10, pady=10)\n #self.help_button.grid(row=1, column=0, padx=10, pady=20,columnspan=1, sticky=\"n\")\n \n #Custom Label\n self.author_label = tk.Label(master, text=\"Github @Taurus052\", font=(\"Arial\", 8))\n self.author_label.grid(row=1, column=2, padx=10, pady=10, sticky=\"n\")\n \n\n \n def select_elf_file(self):\n \n current_directory = os.getcwd()\n parent_directory = os.path.dirname(current_directory)\n elf_files_directory = os.path.join(parent_directory, \"elf_files\")\n\n filetypes = [(\"ELF Files\", \"*.elf\")]\n file_path = filedialog.askopenfilename(initialdir=elf_files_directory, filetypes=filetypes)\n if file_path:\n self.elf_file_path_var.set(file_path)\n self.disassemble_button.config(state=tk.NORMAL)\n\n def disassemble_program(self):\n elf_file = self.elf_file_path_var.get()\n if not elf_file:\n self.disassemble_label.config(text=\"No file selected\")\n return\n try:\n output_directory = os.path.join(os.path.dirname(os.getcwd()), 
\"objdump_files\")\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n output_file = os.path.join(output_directory, os.path.splitext(os.path.basename(elf_file))[0] + \"_disassembly.txt\")\n process = subprocess.run([\"riscv64-unknown-elf-objdump\", \"-S\", elf_file], capture_output=True, text=True)\n with open(output_file, 'w') as file:\n file.write(process.stdout)\n self.disassemble_label.config(text=\"Disassembly complete.\")\n \n self.file_path_var.set(output_file)\n \n self.preprocess_button.config(state=tk.NORMAL)\n self.analyze_button.config(state=tk.NORMAL)\n return output_file\n \n except subprocess.CalledProcessError as e:\n error_message = f\"Error: {e.stderr}\"\n self.show_error_message(error_message)\n\n def browse_file(self):\n \n current_directory = os.getcwd()\n parent_directory = os.path.dirname(current_directory)\n elf_files_directory = os.path.join(parent_directory, \"objdump_files\")\n\n filetypes = [(\"TXT Files\", \"*.txt\")]\n objdump_file = filedialog.askopenfilename(initialdir=elf_files_directory, filetypes=filetypes)\n\n if not objdump_file:\n self.browse_label.config(text=\"No file selected\")\n return\n\n self.file_path_var.set(objdump_file)\n self.browse_label.config(text=\"Objdump file selected\")\n self.preprocess_button.config(state=tk.NORMAL)\n self.analyze_button.config(state=tk.NORMAL)\n \n type = judge_file_type(objdump_file)\n if type == 1:\n self.browse_label.config(\n text=\"\\nPlease click the 'preprocess' button first\")\n else:\n self.browse_label.config(\n text=\"\\nYou can click the 'analyze' button now!\")\n\n\n def rewrite_file(self):\n objdump_file = self.file_path_var.get()\n file_name = os.path.basename(objdump_file)\n file_directory = os.path.dirname(objdump_file)\n output_file = os.path.join(file_directory, os.path.splitext(file_name)[0] + '_preprocessed.txt')\n self.rewrite_file_path_var.set(output_file)\n self.master.update()\n \n self.progress_bar['value'] = 0 \n self.progress_bar.start() # startup progress bar\n \n subprocess.run(['python', 'file_preprocess.py', objdump_file, output_file])\n \n self.master.after(100, lambda: self.rewrite_label.config(text=\"Objdump file has been rewrited!\\n \\\n File path: {0}\\n\".format(output_file)))\n \n self.progress_bar.stop()\n self.progress_bar['value'] = 100 \n self.master.update()\n\n\n def analyze_program(self):\n input_file = self.file_path_var.get()\n rewrite_file = self.rewrite_file_path_var.get()\n program_name = os.path.basename(input_file)\n\n # Extract program name\n if \"_objdump\" in program_name:\n program_name = program_name.split(\"_objdump\")[0]\n elif \"_disassembly\" in program_name:\n program_name = program_name.split(\"_disassembly\")[0]\n\n type = judge_file_type(input_file)\n if type == 1:\n self.analyze_label.config(\n text=\"Please click the 'preprocess' button first and rechoose the new file\")\n else:\n t = threading.Thread(target=self.run_analyze_program, args=(input_file, rewrite_file, program_name))\n t.start()\n\n\n def run_analyze_program(self, input_file, rewrite_file, program_name):\n try:\n self.progress_bar['value'] = 0\n\n hash_algorithm = self.hash_algorithm_var.get()\n if self.data_length_var.get() == \"Custom\":\n result_length = self.custom_length_var.get()\n if result_length == \"\":\n self.analyze_label.config(\n text=\"Please enter a custom length\")\n return\n else:\n result_length = self.data_length_var.get()\n\n if not hash_algorithm or not result_length:\n self.analyze_label.config(\n text=\"Please select hash algorithm and 
result length\")\n return\n\n file_to_analyze = rewrite_file if os.path.exists(\n rewrite_file) else input_file\n self.progress_bar.start() # startup progress bar\n self.analyze_label.config(text=\"Analyzing...\")\n\n # Execute analysis program\n main(file_to_analyze, hash_algorithm, result_length, program_name)\n\n self.progress_bar.stop()\n self.progress_bar['value'] = 100\n\n self.analyze_label.config(text=\"Complete!\")\n \n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_help(self):\n try:\n # Open .md files with the default application associated with the system\n readme_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'Readme.md'))\n subprocess.Popen(['start', '', readme_path], shell=True)\n except FileNotFoundError:\n print(\"Unable to find a default application to open the .md file.\")\n\n def show_basic_block_info(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n lastest_modification_time = 0\n bb_info_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\"basic_block.txt\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > lastest_modification_time:\n lastest_modification_time = file_modification_time\n bb_info_file_path = file_path\n \n if bb_info_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else:\n try:\n os.startfile(bb_info_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_binary_data(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n lastest_modification_time = 0\n bin_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\".bin\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > lastest_modification_time:\n lastest_modification_time = file_modification_time\n bin_file_path = file_path\n \n if bin_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else:\n try:\n subprocess.Popen([\"notepad.exe\", bin_file_path])\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_transfers_info(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n lastest_modification_time = 0\n transfers_info_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\"transfers.txt\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > lastest_modification_time:\n lastest_modification_time = file_modification_time\n transfers_info_file_path = file_path\n \n if transfers_info_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else:\n try:\n os.startfile(transfers_info_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_cfg(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n latest_modification_time = 0\n cfg_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\".svg\"):\n 
file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > latest_modification_time:\n latest_modification_time = file_modification_time\n cfg_file_path = file_path\n \n if cfg_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else:\n try:\n os.startfile(cfg_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_transfers_number(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n latest_modification_time = 0\n transfers_number_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\"per_function.svg\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > latest_modification_time:\n latest_modification_time = file_modification_time\n transfers_number_file_path = file_path\n \n \n if transfers_number_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else: \n try:\n os.startfile(transfers_number_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_function_call(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n latest_modification_time = 0\n function_call_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\"call_relationship.svg\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > latest_modification_time:\n latest_modification_time = file_modification_time\n function_call_file_path = file_path\n\n \n if function_call_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else: \n try:\n os.startfile(function_call_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_bin_bb(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n latest_modification_time = 0\n bin_bb_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\"bin_basic_block_inf.txt\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > latest_modification_time:\n latest_modification_time = file_modification_time\n bin_bb_file_path = file_path\n \n if bin_bb_file_path is None:\n error_message = \"Error: File not found\"\n self.show_error_message(error_message)\n \n else:\n try: \n os.startfile(bin_bb_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_hex_bb(self):\n output_files_dir = os.path.join(os.path.dirname(os.getcwd()), \"output_files\")\n latest_modification_time = 0\n hex_bb_file_path = None\n for filename in os.listdir(output_files_dir):\n if filename.endswith(\"hex_basic_block_inf.txt\"):\n file_path = os.path.join(output_files_dir, filename)\n file_modification_time = os.path.getmtime(file_path)\n \n if file_modification_time > latest_modification_time:\n latest_modification_time = file_modification_time\n hex_bb_file_path = file_path\n \n if hex_bb_file_path is None:\n error_message = \"Error:File not 
found\"\n self.show_error_message(error_message)\n else:\n try:\n os.startfile(hex_bb_file_path)\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.show_error_message(error_message)\n \n def show_error_message(self, error_message):\n error_window = tk.Toplevel(self.master)\n error_window.title(\"Error\")\n error_window.geometry(\"400x300\")\n\n scrollbar = tk.Scrollbar(error_window)\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n \n error_text = tk.Text(error_window, wrap=tk.WORD, yscrollcommand=scrollbar.set)\n error_text.pack(fill=tk.BOTH, expand=True)\n\n scrollbar.config(command=error_text.yview)\n\n error_text.insert(tk.END, error_message)\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n gui = CFIEE_UI(root)\n root.mainloop()\n\n\n\n\n","repo_name":"Taurus052/CFIEE","sub_path":"src/CFIEE.py","file_name":"CFIEE.py","file_ext":"py","file_size_in_byte":81232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"19088788655","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nuser_agent =\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/777.0.0.0 Safari/537.36\"\nuser_data=r\"/Users/JaeSung/Desktop/파이썬_크롤링/김플_인프런/10_selenium_option/jaesung\"\n\noptions = Options()\n#화면 꺼지지 않음\noptions.add_experimental_option(\"detach\",True)\n\n#user-agent 정보 변경\noptions.add_argument(f\"user-agent={user_agent}\")\n#user_data 저장\noptions.add_argument(f\"user-data-dir={user_data}\")\n\n#(1) 화면 크기 최대\noptions.add_argument(\"--start-maximized\")\n#브라우저가 풀스크린 모드(F11)로 실행됩니다.\n# options.add_argument('--start-fullscreen')\n#화면 크기 조절\n# options.add_argument(\"window-size=500,500\")\n#브라우저에 음소거 옵션을 적용합니다.\noptions.add_argument('--mute-audio')\n#시크릿 모드의 브라우저가 실행됩니다.\n# options.add_argument('incognito') \n#화면창 안띄우고 작업하기\n# options.add_argument(\"--headless\")\n#(2) 브라우저 맨위에 '자동화 중입니다'문구 삭제\noptions.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n#(3) 터미널 불필요한 문구 삭제\noptions.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\n\n#강사님이 주로 사용하는건 1,2,3\n\nservice = Service(ChromeDriverManager().install())\ndriver = webdriver.Chrome(service=service, options=options)\n\nurl =\"https://naver.com\"\n\ndriver.get(url)\n# print(driver.page_source[:1000])\n# driver.quit()","repo_name":"Polar-jaesung/crawling_python","sub_path":"10_selenium_option.py","file_name":"10_selenium_option.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"26796730847","text":"from csvhandler import *\nimport logging\n\nclass DataFeed(object):\n \n def __init__(self, instrument):\n self.feed = None\n self.subscribe_to_price_feed_for_instrument(instrument)\n \n def n_day_moving_avg(self, instrument, date, price, n):\n '''\n trading, not calendar, day average\n '''\n j = -1\n for i in range(len(self.feed)):\n if (self.feed[i]['date'] == date):\n j = i \n break \n \n tally = Decimal(0)\n counted = Decimal(0)\n for i in range(n):\n if (j - i >= 0):\n tally = tally + self.feed[j - i][price]\n counted = counted + 1 \n \n if (counted == 0):\n return None \n \n avg = tally / counted\n \n return avg \n \n def get_price(self, instrument, date, price):\n for i in self.feed:\n if (i['date'] == date):\n return i[price]\n return None \n\n def 
get_price_info(self, instrument, date):\n for i in self.feed:\n if (i['date'] == date):\n return i\n return None\n \n def subscribe_to_price_feed_for_instrument(self, instrument): \n root = '/home/david/pycode/mp/data/csv/'\n file_name = root + instrument + '.csv'\n \n logging.info('creating data feed for')\n logging.info(file_name)\n \n raw_rows = load_csv_data_rows(file_name)\n dicts = rows_to_dicts(raw_rows) \n sorted_dicts = sorted(dicts, key=lambda k: k['date']) \n self.feed = sorted_dicts\n\n def date_is_trading_day(self, d): \n for day in self.feed:\n if (day['date'] == d):\n return True\n return False\n","repo_name":"davidbarkhuizen/simagora","sub_path":"datafeed.py","file_name":"datafeed.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"9595673747","text":"# Uses python3\nimport sys\nimport math\n\ndef optimal_sequence(n):\n dp = [None] * (n+1)\n dp[1:4] = [0,1,1]\n for i in range(4,n+1):\n a = dp[i-1]\n b = dp[i//2] if i%2 == 0 else math.inf\n c = dp[i//3] if i%3 == 0 else math.inf\n dp[i] = min(a,b,c) + 1\n \n\n sequence = []\n while n > 1:\n sequence.append(n)\n a = dp[n-1]\n b = dp[n//2] if n%2 == 0 else math.inf\n c = dp[n//3] if n%3 == 0 else math.inf\n m = min(a,b,c)\n if n % 3 == 0 and dp[n//3] == m:\n n = n // 3\n elif n % 2 == 0 and dp[n//2] == m:\n n = n // 2\n else:\n n = n - 1\n sequence.append(1)\n return reversed(sequence)\n\ninput = sys.stdin.read()\nn = int(input)\nsequence = list(optimal_sequence(n))\nprint(len(sequence) - 1)\nfor x in sequence:\n print(x, end=' ')\n","repo_name":"spkapust/Algorithms-and-Data-Structures-Specialization","sub_path":"1) Algorithmic Toolbox/week5_dynamic_programming1/2_primitive_calculator/my_primitive_calculator.py","file_name":"my_primitive_calculator.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"40643053874","text":"n = int(input())\nmoney = list(map(int, input().split()))\nmoney.sort()\n\ntarget = 1\nfor i in money:\n # 만들 수 없는 금액을 찾았을 때 반복 종료\n if target < i:\n break\n target += i\n\n# 만들 수 없는 금액 출력\nprint(\"최종\" + str(target))","repo_name":"MinDongRyul/pythonworkstation","sub_path":"grid/gird4.py","file_name":"gird4.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"12131418467","text":"import os\nfrom subprocess import call\n\nimport shutil\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\nwith open('requirements.txt') as f:\n lines = f.readlines()\n\nshutil.rmtree('./dist', ignore_errors=True)\nfor req in lines:\n call(['pip2', 'install', req, '-t', './dist'])\ncopytree('./source', './dist')\nshutil.make_archive('./curator-aws-es', 'zip', './dist')\n\n","repo_name":"logindex/curator-aws-es","sub_path":"package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"17083764923","text":"import gi\r\ngi.require_version(\"Gtk\", \"3.0\")\r\nfrom gi.repository import Gtk, Gdk, GLib\r\nimport cairo\r\nfrom math import *\r\nimport sys\r\nimport numpy as np\r\n\r\n## Program that reads in two files -- a 
PTSP problem file and a file containing a tree and partial trajectory from DIRT -- and draws them.\r\n## Author: Bryan McKenney\r\n\r\n# Global constants\r\nSCALE = 5 # Scale the world size to the screen by this amount\r\nREFLECT_X = cairo.Matrix(1, 0, 0, -1) # Matrix to reflect something over the x-axis\r\nTRI_SIZE = 20 # The size to draw the triangle representing the vehicle\r\nCOLOR_MODES = [\"grayscale\", \"neon\", \"unh\"] # The different color modes that the animation can be displayed in\r\n\r\n# Global variables\r\ncolorMode = 2\r\nworldWidth = 0\r\nworldHeight = 0\r\nworldMap = [[]]\r\nwaypoints = [[]]\r\ntrajPoints = []\r\ntreeNodes = []\r\ninitDir = [] # Will be a numpy array\r\ninitPos = [] # Will be a numpy array\r\n\r\n\r\nclass DrawingWin(Gtk.Window):\r\n def __init__(self):\r\n super(DrawingWin, self).__init__()\r\n self.initUi()\r\n \r\n \r\n def initUi(self):\r\n darea = Gtk.DrawingArea()\r\n darea.connect(\"draw\", self.onDraw)\r\n self.add(darea)\r\n\r\n self.set_title(\"DIRT Visualizer\")\r\n self.resize(worldWidth * SCALE, worldHeight * SCALE)\r\n self.set_position(Gtk.WindowPosition.CENTER)\r\n self.connect(\"destroy\", Gtk.main_quit)\r\n self.show_all()\r\n \r\n \r\n def onDraw(self, wid, cr):\r\n # Flip the Context about the x-axis so that right and up are positive directions\r\n cr.translate(0, worldHeight * SCALE)\r\n cr.transform(REFLECT_X)\r\n\r\n # Color background\r\n setDrawColor(cr, \"background\")\r\n cr.paint()\r\n\r\n # Calculate cell size for drawing obstacles\r\n cellSize = worldWidth // len(worldMap[0])\r\n\r\n # Draw obstacles\r\n for row in range(len(worldMap)):\r\n for col in range(len(worldMap[0])):\r\n if (worldMap[row][col] == '#'):\r\n x = col * cellSize\r\n y = worldHeight - (row * cellSize) - cellSize\r\n cr.rectangle(x * SCALE, y * SCALE, cellSize * SCALE, cellSize * SCALE)\r\n \r\n # Color obstacles\r\n setDrawColor(cr, \"obstacle\")\r\n cr.fill()\r\n\r\n # Set up for drawing text\r\n cr.select_font_face(\"Monospace\", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)\r\n cr.set_font_size(20)\r\n setDrawColor(cr, \"waypoint\")\r\n \r\n # Draw waypoints\r\n for i in range(len(waypoints)):\r\n pt = waypoints[i]\r\n x = pt[0] * SCALE\r\n y = pt[1] * SCALE\r\n r = pt[2] * SCALE\r\n cr.move_to(x + r, y) # Arcs start drawing from the point at 0 rad, so shift over by radius\r\n cr.arc(x, y, r, 0, 2 * pi) # Arc from 0 to 2pi radians is a circle\r\n cr.stroke()\r\n setDrawColor(cr, \"text\")\r\n drawText(cr, x - 6, y - 7, str(i + 1)) # Draw waypoint number in center\r\n\r\n # Draw tree nodes\r\n cr.set_source_rgb(1, 0, 1)\r\n cr.set_line_width(0.5)\r\n for node in treeNodes:\r\n x = node[0] * SCALE\r\n y = node[1] * SCALE\r\n r = node[2] * SCALE\r\n cr.move_to(x + r, y)\r\n cr.arc(x, y, r, 0, 2 * pi)\r\n cr.stroke()\r\n #cr.fill()\r\n\r\n # Move to the starting point of the vehicle to start drawing path\r\n cr.move_to(initPos[0] * SCALE, initPos[1] * SCALE)\r\n cr.set_line_width(1)\r\n\r\n # Draw solution trajectory\r\n for pt in trajPoints:\r\n x = pt[0] * SCALE\r\n y = pt[1] * SCALE\r\n cr.line_to(x, y)\r\n\r\n setDrawColor(cr, \"path\")\r\n cr.stroke()\r\n\r\n # Draw solution node circles over in green\r\n for pt in trajPoints:\r\n x = pt[0] * SCALE\r\n y = pt[1] * SCALE\r\n r = pt[2] * SCALE\r\n cr.set_line_width(1.5)\r\n cr.move_to(x + r, y)\r\n cr.arc(x, y, r, 0, 2 * pi)\r\n\r\n cr.set_source_rgb(0, 1, 0)\r\n cr.stroke()\r\n\r\n # Draw vehicle\r\n x = initPos[0]\r\n y = initPos[1]\r\n xDir = initDir[0]\r\n yDir = initDir[1]\r\n theta = 
atan2(yDir, xDir)\r\n setDrawColor(cr, \"vehicle\")\r\n drawTriangle(cr, x * SCALE, y * SCALE, theta)\r\n \r\n \r\ndef drawText(cr, x, y, text):\r\n cr.move_to(x, y) # Go to the position for drawing\r\n cr.save()\r\n cr.translate(x, y) # Move Context to the position for reflection\r\n cr.transform(REFLECT_X) # Reflect Context about the x-axis (so the text does not draw upside-down)\r\n cr.show_text(text) # Draw the text\r\n cr.restore() # Undo translate and transform of Context\r\n\r\n\r\ndef drawTriangle(cr, x, y, theta):\r\n cr.save()\r\n cr.translate(x, y)\r\n cr.rotate(-pi / 2 + theta)\r\n cr.move_to(-(TRI_SIZE / 2), -(TRI_SIZE / 2))\r\n cr.line_to(TRI_SIZE / 2, -(TRI_SIZE / 2))\r\n cr.line_to(0, TRI_SIZE / 1.5)\r\n cr.close_path()\r\n cr.stroke()\r\n cr.restore()\r\n\r\n\r\ndef setDrawColor(cr, forWhat):\r\n colors = COLOR_MODES[colorMode]\r\n \r\n # Depending on the color mode and what is going to be drawn, apply the correct color\r\n if (colors == \"grayscale\"):\r\n if (forWhat == \"background\"):\r\n cr.set_source_rgb(0.95, 0.95, 0.95) # Light gray\r\n elif (forWhat == \"waypoint\"):\r\n cr.set_source_rgb(0.6, 0.6, 0.6) # Gray\r\n elif (forWhat == \"path\"):\r\n cr.set_source_rgb(0.8, 0.8, 0.8) # Light gray\r\n elif (forWhat == \"obstacle\"):\r\n cr.set_source_rgb(0.4, 0.4, 0.4) # Dark gray\r\n else: # Text, vehicle, waypointHit\r\n cr.set_source_rgb(0, 0, 0) # Black\r\n elif (colors == \"neon\"):\r\n if (forWhat == \"background\"):\r\n cr.set_source_rgb(0, 0, 0) # Black\r\n elif (forWhat == \"waypoint\"):\r\n cr.set_source_rgb(0, 1, 1) # Cyan\r\n elif (forWhat == \"waypointHit\"):\r\n cr.set_source_rgb(1, 1, 0) # Yellow\r\n elif (forWhat == \"path\"):\r\n cr.set_source_rgb(1, 0.4, 0) # Orange\r\n elif (forWhat == \"text\"):\r\n cr.set_source_rgb(1, 0, 1) # Magenta\r\n elif (forWhat == \"obstacle\"):\r\n cr.set_source_rgb(0, 0, 1) # Blue\r\n else: # Vehicle\r\n cr.set_source_rgb(0, 1, 0) # Green\r\n else: # UNH\r\n if (forWhat == \"background\"):\r\n cr.set_source_rgb(1, 1, 1) # White\r\n elif (forWhat == \"waypoint\"):\r\n cr.set_source_rgb(0.6, 0.6, 0.6) # Gray\r\n elif (forWhat == \"waypointHit\"):\r\n cr.set_source_rgb(0, 1, 1) # Cyan\r\n elif (forWhat == \"path\"):\r\n cr.set_source_rgb(0.49, 0.75, 0.93) # Sky blue\r\n elif (forWhat == \"text\"):\r\n cr.set_source_rgb(0.6, 0.6, 0.6) # Gray\r\n elif (forWhat == \"obstacle\"):\r\n cr.set_source_rgb(0, 0, 1) # Blue\r\n else: # Vehicle\r\n cr.set_source_rgb(0, 0.75, 1) # Deep sky blue\r\n\r\n\r\ndef readStandardIn():\r\n global worldWidth, worldHeight, worldMap, waypoints, initDir, initPos, trajPoints, treeNodes\r\n fillMode = \"none\"\r\n firstLine = True\r\n mapRow = 0\r\n i = 0\r\n \r\n if (len(sys.argv) > 2):\r\n # Read from first command line argument (problem file)\r\n with open(sys.argv[1], \"r\") as file:\r\n for line in file:\r\n # Strip newline from line and split by whitespace\r\n line = line.strip().split()\r\n \r\n # Read in data\r\n if (line != []):\r\n if (fillMode == \"none\"):\r\n if (line[0] == \"WORLD_DIMENSIONS:\"):\r\n worldWidth = int(line[1])\r\n worldHeight = int(line[2])\r\n elif (line[0] == \"MAP\"):\r\n fillMode = \"worldMap\"\r\n elif (line[0] == \"INITIAL_DIR:\"):\r\n theta = radians(float(line[1]))\r\n x = cos(theta)\r\n y = sin(theta)\r\n initDir = np.array([x, y])\r\n elif (line[0] == \"INITIAL_POS:\"):\r\n initPos = np.array([float(line[1]), float(line[2])])\r\n elif (line[0] == \"NUM_WAYPOINTS:\"):\r\n waypoints = [None for j in range(int(line[1]))]\r\n elif (line[0] == \"WAYPOINTS\"):\r\n 
fillMode = \"waypoints\"\r\n elif (fillMode == \"worldMap\"):\r\n # Initialize worldMap to the correct dimensions\r\n if (worldMap == [[]]):\r\n mapWidth = len(line[0])\r\n scaleFactor = worldWidth // mapWidth\r\n mapHeight = worldHeight // scaleFactor\r\n worldMap = [None for j in range(mapHeight)]\r\n\r\n # Convert the line into an array of characters to be the map row\r\n worldMap[mapRow] = list(line[0])\r\n mapRow += 1\r\n\r\n # Stop filling worldMap when it's full\r\n if (mapRow >= len(worldMap)):\r\n fillMode = \"none\"\r\n else: # Fill waypoints\r\n line.pop(0) # Get rid of first element of line (waypoint number)\r\n waypoints[i] = [float(strNum) for strNum in line]\r\n i += 1\r\n \r\n # Stop filling waypoints when it's full\r\n if (i >= len(waypoints)):\r\n fillMode = \"none\"\r\n\r\n # Read from second command line argument (tree node file)\r\n with open(sys.argv[2], \"r\") as file:\r\n for line in file:\r\n # Strip newline from line and split by whitespace\r\n line = line.strip().split()\r\n\r\n # Read in data\r\n if (line != []):\r\n if (line[0] == \"TREE_NODES\"):\r\n fillMode = \"treeNodes\"\r\n elif (line[0] == \"TRAJECTORY\"):\r\n fillMode = \"trajPoints\"\r\n elif (fillMode == \"treeNodes\"):\r\n treeNodes.append([float(strNum) for strNum in line])\r\n elif (fillMode == \"trajPoints\"):\r\n trajPoints.append([float(strNum) for strNum in line])\r\n else:\r\n # If too few command-line arguments are given, inform the user and exit the program with error status\r\n print(\"Missing command-line arguments. The first should be the problem file, and the second should be the tree node file.\")\r\n sys.exit(1)\r\n\r\n\r\ndef main():\r\n readStandardIn()\r\n win = DrawingWin()\r\n Gtk.main()\r\n \r\n \r\nif __name__ == \"__main__\": \r\n main()","repo_name":"bfm1009/reap-ptsp","sub_path":"dirtVisualizer/dirtVisualizer.py","file_name":"dirtVisualizer.py","file_ext":"py","file_size_in_byte":10936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"9043042881","text":"import cv2\nimport numpy as np\nimport os\n\nchar = 'A'\nfor i in range(0,26):\n\tif char == 'I':\n\t\tchar = chr(ord(char) + 1)\n\t\ti = i+1\n\t\tcontinue\n\tvidcap = cv2.VideoCapture('video2\\\\' + char + '.mp4')\n\tsuccess,image = vidcap.read()\n\tcount = 0\n\tsuccess = True\n\twhile success:\n\t\tcv2.imwrite('signs_data\\\\' + char + \"\\\\\" + char + \"frontframe%d.jpg\" % count, image) # save frame as JPEG file\n\t\tsuccess,image = vidcap.read()\n\t\tprint(char + ' Read a new frame' + str(count) + ': ', success)\n\t\tcount += 1\n\tcount = 0\n\tchar = chr(ord(char) + 1)\n","repo_name":"suhanacharya/Indian-Sign-Language-Classifier","sub_path":"scripts/split_frames.py","file_name":"split_frames.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"27352390647","text":"# Наследуется ли один класс от другого - yes/no\n# 4\n# A\n# B : A\n# C : A\n# D : B C\n# 4\n# A B\n# B D\n# C D\n# D A\n\ndef inherit_check(child, parent):\n if child == parent:\n return 'Yes'\n elif inherit[child] == None:\n return 'No'\n elif parent in inherit[child]:\n return 'Yes'\n else:\n for classes in inherit[child]:\n smth = inherit_check(classes, parent)\n if smth == 'Yes':\n return smth\n return smth\n\n\ninherit = {}\nn = int(input())\nfor i in range(n):\n rule = input().split(' ')\n if len(rule) == 1:\n inherit[rule[0]] = None\n else:\n rule1, shit, *rule2 = rule\n inherit[rule1] = 
rule2\nprint(inherit)\n\nquery = int(input())\nfor i in range(query):\n parent, child = input().split(' ')\n answer = inherit_check(child, parent)\n print(answer)\n","repo_name":"MarinaSlashcheva/python_course1","sub_path":"homeworks_2_term/homework2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"35439273168","text":"#coding:utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.figure(1) # 创建图表1\nplt.figure(2) # 创建图表2\nax1 = plt.subplot(211) # 在图表2中创建子图1\nax2 = plt.subplot(212) # 在图表2中创建子图2\nx = np.linspace(0, 3, 100)\nfor i in xrange(5):\n\tplt.figure(1) #? # 选择图表1\n\tplt.plot(x, np.exp(i*x/3))\n\tplt.sca(ax1) #? # 选择图表2的子图1\n\tplt.plot(x, np.sin(i*x))\n\tplt.sca(ax2) # 选择图表2的子图2\n\tplt.plot(x, np.cos(i*x))\nplt.show()\nexit()\n\nN = 5\nmenMeans = (20, 35, 30, 35, 27)\nmenStd = (2, 3, 4, 1, 2)\n\nind = np.arange(N) # the x locations for the groups\nwidth = 0.35 # the width of the bars\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)\n\nwomenMeans = (25, 32, 34, 20, 25)\nwomenStd = (3, 5, 2, 3, 3)\nrects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)\n\n# add some\nax.set_ylabel('Scores')\nax.set_title('Scores by group and gender')\nax.set_xticks(ind+width)\nax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )\n\nax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )\n\ndef autolabel(rects):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),\n ha='center', va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\n\nplt.savefig('plot_test.png')\n#plt.savefig('plot_test.eps', dpi = 600)\n","repo_name":"xifengbishu/KERAS2","sub_path":"WGS/plot2.py","file_name":"plot2.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"18267319195","text":"import unittest\nimport os\nimport csv\nfrom data_extraction.parser import Parser\n\n\nclass ParserTest(unittest.TestCase):\n \n def test_write_to_file(self):\n data = [(\"Abstract Title Test\", \"Abstract Description Test\")]\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n parser.write_to_file(data, file_name='test_data.csv', headers=[\"Abstract Title\", \"Abstract Description\"])\n\n # Note this test may result in a failure if you do not have permissions for the file\n self.assertTrue(os.path.lexists('test_data.csv'))\n with open('test_data.csv', 'rb') as csv_file:\n f_reader = csv.reader(csv_file)\n temp_data = []\n for row in f_reader:\n temp_data.append(row)\n \n self.assertEqual(\n [[\"Abstract Title\", \"Abstract Description\"], \n [\"Abstract Title Test\", \"Abstract Description Test\"]],\n temp_data)\n\n # Clean up the file from testing\n os.remove('test_data.csv')\n\n def test_remove_sunday_session(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n data = parser._remove_sunday_session(True)\n\n # Just snag the first abstract entry as we only need to ensure the Sunday sessions are gone\n data = data[:200]\n\n # Ensure Inaugural is present as that indicates the first session is the Inaugural session on Monday.\n self.assertTrue(-1 != 'Inaugural')\n\n\n def test_parse_authors(self):\n parser = 
Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n authors = parser.parse_authors()\n\n first_author = \"Mark Stinner Statistics Canada\"\n last_author = \"Sotirios Damouras University of Toronto Scarborough, Sohee Kang University of Toronto Scarborough\"\n\n self.assertEqual(authors[0], first_author)\n self.assertEqual(authors[-1], last_author)\n \n\n def test_parse_times(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n times = parser.parse_times()\n\n first_time = \"Sunday 09:00-16:00\"\n last_time = \"Wednesday 16:00-16:15\"\n\n self.assertEqual(times[0], first_time)\n self.assertEqual(times[-1], last_time)\n\n def test_parse_abstract_titles(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n titles = parser.parse_abstract_titles()\n\n first_title = \"Disclosure Control Methods\"\n last_title = \"The Status of Statistics Curricula in Canada\"\n \n self.assertEqual(titles[0], first_title)\n self.assertEqual(titles[-1], last_title)\n\n def test_parse_abstract_descriptions(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n descriptions = parser.parse_abstract_descriptions()\n\n first_description = \"\"\n last_description = \"This talk will review the current state of undergraduate Statistics curricula at universities across Canada. We will present both quantitative and qualitative information on the structure and composition of Major programs in Statistics. More specifically, we will look at the number and type of course requirements for each program, the learning outcomes they serve, the topics and skills they develop, as well as other relevant information. The talk intends to give an overview of how we collectively educate Statisticians, with the ultimate goal of helping identify directions for future curricular development. 
\"\n\n self.assertEqual(descriptions[0], first_description)\n self.assertEqual(descriptions[-1], last_description)\n\n def test_parse_invited_talks(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n invited_talks = parser.parse_invited_or_contributed_talks(\"Invited\")\n\n first_invited_talk = (u'The Challenge of Creating Data Collection Methods that are Neither Too Far Ahead nor Behind our Survey Respondents', 'Invited')\n last_invited_talk = (u'Towards More Reliable Neuroimaging-Based Biomarkers in Mental Illness: The Case of Schizophrenia Discrimination using fMRI Data', 'Invited')\n\n self.assertEqual(invited_talks[0], first_invited_talk)\n self.assertEqual(invited_talks[-1], last_invited_talk)\n\n def test_parse_contributed_talks(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n contributed_talks = parser.parse_invited_or_contributed_talks(\"Contributed\")\n\n first_contributed_talk = (u'Causal Inference with Measurement Error in Outcomes: Bias Analysis and Estimation Methods', 'Contributed')\n last_contributed_talk = (u'The Status of Statistics Curricula in Canada', 'Contributed')\n\n self.assertEqual(contributed_talks[0], first_contributed_talk)\n self.assertEqual(contributed_talks[-1], last_contributed_talk)\n\n def test_parse_poster_talks(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n poster_talks = parser.parse_invited_or_contributed_talks(\"Poster\")\n \n first_poster_talk = (u'University of Toronto', 'Poster')\n last_poster_talk = (u'Modeling and Treatment of Surveillance Flu Data', 'Poster')\n\n self.assertEqual(poster_talks[0], first_poster_talk)\n self.assertEqual(poster_talks[-1], last_poster_talk)\n\n def test_parse_all(self):\n parser = Parser(\"data/conference_data/SSC/2017/abstracts.tex\", \"data/conference_data/SSC/2017/prog.tex\")\n presentation_data = parser.parse_all()\n\n first_presentation = (u'Welcome', u'', u'Jack Gambino Statistics Canada', u'Monday 08:25-08:30')\n last_presentation = (u'The Status of Statistics Curricula in Canada', u'This talk will review the current state of undergraduate Statistics curricula at universities across Canada. We will present both quantitative and qualitative information on the structure and composition of Major programs in Statistics. More specifically, we will look at the number and type of course requirements for each program, the learning outcomes they serve, the topics and skills they develop, as well as other relevant information. The talk intends to give an overview of how we collectively educate Statisticians, with the ultimate goal of helping identify directions for future curricular development. ', u'Sotirios Damouras University of Toronto Scarborough, Sohee Kang University of Toronto Scarborough', u'Wednesday 16:00-16:15')\n\n self.assertEqual(presentation_data[0], first_presentation)\n self.assertEqual(presentation_data[-1], last_presentation)","repo_name":"norberte/Statistical-Consulting","sub_path":"tests/data_extraction/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"70305795840","text":"from django.urls import path\nfrom . 
import views\n\n# 템플릿엥서 app name을 바로바로 url 타고 가기 쉽게 하기 위해서\n# app 이 여러개 일 떄\napp_name = \"bookmark\"\n\nurlpatterns = [\n path('', views.list, name=\"list\"),\n path('/', views.cate_detail, name=\"cate_detail\"),\n path('cate_new/', views.cate_new, name='cate_new'),\n path('/cafe_edit/', views.cate_edit, name='cate_edit'),\n path('/cate_delete/', views.cate_delete, name='cate_delete'),\n path('mark_new/', views.mark_new, name='mark_new'),\n path('/mark_edit/', views.mark_edit, name='mark_edit'),\n path('/mark_delete/', views.mark_delete, name='mark_delete'),\n]\n","repo_name":"hongsy0113/crud_practice","sub_path":"bookmark/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"5215178128","text":"\"\"\"\nStep 01 - Indexes\n=================\n\"\"\"\n\n\n#######################################################################\n#\n# Loading data\n# ------------\n#\n# .. image:: ../../../_static/imgs/susceptibility-test-record.png\n# :width: 200\n# :align: right\n# :alt: ASAI\n#\n# A ``Susceptibility test`` record (see figure 4.1) is composed by laboratory\n# identification number (LID), patient identification number (PID), date, sample\n# type, specimen or culture (e.g. blood or urine), pathogen, antimicrobial, reported\n# status and outcome (resistant, sensitive or intermediate). In this research,\n# the susceptibility test data were grouped firstly by specimen type. Moreover,\n# for each sample type, the data were grouped by pairs (pathogen, antimicrobial)\n# since it is widely accepted by clinicians as detailed in the UK five year\n# strategy in AMR.\n#\n# A small dataset will be used for this example.\n#\n\n# Libraries\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n# Import from pyAMR\nfrom pyamr.datasets.load import make_susceptibility\n\n# -------------------------------------------\n# Load data\n# -------------------------------------------\n# Load data\ndata = make_susceptibility()\ndata = data.drop_duplicates()\n\n# Show\nprint(\"\\nData:\")\nprint(data)\nprint(\"\\nColumns:\")\nprint(data.dtypes)\n\n# Show unique elements\nprint(\"\\nUnique values:\")\nfor c in ['microorganism_code',\n 'antimicrobial_code',\n 'specimen_code',\n 'laboratory_number']:\n print('%-18s -> %5s' % (c, data[c].nunique()))\n\n\n#######################################################################\n#\n# Computing SARI\n# --------------\n#\n# The Single Antimicrobial Resistance Index - ``SARI`` - describes the proportion\n# of resistant isolates for a given set of susceptibility tests. It provides a\n# value within the range [0, 1] where values close to one indicate high resistance.\n# It is agnostic to pathogen, antibiotic and/or time. The variables ``R``, ``I`` and\n# ``S`` represent the number of susceptibility tests with Resistant, Intermediate and\n# Susceptible outcomes respectively. 
The definition might vary slightly since the\n# intermediate category is not always considered.\n#\n# The parameter strategy accepts the following options:\n#\n# - ``soft`` as R / R+I+S\n# - ``medium`` as R / R+S\n# - ``hard`` as R+I / R+I+S\n# - ``other`` as R+0.5I / R+0.5I+S\n#\n# For more information see: :py:mod:`pyamr.core.sari.SARI`\n#\n# For more examples see:\n#\n# - :ref:`sphx_glr__examples_tutorial_indexes_plot_core_sari.py`\n# - :ref:`sphx_glr__examples_indexes_plot_sari_antibiogram.py`\n# - :ref:`sphx_glr__examples_indexes_plot_sari_clustermap.py`\n# - :ref:`sphx_glr__examples_indexes_plot_sari_relmap.py`\n#\n\n# -------------------------------------------\n# Compute SARI\n# -------------------------------------------\n# Libraries\nfrom pyamr.core.sari import SARI\n\n# Create sari instance\nsari = SARI(groupby=['specimen_code',\n 'microorganism_name',\n 'antimicrobial_name',\n 'sensitivity'])\n\n# Compute SARI overall\nsari_overall = sari.compute(data,\n return_frequencies=True)\n\n# Show\nprint(\"SARI (overall):\")\nprint(sari_overall)\n\n# Plot Heatmap\n# ------------\n# Filter\nmatrix = sari_overall.copy(deep=True)\nmatrix = matrix.reset_index()\nmatrix = matrix[matrix.freq > 100]\nmatrix = matrix[matrix.specimen_code.isin(['BLDCUL'])]\n\n# Pivot table\nmatrix = pd.pivot_table(matrix,\n index='microorganism_name',\n columns='antimicrobial_name',\n values='sari')\n\n# Create figure\nf, ax = plt.subplots(1, 1, figsize=(10, 4))\n\n# Create colormap\ncmap = sns.color_palette(\"Reds\", desat=0.5, n_colors=10)\n\n# Plot\nax = sns.heatmap(data=matrix*100, annot=True, fmt=\".0f\",\n annot_kws={'fontsize': 'small'}, cmap=cmap,\n linewidth=0.5, vmin=0, vmax=100, ax=ax,\n xticklabels=1, yticklabels=1)\n\n# Add title\nplt.suptitle(\"Antibiogram\", fontsize='xx-large')\n\n# Tight layout\nplt.tight_layout()\nplt.subplots_adjust(right=1.05)\n\n#######################################################################\n#\n# Computing ASAI\n# --------------\n#\n# The antimicrobial spectrum of activity refers to the range of microbe species\n# that are susceptible to these agents and therefore can be treated. In general,\n# antimicrobial agents are classified into broad, intermediate or narrow spectrum.\n# Broad spectrum antimicrobials are active against both Gram-positive and\n# Gram-negative bacteria. In contrast, narrow spectrum antimicrobials have limited\n# activity and are effective only against particular species of bacteria. While these\n# profiles appeared in the mid-1950s, little effort has been made to define them.\n# Furthermore, such ambiguous labels are overused for different and even contradictory\n# purposes.\n#\n# In order to compute the antimicrobial spectrum of activity index - ``ASAI`` -, it\n# is necessary to previously obtain the overall resistance (SARI) for all the\n# microbe-antimicrobial pairs. Furthermore, by following the criteria used in the\n# narrow-broad approach, these pairs were grouped into Gram-positive and Gram-negative.\n# Briefly, the weighted proportion of species to which the antimicrobial\n# is effective is computed for each genus. These are later added up and normalized by\n# the number of genera tested. 
An antimicrobial is considered effective to treat a\n# particular species when the corresponding resistance index (SARI) is lower than\n# a given threshold.\n#\n# For more information see: :py:mod:`pyamr.core.asai.ASAI`\n#\n# For more examples see:\n#\n# - :ref:`sphx_glr__examples_tutorial_indexes_plot_core_asai.py`\n# - :ref:`sphx_glr__examples_indexes_plot_spectrum_gramtype.py`\n# - :ref:`sphx_glr__examples_indexes_plot_spectrum_multiple.py`\n#\n#\n# In order to compute ``ASAI``, we need to have the following columns present\n# in our dataset: ``antimicrobial``, ``microorganism_genus``, ``microorganism_species``\n# and ``resistance``. Moreover, in this example we will compute the ASAI for each\n# ``gram_stain`` category independently so we will need the microorganism gram stain\n# information too. This information is available in the registries: :py:mod:`pyamr.datasets.registries`\n#\n# Lets include all this information using the ``MicroorganismRegistry``.\n#\n\n# ------------------------------\n# Include gram stain\n# ------------------------------\n# Libraries\nfrom pyamr.datasets.registries import MicroorganismRegistry\n\n# Load registry\nmreg = MicroorganismRegistry()\n\n# Format sari dataframe\ndataframe = sari_overall.copy(deep=True)\ndataframe = dataframe.reset_index()\n\n# Create genus and species\ndataframe[['genus', 'species']] = \\\n dataframe.microorganism_name \\\n .str.capitalize() \\\n .str.split(expand=True, n=1)\n\n# Combine with registry information\ndataframe = mreg.combine(dataframe, on='microorganism_name')\n\n# Fill missing gram stain\ndataframe.gram_stain = dataframe.gram_stain.fillna('u')\n\n##############################################################################\n#\n# Now that we have the ``genus``, ``species`` and ``gram_stain`` information,\n# lets compute ``ASAI``.\n#\n\n# -------------------------------------------\n# Compute ASAI\n# -------------------------------------------\n# Import specific libraries\nfrom pyamr.core.asai import ASAI\n\n# Create asai instance\nasai = ASAI(column_genus='genus',\n column_specie='species',\n column_resistance='sari',\n column_frequency='freq')\n\n# Compute\nscores = asai.compute(dataframe,\n groupby=['specimen_code',\n 'antimicrobial_name',\n 'gram_stain'],\n weights='uniform',\n threshold=0.5,\n min_freq=0)\n\n# Stack\nscores = scores.unstack()\n\n# Filter and drop index.\nscores = scores.filter(like='URICUL', axis=0)\nscores.index = scores.index.droplevel()\n\n# Show\nprint(\"\\nASAI (overall):\")\nprint(scores)\n\n#######################################################################\n#\n# Lets plot it now!\n\n# ----------------\n# Helper method\n# ----------------\ndef scalar_colormap(values, cmap, vmin, vmax):\n \"\"\"This method creates a colormap based on values.\n\n Parameters\n ----------\n values : array-like\n The values to create the corresponding colors\n\n cmap : str\n The colormap\n\n vmin, vmax : float\n The minimum and maximum possible values\n\n Returns\n -------\n scalar colormap\n \"\"\"\n # Create scalar mappable\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)\n mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)\n # Gete color map\n colormap = sns.color_palette([mapper.to_rgba(i) for i in values])\n # Return\n return colormap\n\n# ---------------------------------------------------------------\n# Plot\n# ---------------------------------------------------------------\n# .. 
note: In order to sort the scores we need to compute metrics\n# that combine the different subcategories (e.g. gram-negative\n# and gram-positive). Two possible options are: (i) use the\n# gmean or (ii) the width.\n# Measures\nscores['width'] = np.abs(scores['ASAI_SCORE'].sum(axis=1))\n\n# Variables to plot.\nx = scores.index.values\ny_n = scores['ASAI_SCORE']['n'].values\ny_p = scores['ASAI_SCORE']['p'].values\ny_u = scores['ASAI_SCORE']['u'].values\n\n# Constants\ncolormap_p = scalar_colormap(y_p, cmap='Blues', vmin=-0.1, vmax=1.1)\ncolormap_n = scalar_colormap(y_n, cmap='Reds', vmin=-0.1, vmax=1.1)\ncolormap_u = scalar_colormap(y_u, cmap='Greens', vmin=-0.1, vmax=1.1)\n\n# ----------\n# Example\n# ----------\n# This example shows an stacked figure using more than two categories.\n# For instance, it uses gram-positive, gram-negative and gram-unknown.\n# All the indexes go within the range [0,1].\n# Create the figure\nf, axes = plt.subplots(1, 3, figsize=(7, 9))\n\n# Plot each category\nsns.barplot(x=y_p, y=x, palette=colormap_p, ax=axes[0], orient='h',\n saturation=0.5, label='Gram-positive')\nsns.barplot(x=y_n, y=x, palette=colormap_n, ax=axes[1], orient='h',\n saturation=0.5, label='Gram-negative')\nsns.barplot(x=y_u, y=x, palette=colormap_u, ax=axes[2], orient='h',\n saturation=0.5, label='Gram-unknown')\n\n# Configure\nsns.despine(bottom=True)\n\n# Format figure\nplt.subplots_adjust(wspace=0.0, hspace=0.0)\n\n# Remove yticks\naxes[1].set_yticks([])\naxes[2].set_yticks([])\n\n# Set title\naxes[0].set_title('Gram-positive')\naxes[1].set_title('Gram-negative')\naxes[2].set_title('Gram-unknown')\n\n# Set x-axis\naxes[0].set_xlim([0, 1.1])\naxes[1].set_xlim([0, 1.1])\naxes[2].set_xlim([0, 1.1])\n\n# Adjust\nplt.tight_layout()\n\n\n\n#######################################################################\n#\n# Computing SART\n# --------------\n#\n# The single antimicrobial resistance trend - ``SART`` - measures the ratio\n# of change per time unit (e.g. monthly or yearly). To compute this metric,\n# it is necessary to generate a resistance time series from the susceptibility\n# test data. This is often achieved by computing the SARI on consecutive or\n# overlapping partitions of the data. Then, the trend can be extracted using\n# for example a linear model where the slope, which is represented by a value\n# within the range [-1, 1], indicates the ratio of change.\n#\n# For more information see: :py:mod:`pyamr.core.sart.SART`\n#\n# For more examples see:\n#\n# - :ref:`sphx_glr__examples_tutorial_indexes_plot_core_sart.py`\n# - :ref:`sphx_glr__examples_indexes_plot_trend_basic.py`\n#\n# .. note:: Be cautious when Computing the ``SART`` index using a small dataset\n# (e.g. a low number of susceptibility tests records) since it is very\n# likely that the statistics produced (e.g. 
kurtosis or skewness) will\n# be ill defined.\n#\n# Since it is necessary to have a decent amount of records to be\n# able to compute the trends accurately, lets filter and choose\n# the tuples were are interested in.\n\n\n# -------------------------------------------\n# Show top combinations\n# -------------------------------------------\nfrom pyamr.core.sari import SARI\n\n# Create SARI instance\nsar = SARI(groupby=['specimen_code',\n 'microorganism_code',\n 'antimicrobial_code',\n 'sensitivity'])\n\n# Compute SARI overall\nsari_overall = sar.compute(data,\n return_frequencies=True)\n\n# Compute top tuples\ntop = sari_overall \\\n .sort_values(by='freq', ascending=False) \\\n .head(10)\n\n# Show\nprint(\"\\nTop by Frequency:\")\nprint(top)\n\n# -------------------------------------------\n# Filter data\n# -------------------------------------------\n# Define spec, orgs, abxs of interest\nspec = ['URICUL']\norgs = ['ECOL']\nabxs = ['ACELX', 'ACIP', 'AAMPC', 'ATRI', 'AAUG',\n 'AMER', 'ANIT', 'AAMI', 'ACTX', 'ATAZ',\n 'AGEN', 'AERT', 'ACAZ', 'AMEC', 'ACXT']\n\n# Create auxiliary DataFrame\naux = data.copy(deep=True) \\\n\n# Filter\nidxs_spec = data.specimen_code.isin(spec)\nidxs_orgs = data.microorganism_code.isin(orgs)\nidxs_abxs = data.antimicrobial_code.isin(abxs)\n\n# Filter\naux = aux[idxs_spec & idxs_orgs & idxs_abxs]\n\n\n########################################################\n#\n# Now, lets compute the resistance trend.\n\n# Libraries\nimport warnings\n\n# Import specific libraries\nfrom pyamr.core.sart import SART\n\n# Variables\nshift, period = '10D', '180D'\n\n# Create instance\nsar = SART(column_specimen='specimen_code',\n column_microorganism='microorganism_code',\n column_antimicrobial='antimicrobial_code',\n column_date='date_received',\n column_outcome='sensitivity',\n column_resistance='sari')\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n # Compute resistance trends\n table, objs = sar.compute(aux, shift=shift,\n period=period, return_objects=True)\n\n######################################################\n#\n# Lets see the summary DataFrame (note it is transposed!)\n\n# Configure pandas\npd.set_option(\n 'display.max_colwidth', 20,\n 'display.width', 1000\n)\n\n# Show\nprint(\"Results:\")\nprint(table.T)\n\n######################################################\n#\n# Lets see the model summary for the first entry\n\n# Display\n# This example shows how to make predictions using the wrapper and how\n# to plot the result in data. In addition, it compares the intervals\n# provided by get_prediction (confidence intervals) and the intervals\n# provided by wls_prediction_std (prediction intervals).\n\n# Variables\nname, obj = objs[2] # AAUG\n\n# Series\nseries = obj.as_series()\n\n# Variables.\nstart, end = None, 50\n\n# Get x and y\nx = series['wls-exog'][:,1]\ny = series['wls-endog']\n\n# Compute predictions (exogenous?). It returns a 2D array\n# where the rows contain the time (t), the mean, the lower\n# and upper confidence (or prediction?) 
interval.\npreds = obj.get_prediction(start=start, end=end)\n\n# Create figure\nfig, ax = plt.subplots(1, 1, figsize=(11, 5))\n\n# Plotting confidence intervals\n# -----------------------------\n# Plot truth values.\nax.plot(x, y, color='#A6CEE3', alpha=0.5, marker='o',\n markeredgecolor='k', markeredgewidth=0.5,\n markersize=5, linewidth=0.75, label='Observed')\n\n# Plot forecasted values.\nax.plot(preds[0, :], preds[1, :], color='#FF0000', alpha=1.00,\n linewidth=2.0, label=obj._identifier(short=True))\n\n# Plot the confidence intervals.\nax.fill_between(preds[0, :], preds[2, :],\n preds[3, :],\n color='r',\n alpha=0.1)\n\n# Legend\nplt.legend()\nplt.title(name)\n\n\nprint(\"Name: %s\\n\" % str(name))\nprint(obj.as_summary())\n\n#######################################################\n#\n# Lets display the information as a table graph\n\n# Libraries\nfrom pyamr.graphics.table_graph import _DEFAULT_CONFIGURATION\nfrom pyamr.graphics.table_graph import vlinebgplot\n\n# Configuration\ninfo = _DEFAULT_CONFIGURATION\ninfo['freq'] = {\n 'cmap': 'Blues',\n 'title': 'Freq',\n 'xticks': [0, 8000],\n 'kwargs': {\n 's': 80,\n 'vmin': 0\n }\n}\n\n\nrename = {\n 'wls-x1_coef': 'sart_m',\n 'wls-const_coef': 'offset',\n 'wls-rsquared': 'r2',\n 'wls-rsquared_adj': 'r2_adj',\n 'wls-m_skew': 'skew',\n 'wls-m_kurtosis': 'kurtosis',\n 'wls-m_jb_prob': 'jb',\n 'wls-m_dw': 'dw',\n 'wls-const_tprob': 'ptm',\n 'wls-x1_tprob': 'ptn',\n 'wls-pearson': 'pearson',\n 'freq': 'freq',\n}\n\n\n# Combine with SARI\n\n# Format combined DataFrame\ncomb = table.join(sari_overall)\ncomb.index = comb.index.map('_'.join)\ncomb = comb.reset_index()\ncomb = comb.rename(columns=rename)\n\n# Add new columns\ncomb['sart_y'] = comb.sart_m * 12 # Yearly trend\ncomb['sari_pct'] = comb.sari * 100 # SARI percent\n\n# Sort by trend\ncomb = comb.sort_values(by='sart_y', ascending=False)\n\n# Select only numeric columns\n# data = comb.select_dtypes(include=np.number)\ndata = comb[[\n 'index',\n 'sart_m',\n #'sart_y',\n 'sari_pct',\n 'r2',\n #'r2_adj',\n 'skew',\n 'kurtosis',\n 'jb',\n 'dw',\n 'ptm',\n #'ptn',\n 'pearson',\n 'freq'\n]]\n\n# Show DataFrame\nprint(\"\\nResults:\")\nprint(data)\n\n# Create pair grid\ng = sns.PairGrid(data, x_vars=data.columns[1:],\n y_vars=[\"index\"], height=4, aspect=.45)\n\n# Set common features\ng.set(xlabel='', ylabel='')\n\n# Plot strips and format axes (skipping index)\nfor ax, c in zip(g.axes.flat, data.columns[1:]):\n\n # Get information\n d = info[c] if c in info else {}\n\n # .. 
note: We need to use scatter plot if we want to\n # assign colors to the markers according to\n # their value.\n\n # Using scatter plot\n sns.scatterplot(data=data, x=c, y='index', s=100,\n ax=ax, linewidth=0.75, edgecolor='gray',\n c=data[c], cmap=d.get('cmap', None),\n norm=d.get('norm', None))\n\n # Plot vertical lines\n for e in d.get('vline', []):\n vlinebgplot(ax, top=data.shape[0], **e)\n\n # Configure axes\n ax.set(title=d.get('title', c),\n xlim=d.get('xlim', None),\n xticks=d.get('xticks', []),\n xlabel='', ylabel='')\n ax.tick_params(axis='y', which='both', length=0)\n ax.xaxis.grid(False)\n ax.yaxis.grid(visible=True, which='major',\n color='gray', linestyle='-', linewidth=0.35)\n\n# Despine\nsns.despine(left=True, bottom=True)\n\n# Adjust layout\nplt.tight_layout()\nplt.show()\n","repo_name":"bahp/pyAMR","sub_path":"examples/tutorial/guide/plot_step_01.py","file_name":"plot_step_01.py","file_ext":"py","file_size_in_byte":18230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"12853842078","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nimport collections\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nprint (\"Packages loaded.\")\n# Configuration\nbatch_size = 20\nembedding_size = 2 # This is just for visualization\nnum_sampled = 15 # Number of negative examples to sample.\n# Sample sentences\nsentences = [\"the quick brown fox jumped over the lazy dog\",\n \"I love cats and dogs\",\n \"we all love cats and dogs\",\n \"cats and dogs are great\",\n \"sung likes cats\",\n \"she loves dogs\",\n \"cats can be very independent\",\n \"cats are great companions when they want to be\",\n \"cats are playful\",\n \"cats are natural hunters\",\n \"It's raining cats and dogs\",\n \"dogs and cats love sung\"]\n# 'sentences' is 'list'\nprint (\"'sentences' is %s and length is %d.\" % (type(sentences), len(sentences)))\nwords = \" \".join(sentences).split()\nprint (\"'words' is %s and length is %d.\" % (type(words), len(words)))\nprint (words)\n\ncount = collections.Counter(words).most_common()\nprint (\"'count' is %s and length is %d.\" % (type(count), len(count)))\nprint ((\"Word count of top five is %s\") % (count[:5]))\nprint (count)\nprint (words[0:5])\nprint (count[0:3])\nrdic = [i[0] for i in count] # reverse dic, idx -> word\ndic = {w: i for i, w in enumerate(rdic)} # dic, word -> id\nvoc_size = len(dic) # Number of vocabulary\nprint (\"'rdic' is %s and length is %d.\" % (type(rdic), len(rdic)))\nprint (\"'dic' is %s and length is %d.\" % (type(dic), len(dic)))\nprint (rdic)\nprint (dic)\nprint (rdic[0])\nprint (dic['cats'])\ndata = [dic[word] for word in words]\nprint (\"'data' is %s and length is %d.\" % (type(data), len(data)))\nprint('Sample data: numbers: %s / words: %s' % (data[:10], [rdic[t] for t in data[:10]]))\n# See what's in the data\nprint (data)\n# ([the, brown], quick), ([quick, fox], brown), ([brown, jumped], fox),\ncbow_pairs = [];\nfor i in range(1, len(data) - 1):\n cbow_pairs.append([[data[i - 1], data[i + 1]], data[i]])\nprint('Context pairs: %s' % (cbow_pairs[:10]))\nprint (\"'cbow_pairs' is %s and length is %d.\" % (type(cbow_pairs), len(cbow_pairs)))\n\n# (quick, the), (quick, brown), (brown, quick), (brown, fox), ...\nskip_gram_pairs = [];\nfor c in cbow_pairs:\n skip_gram_pairs.append([c[1], c[0][0]])\n skip_gram_pairs.append([c[1], c[0][1]])\n\nprint (\"'skip_gram_pairs' is %s and length is %d.\"\n % (type(skip_gram_pairs), 
len(skip_gram_pairs)))\nprint('skip-gram pairs', skip_gram_pairs[:5])\n\n\ndef generate_batch(size):\n assert size < len(skip_gram_pairs)\n x_data = []\n y_data = []\n r = np.random.choice(range(len(skip_gram_pairs)), size, replace=False)\n for i in r:\n x_data.append(skip_gram_pairs[i][0]) # n dim\n y_data.append([skip_gram_pairs[i][1]]) # n, 1 dim\n return x_data, y_data\n\n\n# generate_batch test\nprint ('Batches (x, y)', generate_batch(3))\n\n# Construct network\n# Input data\ntrain_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n# need to shape [batch_size, 1] for nn.nce_loss\ntrain_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n# missing GPU implementation?\nwith tf.device('/cpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([voc_size, embedding_size], -1.0, 1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs) # lookup table\n\n# Construct the variables for the NCE loss\nnce_weights = tf.Variable(\n tf.random_uniform([voc_size, embedding_size], -1.0, 1.0))\nnce_biases = tf.Variable(tf.zeros([voc_size]))\n\n# Compute the average NCE loss for the batch.\nloss = tf.reduce_mean(\n tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,\n num_sampled, voc_size))\n\n# Use the adam optimizer\ntrain_op = tf.train.AdamOptimizer(0.01).minimize(loss)\nprint (\"Network ready\")\n\n# Launch the graph in a session\nwith tf.Session() as sess:\n # Initializing all variables\n tf.initialize_all_variables().run()\n\n for step in range(3000):\n batch_inputs, batch_labels = generate_batch(batch_size)\n _, loss_val = sess.run([train_op, loss],\n feed_dict={train_inputs: batch_inputs, train_labels: batch_labels})\n if step % 500 == 0:\n print(\"Loss at %d: %.5f\" % (step, loss_val))\n # Report the loss\n\n # Final embeddings are ready for you to use.\n # Need to normalize for practical use\n trained_embeddings = embeddings.eval()\ntrained_embeddings.shape\n# Show word2vec if dim is 2\nif trained_embeddings.shape[1] == 2:\n labels = rdic[:20] # Show top 20 words\n for i, label in enumerate(labels):\n x, y = trained_embeddings[i, :]\n plt.scatter(x, y)\n plt.annotate(label, xy=(x, y), xytext=(5, 2),\n textcoords='offset points', ha='right', va='bottom')\n plt.show()\n","repo_name":"mxmpersonal/tensorflow-101_prac","sub_path":"tensorflow-101/word2vec_simple.py","file_name":"word2vec_simple.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71578750073","text":"import re\n\nfrom asteval import Interpreter\n\nfrom tautbot.client.slack import slack_client\nfrom tautbot.plugin import PluginBase\nfrom tautbot.util.events import Observer\n\n\nclass Calc(PluginBase, Observer):\n def __init__(self, command='calc'):\n super(self.__class__, self).__init__(command=command)\n Observer.__init__(self)\n self.aeval = Interpreter()\n\n def events(self, *args, **kwargs):\n self.observe('channel_command', self.route_event)\n\n def route_event(self, command, channel, text, output):\n if re.match('^calc', command):\n text = text.replace(command, '').strip()\n self.calc(channel, text)\n\n def calc(self, channel, text):\n response = self.aeval(text)\n slack_client.api_call(\"chat.postMessage\", channel=channel,\n text=response, 
as_user=True)\n","repo_name":"z/tautbot","sub_path":"plugins/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25059692795","text":"#!/home/ec2-user/venv/python3/bin/python\n\n__author__ = ['[Gerald Sim](https://github.com/meappy)']\n__date__ = '2019.10.04'\n__version__ = \"1.0.0\"\n\n\"\"\"\nDeploy AWS CloudFormation\n\"\"\"\n\nimport sys\nimport json\nimport fire\nimport subprocess\n\ncform_file = 'launch-web.yaml'\nparam_file = 'params/launch-web.json'\n\n# https://bit.ly/2pjBDUd\n# https://bit.ly/2otFmOT\ndef read_param(param_index, param_name='ParameterValue'):\n with open(param_file) as param:\n data = json.load(param)\n return (data[param_index][param_name])\n\ndef run(x, silent=True):\n if __name__ == '__main__':\n return fire.Fire(x)\n\nstack_name = (run(read_param))\n\n# https://bit.ly/2AM43sp\n# https://bit.ly/2AFiXRn\ncmd='aws --profile default cloudformation create-stack \\\n --stack-name %s \\\n --parameters file://params/launch-web.json \\\n --template-body file://launch-web.yaml' % (stack_name).rstrip()\n\npush=subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE)\npush.wait()\nprint(push.returncode)\n","repo_name":"meappy/aws-cloudformation-web-autoscale","sub_path":"launch-web.py","file_name":"launch-web.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19196582325","text":"\n# import sys\nimport os\nimport traceback\nimport time\nimport json\nfrom yaml import load, dump\nimport base64\n# import tempfile\n\nfrom kubernetes import client, config\nfrom kafka import KafkaProducer, KafkaConsumer\n\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nGROUP_CONFIG_FILE = \"./group_config.yaml\"\n\ngroup_config = load(open(GROUP_CONFIG_FILE, 'r'))\nprint(\"group_config:\", group_config)\n\n# config.load_incluster_config()\nconfig.load_kube_config(config_file=os.environ['KUBECONFIG'])\n\nv1 = client.CoreV1Api()\n\nconsumer = KafkaConsumer(\n \"automation_v1_request\",\n bootstrap_servers=['192.168.0.62:31090',\n '192.168.0.62:31091', '192.168.0.62:31092']\n # bootstrap_servers=['127.0.0.1:9092']\n # bootstrap_servers=['kafka.kubeless.svc.cluster.local:9092']\n)\n\n\nclass Failed_Api_Response:\n active = None\n failed = 1\n succeeded = None\n\n\ndef get_job_status(batchApi, namespace, job_name):\n batchApi_resp = {}\n try:\n batchApi_resp = batchApi.read_namespaced_job(\n namespace=namespace,\n name=job_name\n )\n except client.rest.ApiException as e:\n print(\"Error calling k8s batch api: %s\\n\" % e)\n print(\"Error calling k8s batch api:\\n\", e)\n print(\"status: %s\\n\" % e.status)\n print(\"Exception body: %s\\n\" % e.body)\n return Failed_Api_Response()\n except Exception as err:\n print(str(err))\n print(traceback.format_exc())\n return {}\n print(\"read batchApi_resp:\", batchApi_resp)\n if batchApi_resp.status is None:\n print(\"No status returned from job: %s\\n\", batchApi_resp)\n return {}\n return batchApi_resp.status\n\n\ndef sendError(event_uuid, code, err, producer):\n try:\n response = {\n \"event_uuid\": event_uuid,\n \"code\": code,\n \"error\": err,\n \"stacktrace\": \"\"\n }\n\n new_event = bytearray(json.dumps(response), encoding='utf-8')\n producer.send('automation_v1_response', key=b'event',\n value=new_event).get(timeout=30)\n producer.flush(timeout=5)\n except Exception as err:\n 
print(str(err))\n print(traceback.format_exc())\n\n\ndef wrapped(evbody, producer):\n # NOTE: if you catch any errors here then they wont be reported back to the\n # user. See usage of sendError() for reporting errors back\n path = evbody[\"path\"]\n form = evbody[\"form\"] # e.g. ?group=goethite&name=my-task\n method = evbody[\"method\"] # POST/GET/etc\n headers = evbody[\"headers\"]\n\n data = {}\n try:\n dataStr = base64.b64decode(evbody[\"body\"])\n except TypeError as err:\n sendError(\n evbody[\"event_uuid\"],\n 400,\n \"Failed to decode base64 data: %s, err: %s\" % (\n evbody[\"body\"], err),\n producer\n )\n return\n\n try:\n data = json.loads(dataStr)\n except json.JSONDecodeError as err:\n sendError(\n evbody[\"event_uuid\"],\n 400,\n \"Failed to decode json data: %s, err: %s\" % (dataStr, err),\n producer\n )\n return\n\n print(\"%s: %s form: %s, data: %s, evbody: %s\" %\n (method, path, form, data, evbody))\n print(\"headers: %s\" % (headers))\n\n if \"targets\" not in data:\n sendError(evbody[\"event_uuid\"], 400,\n \"request body must have targets\", producer)\n return\n\n # body: {\n # targets: { // equiv to ansible inv\n # hosts: {\"host_a\": {host_vars_here...}, ...}, or\n # hosts: [\"host_a\", ...],\n # groups: {\n # a_group: {\n # hosts: as hosts above,\n # params: {group_vars_here...}\n # }, ...\n # },\n # params: {inv vars for all groups}\n # },\n # params: {overriding global vars, i.e. ansible --extra-vars=...}\n # }\n inv = {\n \"all\": {\n \"hosts\": {},\n \"children\": {}\n }\n }\n # hosts and host_vars in default \"all\" group\n if \"hosts\" in data[\"targets\"]:\n for host_name in data[\"targets\"][\"hosts\"]: # dicts of hosts objs\n if isinstance(data[\"targets\"][\"hosts\"], list):\n inv[\"all\"][\"hosts\"][host_name] = {}\n else:\n inv[\"all\"][\"hosts\"][host_name] = data[\"targets\"][\"hosts\"][host_name]\n\n if \"groups\" in data[\"targets\"]:\n all_children = inv[\"all\"][\"children\"]\n for group_name in data[\"targets\"][\"groups\"]:\n group = data[\"targets\"][\"groups\"][group_name]\n # hosts and host_vars for hosts in this group\n if \"hosts\" in group:\n for host_name in group[\"hosts\"]:\n if isinstance(group[\"hosts\"], list):\n all_children[group_name] = {}\n else:\n all_children[group_name] = group[\"hosts\"][host_name]\n # group_vars\n if \"params\" in group:\n all_children[group_name][\"vars\"] = group[\"params\"]\n\n # group \"all\" vars in inventory\n if \"params\" in data[\"targets\"]:\n inv[\"all\"][\"vars\"] = data[\"targets\"][\"params\"]\n\n extra_vars = {}\n if \"params\" in data:\n extra_vars = data[\"params\"]\n\n inv_yaml = dump(inv)\n print(\"inv_yaml:\\n%s\" % inv_yaml)\n inv_b64 = base64.b64encode(\n bytearray(inv_yaml, encoding=\"utf-8\")\n )\n\n # Encode data params to be injected as a extra_vars file (YAML)\n extra_vars_yaml = dump(extra_vars)\n print(\"extra_vars_yaml:\\n%s\" % extra_vars_yaml)\n extra_vars_b64 = base64.b64encode(\n bytearray(extra_vars_yaml, encoding='utf-8')\n )\n\n # Create inv/vars init container to prepopulate container\n volumes = [\n {\n \"name\": \"inventory\",\n \"medium\": \"Memory\",\n \"emptyDir\": {}\n }\n ]\n volume_mounts = [\n {\n \"mountPath\": \"/tmp/inv\",\n \"name\": \"inventory\"\n }\n ]\n inv_init_container = {\n \"name\": \"init-inventory\",\n \"image\": \"busybox\",\n \"command\": [\n \"sh\",\n \"-c\",\n \"\"\"\n echo %s | base64 -d > /tmp/inv/hosts.yaml &&\n echo %s | base64 -d > /tmp/inv/vars.yaml\n \"\"\" % (\n inv_b64.decode('utf-8'),\n extra_vars_b64.decode('utf-8')\n )\n ],\n 
\"volumeMounts\": volume_mounts\n }\n\n env_vars = [\n # TODO: for hashivault_vars plugin\n {\"name\": \"HASHIVAULT_VARS_DEBUG\", \"value\": \"0\"},\n {\"name\": \"VAULT_SKIP_VERIFY\", \"value\": \"1\"},\n {\"name\": \"VAULT_ADDR\",\n \"value\": \"http://127.0.0.1:8200\"},\n {\"name\": \"VAULT_TOKEN\", \"value\": \"TODO\"}\n # TODO: resolve token from pull-mode approle\n ]\n\n namespace = \"default\"\n\n body = {}\n # Routing\n if path == \"/ping\":\n body = {\n \"api_version\": \"batch/v1\",\n \"kind\": \"Job\",\n \"metadata\": {\"name\": \"myjob\"},\n \"spec\": {\n \"template\": {\n \"spec\": {\n \"initContainers\": [\n inv_init_container\n ],\n \"containers\": [\n {\n \"name\": \"myjob\",\n \"image\": \"goethite/gostint-ansible:2.7.5\",\n \"imagePullPolicy\": \"Always\",\n \"command\": [\"ansible\"],\n \"args\": [\n \"-i\", \"/tmp/inv/hosts.yaml\",\n \"-m\", \"ping\", \"127.0.0.1\",\n \"--extra-vars\", \"@/tmp/inv/vars.yaml\"\n ],\n \"volumeMounts\": volume_mounts,\n \"env\": env_vars\n }\n ],\n \"volumes\": volumes,\n \"restartPolicy\": \"Never\"\n }\n },\n \"parallelism\": 1,\n \"completions\": 1,\n \"backoffLimit\": 0\n }\n }\n send(evbody[\"event_uuid\"], namespace, body, producer)\n\n elif path == \"/play\":\n # TODO: demo loose coupling here\n group = form.get(\"group\")[0]\n name = form.get(\"name\")[0]\n\n if group is None or group == \"\":\n sendError(evbody[\"event_uuid\"], 400,\n \"param group is missing\", producer)\n return\n if name is None or name == \"\":\n sendError(evbody[\"event_uuid\"], 400,\n \"param name is missing\", producer)\n return\n\n if group not in group_config[\"groups\"]:\n sendError(\n evbody[\"event_uuid\"],\n 400,\n \"param group does not have entry in group_config\",\n producer\n )\n return\n image = group_config[\"groups\"][group][\"image\"]\n\n body = {\n \"api_version\": \"batch/v1\",\n \"kind\": \"Job\",\n \"metadata\": {\"name\": \"myjob\"}, # TODO:\n \"spec\": {\n \"backoffLimit\": 0,\n \"template\": {\n \"spec\": {\n \"initContainers\": [\n inv_init_container\n ],\n \"containers\": [\n {\n \"name\": \"myjob\", # TODO:\n \"image\": image,\n \"imagePullPolicy\": \"Always\",\n \"args\": [\n \"-i\", \"/tmp/inv/hosts.yaml\",\n name,\n \"--extra-vars\", \"@/tmp/inv/vars.yaml\"\n ],\n \"volumeMounts\": volume_mounts,\n \"env\": env_vars\n }\n ],\n \"volumes\": volumes,\n \"restartPolicy\": \"Never\"\n }\n },\n \"parallelism\": 1,\n \"completions\": 1,\n \"backoffLimit\": 0\n }\n }\n send(evbody[\"event_uuid\"], namespace, body, producer)\n\n else:\n sendError(evbody[\"event_uuid\"], 501,\n \"Path %s not implemented\" % path, producer)\n\n\ndef send(event_uuid, namespace, body, producer):\n batchApi = client.BatchV1Api()\n batchApi_resp = batchApi.create_namespaced_job(\n namespace=namespace,\n body=body\n )\n\n # print(\"create batchApi_resp:\", batchApi_resp)\n\n job_status = {}\n while True:\n time.sleep(1)\n job_status = get_job_status(batchApi, namespace, \"myjob\") # TODO:\n if job_status.active is None:\n break\n\n pods = v1.list_namespaced_pod(\n namespace, label_selector=\"job-name=myjob\") # TODO:\n\n if len(pods.items) > 0:\n pod = pods.items[0]\n pod_name = pod.metadata.name\n\n # Get log from job pod\n try:\n pod_log = v1.read_namespaced_pod_log(\n pod_name,\n namespace,\n # timestamps=True\n tail_lines=100 # limit to last n lines\n )\n except client.rest.ApiException as e:\n pod_log = \"Error: Failed to get job's POD log: %s\" % e\n else:\n pod_log = \"Error: No pod(s) found for Job\"\n print(\"pod_log:\", pod_log)\n\n try:\n 
batchApi.delete_namespaced_job(\n namespace=namespace,\n name=\"myjob\",\n body={}\n )\n except client.rest.ApiException as e:\n pod_log += \"\\nError: Failed to delete job: %s\" % e\n\n try:\n v1.delete_namespaced_pod(pod_name, namespace, body={})\n except client.rest.ApiException as e:\n pod_log += \"\\nError: Failed to delete job's pod: %s\" % e\n\n response = {\n \"event_uuid\": event_uuid,\n \"data\": {\n \"status\": {\n \"active\": job_status.active,\n \"failed\": job_status.failed,\n \"succeeded\": job_status.succeeded\n },\n \"log\": pod_log\n }\n }\n\n new_event = bytearray(json.dumps(response), encoding='utf-8')\n producer.send('automation_v1_response', key=b'event',\n value=new_event).get(timeout=30)\n producer.flush(timeout=5)\n\n\nfor event in consumer:\n # print(event)\n\n # Establish the producer for each function call, cannot be global...?\n producer = KafkaProducer(\n bootstrap_servers=['192.168.0.62:31090',\n '192.168.0.62:31091', '192.168.0.62:31092'])\n # bootstrap_servers=['127.0.0.1:9092'])\n\n evbody = {}\n value = event.value\n if value is not None and value != \"\":\n evbody = json.loads(value)\n # print(\"evbody:\", evbody)\n\n try:\n wrapped(evbody, producer)\n except Exception as err:\n try:\n response = {\n \"event_uuid\": evbody[\"event_uuid\"],\n \"code\": 500,\n \"error\": str(err),\n \"stacktrace\": traceback.format_exc()\n }\n\n new_event = bytearray(json.dumps(response), encoding='utf-8')\n producer.send('automation_v1_response', key=b'event',\n value=new_event).get(timeout=30)\n producer.flush(timeout=5)\n except Exception as err:\n print(str(err))\n print(traceback.format_exc())\n","repo_name":"goethite/lucygw","sub_path":"services/ansible-k8s/ansible-k8s.py","file_name":"ansible-k8s.py","file_ext":"py","file_size_in_byte":13627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"35883182304","text":"\"\"\"\nbangazon api model configuration for customer support ticket\n\"\"\"\n\nfrom django.db import models\nfrom bangazon.api.models import *\n\n\nclass CustomerSupportTicket(models.Model):\n \"\"\"\n This class models a customer support ticket in the API's database.\n\n ----Fields----\n customer_id(foreign key): refers to the Customer(CustomerID) the support ticket is assigned to with a foreign key\n ticket_description(character): description of what the ticket is about\n order_id(foreign key): refers to the Order(OrderID) the ticket is associated with\n date_created(date): the date a ticket was added to the database\n resolution_description(character): description of how to resolve the ticket\n date_resolved(date): the date a ticket was resolved\n\n Author: Adam Myers\n \"\"\"\n\n customer_id = models.ForeignKey(Customer)\n ticket_description = models.CharField(max_length=200)\n order_id = models.ForeignKey(Order)\n date_created = models.DateField()\n resolution_description = models.CharField(max_length=200, blank=True, null=True)\n date_resolved = models.DateField(blank=True, null=True)","repo_name":"solanum-tuberosums/bangazonOrientationAPI","sub_path":"bangazon/bangazon/api/models/model_customer_support_ticket.py","file_name":"model_customer_support_ticket.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"16163055870","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport matplotlib.colors as pltcolors\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d 
import Axes3D\nfrom pyFTS.common import Membership\n\n\ndef plotSets(data, sets, titles):\n num = len(sets)\n fig = plt.figure(figsize=[12, 10])\n maxx = max(data)\n minx = min(data)\n h = 1/num\n for k in range(num):\n ax0 = fig.add_axes([0, (k+1)*h, 0.65, h*0.7]) # left, bottom, width, height\n ax0.set_title(titles[k])\n ax0.set_ylim([0, 1])\n ax0.set_xlim([minx, maxx])\n for s in sets[k]:\n if s.mf == Membership.trimf:\n ax0.plot([s.parameters[0],s.parameters[1],s.parameters[2]],[0,1,0])\n elif s.mf == Membership.gaussmf:\n tmpx = [ kk for kk in np.arange(s.lower, s.upper)]\n tmpy = [s.membership(kk) for kk in np.arange(s.lower, s.upper)]\n ax0.plot(tmpx, tmpy)","repo_name":"cseveriano/solarenergyforecasting","sub_path":"pyFTS/partitioners/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"21271193708","text":"\"\"\"\nevaluate the impact of optimization epochs on LR-BA\n\"\"\"\n\nimport os\nimport sys\n\nimport numpy as np\nimport torch\n\nsys.path.append(os.path.abspath('%s/..' % sys.path[0]))\n\nfrom common.parser import get_args\nfrom datasets.base_dataset import get_dataloader\nfrom vfl.vfl import get_vfl\nfrom vfl.vfl_fixture import VFLFixture\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), \"../../../\")))\n\ntorch.backends.cudnn.benchmark = True\n\ngenerate_epochs_list = list(range(0, 150, 20))\n\nif __name__ == '__main__':\n args = get_args()\n train_dl, test_dl, backdoor_train_dl, backdoor_test_dl, _, \\\n backdoor_indices, _, labeled_dl, unlabeled_dl = get_dataloader(args)\n y_train = np.array(train_dl.dataset.targets) # only for cifar\n\n vfl = get_vfl(args=args,\n backdoor_indices=backdoor_indices)\n vfl_fixture = VFLFixture(vfl, args=args)\n\n args['lr_ba_generate_epochs'] = generate_epochs_list\n\n vfl_fixture.fit(\n train_dl, test_dl,\n backdoor_test_loader=backdoor_test_dl)\n\n vfl_fixture.lr_ba_attack(\n train_loader=train_dl,\n test_loader=test_dl,\n backdoor_train_loader=backdoor_train_dl,\n backdoor_test_loader=backdoor_test_dl,\n backdoor_indices=backdoor_indices,\n labeled_loader=labeled_dl,\n unlabeled_loader=unlabeled_dl)\n\n","repo_name":"guyuhao/LR-BA","sub_path":"tests/generate_epochs_compare.py","file_name":"generate_epochs_compare.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"11725790502","text":"import itertools\n\nimport cv2\nfrom skimage.metrics import peak_signal_noise_ratio\nimport numpy as np\nimport os\nimport time\nfrom functools import partial\nfrom multiprocessing import Process, Pipe, Pool\nimport torchvision.transforms as F\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom skimage.restoration import denoise_tv_chambolle\nfrom skimage.restoration import denoise_wavelet\n\nnoise_path = [\"../gaussian_noise\",\"../impulse_noise\",\"../shot_noise\"]\nnoise_level = [5]\nclean_path = \"../val\"\nckp = False\n\ndef cal_psnr(clean_image, denoise_image):\n if clean_image.dtype != denoise_image.dtype:\n print(clean_image.dtype, denoise_image.dtype)\n return peak_signal_noise_ratio(clean_image,denoise_image)\n\ndef filtering(noise_image, weight=1.0):\n # denoised = denoise_tv_chambolle(noise_image, weight=weight, multichannel=True)\n denoised = denoise_wavelet(noise_image, multichannel=True)\n denoised = (denoised*255).astype(\"uint8\")\n return denoised\n\ndef 
process_one_image_pair(image_name,clean_folder, noise_folder,clean_t,noise_t):\n cur_clean_image_path = os.path.join(clean_folder, image_name)\n cur_clean_image = Image.open(cur_clean_image_path)\n cur_clean_image = np.array(clean_t(cur_clean_image))\n cur_clean_image = cv2.cvtColor(cur_clean_image,cv2.COLOR_RGB2BGR)\n\n cur_noise_image_path = os.path.join(noise_folder, image_name)\n cur_noise_image = Image.open(cur_noise_image_path)\n cur_noise_image = np.array(noise_t(cur_noise_image))\n cur_noise_image=cv2.cvtColor(cur_noise_image,cv2.COLOR_RGB2BGR)\n denoised_image = filtering(cur_noise_image, weight=0.7)\n tmp_psnr = cal_psnr(cur_clean_image,denoised_image)\n return tmp_psnr\n\ndef main():\n # clean_cate_list = sorted(os.listdir(clean_path))\n res = {}\n final = {}\n\n # check whether one noise image corresponds to one original image\n if ckp:\n for one_noise_type in noise_path:\n for one_level in noise_level:\n cate_list = sorted(os.listdir(os.path.join(one_noise_type,str(one_level))))\n for one_cate in cate_list:\n noise_images = np.array(sorted(os.listdir(os.path.join(one_noise_type,str(one_level),one_cate))))\n clean_images = np.array(sorted(os.listdir(os.path.join(clean_path,one_cate))))\n if (noise_images==clean_images).all():\n continue\n else:\n raise ValueError\n print(\"success\")\n \n # process data and calculate metric\n clean_t = F.Compose([\n F.Resize(256),\n F.CenterCrop(224),\n F.Resize(256)\n ])\n noise_t = F.Compose([\n F.Resize(256),\n ])\n\n parallel_worker = Pool(processes=400)\n for one_noise_type in noise_path:\n res[one_noise_type] = {}\n for one_level in noise_level:\n res[one_noise_type][one_level] = []\n cate_list = sorted(os.listdir(os.path.join(one_noise_type, str(one_level))))\n start_time = time.time()\n for one_cate in cate_list:\n noise_images = sorted(os.listdir(os.path.join(one_noise_type, str(one_level), one_cate)))\n partial_job = partial(process_one_image_pair, clean_folder=os.path.join(clean_path,one_cate), noise_folder=os.path.join(one_noise_type,str(one_level),one_cate),clean_t=clean_t,noise_t=noise_t)\n batch_res = parallel_worker.map(partial_job, noise_images)\n tmp_metric = np.mean(np.array(batch_res))\n res[one_noise_type][one_level].append(tmp_metric)\n end_time = time.time()\n print(\"process time: \", end_time-start_time)\n for one_noise_type in res.keys():\n final[one_noise_type]={}\n for one_level in res[one_noise_type].keys():\n final[one_noise_type][one_level] = np.mean(res[one_noise_type][one_level])\n parallel_worker.close()\n parallel_worker.join()\n print(final)\n\nif __name__==\"__main__\":\n main()","repo_name":"sjtulyf123/DIP-PRJ","sub_path":"tvd_wd_denoising.py","file_name":"tvd_wd_denoising.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"23905064171","text":"from django.conf.urls import url\n\nfrom .webhook import GitHubIntegrationsWebhookEndpoint\nfrom .search import GitHubSearchEndpoint\n\nurlpatterns = [\n url(r\"^webhook/$\", GitHubIntegrationsWebhookEndpoint.as_view()),\n url(\n r\"^search/(?P[^\\/]+)/(?P\\d+)/$\",\n GitHubSearchEndpoint.as_view(),\n name=\"sentry-extensions-github-search\",\n ),\n]\n","repo_name":"imfht/djangoapps","sub_path":"sentry-master/src/sentry/integrations/github/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"70484055994","text":"# coding=utf-8\nimport 
os\nimport logging\nimport operator\n\nimport cv2\nimport numpy as np\nfrom tornado.options import options\n\nimport settings\n\n\ndef debug_show(winname, mat, level=\"DEBUG\"):\n if logging.getLevelName(level.upper()) < logging.getLevelName(options.logging.upper()):\n return\n\n height, width = mat.shape[:2]\n if height > 1200:\n mat = cv2.resize(mat, (int(width * 1200 / height), 1200))\n\n logging.info(\"%s size %s, %s\", winname, mat.shape[0], mat.shape[1])\n\n cv2.imshow(winname, mat)\n cv2.waitKey(0)\n cv2.destroyWindow(winname)\n\n\ndef save_image(file_path, img):\n \"\"\"保存图片\n\n :param file_path: 路径\n :param img: 图片\n :return: 保存后的路径\n \"\"\"\n dirname = os.path.dirname(file_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n cv2.imwrite(file_path, img)\n return file_path\n\n\ndef find_black_symbol(img_grey, width_erode=10, height_erode=10):\n \"\"\"寻找黑色标记点\n\n :param img: 原图\n :return: 灰度图,只有黑色标记点\n \"\"\"\n # 二值化\n img_grey = cv2.threshold(img_grey, 0, 256, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n debug_show(\"Image Threshold\", img_grey)\n\n # Dilation 膨胀\n img_grey = cv2.dilate(img_grey, kernel=np.ones((int(height_erode * 0.2), int(width_erode * 0.2)), dtype=np.uint8))\n debug_show(\"Image Dilate\", img_grey)\n\n # Erode 腐蚀\n img_grey = cv2.erode(\n img_grey,\n kernel=np.ones((int(height_erode * 1.2), int(width_erode * 1.2)), dtype=np.uint8),\n iterations=1\n )\n debug_show(\"Image Erode\", img_grey)\n\n # Dilation 膨胀\n img_grey = cv2.dilate(img_grey, kernel=np.ones((height_erode, width_erode), dtype=np.uint8))\n debug_show(\"Image Dilate\", img_grey)\n\n return img_grey\n\n\ndef center_of_gravity(contour):\n \"\"\"计算重心\n\n :param contour: 轮廓\n :return: 重心坐标\n \"\"\"\n\n M = cv2.moments(contour)\n return int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"])\n\n\ndef gamma_trans(img_grey):\n \"\"\"gamma变换\n\n :param img_grey: 原图,灰度图\n :return: 新的图片\n \"\"\"\n # 求平均亮度\n brightness = cv2.mean(img_grey)[0]\n # TODO 这段代码可以优化\n # gamma变换,把亮度低的地方变得更低,所以取100而不是128。128是256的一半,可能是平均亮度\n gamma = 1 + (brightness - 100) / 100\n if gamma > 1.5:\n gamma = gamma + 1\n\n # gamma 变换\n img_grey = np.float32(img_grey)\n img_grey = cv2.pow(img_grey, gamma)\n cv2.normalize(img_grey, img_grey, 0, 255, cv2.NORM_MINMAX)\n img_grey = cv2.convertScaleAbs(img_grey)\n\n return img_grey\n\n\ndef equalize_hist(img_grey):\n \"\"\"直方图均衡化\n\n :param img_grey: 原图,灰度图\n :return: 新的图片\n \"\"\"\n hist = cv2.calcHist(\n [img_grey], # 计算图像的直方图\n [0], # 使用的通道\n None, # 没有使用mask\n [256], # it is a 1D histogram\n [0.0, 255.0],\n )\n\n min_bin_no, max_bin_no = 0, 255\n\n for bin_no, bin_value in enumerate(hist):\n if bin_value != 0:\n min_bin_no = bin_no\n break\n\n for bin_no, bin_value in reversed(list(enumerate(hist))):\n if bin_value != 0:\n max_bin_no = bin_no\n break\n\n if min_bin_no == max_bin_no:\n return img_grey\n\n # 生成查找表,参考文献 《Opencv2 Computer Vision Application Programming Cookbook》 第四章第2节\n lut = np.zeros(256, dtype=img_grey.dtype)\n for index, value in enumerate(lut):\n if index < min_bin_no:\n lut[index] = 0\n elif index > max_bin_no:\n lut[index] = 255\n else:\n lut[index] = int(255.0 * (index - min_bin_no) / (max_bin_no - min_bin_no) + 0.5)\n\n return cv2.LUT(img_grey, lut)\n\n\ndef sorted_corner_points(points):\n \"\"\"四个角,坐标排序\n\n :param points:\n :return:\n \"\"\"\n points = sorted(points)\n left, right = points[:2], points[2:]\n\n left = sorted(left, key=operator.itemgetter(1))\n right = sorted(right, key=operator.itemgetter(1))\n\n left_top, left_bottom = left\n right_top, 
right_bottom = right\n\n return [left_top, right_top, left_bottom, right_bottom]\n\n\ndef warpPerspective(img, M):\n \"\"\"透视变换\n\n :param img: 原图\n :param M: 变换矩阵\n :return: 返回图片\n \"\"\"\n height, width = img.shape[:2]\n if len(img.shape) == 2:\n border_value = 255\n else:\n border_value = (255, 255, 255)\n\n return cv2.warpPerspective(img, M, (width, height), borderValue=border_value)\n\n\ndef scale_points(points, scale):\n \"\"\"对一系列点坐标进行缩放\n\n :param points:\n :param scale:\n :return:\n \"\"\"\n return [(x * scale, y * scale) for x, y in points]\n\n\ndef points_add(p1, p2):\n \"\"\"两点坐标相加\n\n :param p1: 点1\n :param p2: 点2\n :return:\n \"\"\"\n return tuple(map(sum, zip(p1, p2)))\n\n\ndef sorted_rect(rect_list, style=\"column\"):\n \"\"\"排列矩形\n\n :param rect_list: 矩形列表,矩形的格式(x, y, w, h)\n :param style: column or row, 按列或者按行排列\n :param delta: 超过delta,就算下一行或者下一列\n :return: 排序过的矩形矩阵\n \"\"\"\n\n if not rect_list: # 为空\n return rect_list\n\n if style == \"row\": # 如果是按行排列,跟按列排列相反,坐标交换下即可\n rect_list = [(y, x, h, w) for x, y, w, h in rect_list]\n\n rect_list = sorted(rect_list)\n\n # 当前列最左边的x坐标\n current_column = [rect_list[0]]\n rect_matrix = [current_column]\n\n for rect in rect_list[1:]:\n if rect[0] - current_column[0][0] >= current_column[0][2]: # 已经换列\n current_column = [rect]\n rect_matrix.append(current_column)\n\n else: # 没有换列\n current_column.append(rect)\n\n # 按第二个维度来排序\n rect_matrix = [sorted(column, key=operator.itemgetter(1)) for column in rect_matrix]\n\n if style == \"row\": # 如果是按行排列,跟按列排列相反,坐标交换下即可\n tmp = []\n for column in rect_matrix:\n new_column = [(x, y, w, h) for y, x, h, w in column]\n tmp.append(new_column)\n rect_matrix = tmp\n\n rect_result = []\n for rect_column in rect_matrix:\n rect_result.extend(rect_column)\n\n return rect_result\n","repo_name":"younglalala/lixue-ai-scripts","sub_path":"melon/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39856839548","text":"import glob\nimport csv\nimport sys\nfiles = glob.glob('/apollo/data/log/test/multi/*lan')\nmsg_size = int(sys.argv[1])\nhz = int(sys.argv[2])\ncon = int(sys.argv[3])\nrecord_file_name = str(sys.argv[4])\nfor f in files:\n \n for_write = []\n with open(f) as lines:\n for_write.append(msg_size)\n for_write.append(hz)\n for_write.append(con)\n array = lines.readlines()\n total = 0\n count = 0\n ignore = 0\n res = []\n for i in array:\n if ignore < 30:\n ignore = ignore + 1\n continue\n res.append(int(i.strip()))\n total = total + int(i.strip())\n count = count + 1\n res.sort()\n #print(f)\n #for_write.append(f)\n middle_lantency = res[int(len(res)/2)]\n #print(middle_lantency)#middle lantency\n for_write.append(middle_lantency)\n lan_95th = res[int(len(res)*0.95)]\n #print(lan_95th)\n for_write.append(lan_95th)\n big_lan = res[len(res)-1]\n #print(big_lan)#big\n for_write.append(big_lan)\n small_lan = res[0]\n #print(small_lan)#small\n for_write.append(small_lan)\n avg = total/count\n #print(avg)#avg\n for_write.append(avg)\n with open(f+\"loss\") as lines2:\n loss_rate = lines2.readlines()\n #print(loss_rate[0])\n for_write.append(loss_rate[0])\nwith open(record_file_name, 'a', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(for_write)\n# writer.writerows(data)\n 
\n","repo_name":"wutianze/ComP","sub_path":"cyber/data/handleDir.py","file_name":"handleDir.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"95"} +{"seq_id":"72202380792","text":"from typing import List\nfrom collections import defaultdict, deque\n\nclass Solution:\n def numBusesToDestination(self, routes: List[List[int]], source: int, target: int) -> int:\n if source == target:\n return 0\n stopBoards = defaultdict(list)\n for bus,route in enumerate(routes):\n for stop in route:\n stopBoards[stop].append(bus)\n \n q = deque([source])\n visitedBus = set()\n ans = 0\n while q:\n ans += 1\n numStop = len(q)\n\n for i in range(numStop):\n curStop = q.popleft()\n for bus in stopBoards[curStop]:\n if bus not in visitedBus:\n visitedBus.add(bus)\n for stop in routes[bus]:\n if stop == target:\n return ans\n q.append(stop)\n return -1\n\ns = Solution()\nassert s.numBusesToDestination(routes = [[1,2,7],[3,6,7]], source = 1, target = 6) == 2\nassert s.numBusesToDestination([[7,12],[4,5,15],[6],[15,19],[9,12,13]], source = 15, target = 12) == -1\n\n\"\"\"\nintuitive: graph\nnode is stop\nedges come from routes\nneed to know start node,\nfrom each node, we want to know all neighbor\ndict of set\n\nobservation: there is no need to take a bus more than once\nfind least number of buses to take, is just the shortest path (number of levels) in BFS\n\nit's interesting how you store bus in the map, rather than what stop is reachable. This save us from storing too many things.\nLet's say number of stop is s, number of bus is b\nfor each popped stop, we spend some time on each bus that will stop by.\nFor each bus that is not visited, we add all stops in its routes to the queue. (this will take number of elements in routes time in total, but will be dominated).\nO(s+s*b)\n\"\"\"","repo_name":"kateyeziyang/leetCode","sub_path":"800-899/815. Bus Routes/ans.py","file_name":"ans.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31197051606","text":"from aardvark.db import api as db_api\n\n\ndef get_test_reaper_action(**kw):\n requested = ['5d12f6fd-a196-4bf0-ae4c-1f639a523a52']\n victims = ['5d12f6fd-a196-4bf0-ae4c-1f639a523a53']\n return {\n 'id': kw.get('id', 12),\n 'uuid': kw.get('uuid', '483203a3-dbee-4a9c-9d65-9820512f4df8'),\n 'state': kw.get('state', \"SUCCESS\"),\n 'requested_instances': kw.get('requested_instances', requested),\n 'fault_reason': kw.get('fault_reason', None),\n 'victims': kw.get('victims', victims),\n 'event': kw.get('event', \"BUILD_REQUEST\")\n }\n\n\ndef create_test_reaper_action(**kw):\n action = get_test_reaper_action(**kw)\n if 'id' not in kw:\n del action['id']\n dbapi = db_api.get_instance()\n return dbapi.create_reaper_action(action)\n","repo_name":"NeCTAR-RC/aardvark","sub_path":"aardvark/tests/unit/db/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1255339415","text":"import mlflow\n\ndef fun_mlfow_log(model, experiment_name, results_accuracy):\n \"\"\"Log and register a model along with accuracy results. 
Accuracy results must be in dictionary form.\n\n Args:\n model (model): Model to register and log.\n experiment_name (str): Experiment name.\n results_accuracy (dic): Dictionary accuracy results.\n \"\"\"\n mlflow.sklearn.log_model(\n model, artifact_path=\"model\"\n ) # Name the folder where the model will be stored and grabbed in the path below\n run = mlflow.active_run()\n model_uri = \"runs:/{}/model\".format(run.info.run_id)\n mlflow.register_model(model_uri, experiment_name) # Name of the experiment / job\n mlflow.log_metrics(results_accuracy)\n mlflow.end_run()","repo_name":"mhanauer/internal_work","sub_path":"internal_work/analysis/fun_mlfow_log.py","file_name":"fun_mlfow_log.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"9003556311","text":"#webscaper using BeautifulSoup\ndef city_plumbing():\n import requests\n from bs4 import BeautifulSoup\n import pandas as pd\n import os\n baseurl = 'https://www.cityplumbing.co.uk'\n\n header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}\n\n product_links = []\n\n #get the page\n\n for x in range(0,6):\n\n r = requests.get(f\"https://www.cityplumbing.co.uk/Product/Heating/Boilers/Gas-Boilers/Gas-Combi-Boilers/c/1838005?q=%3Arelevance&page={x}&perPage=15\")\n soup = BeautifulSoup(r.content, 'html.parser')\n\n product_list = soup.find_all('div', class_='prod_img')\n\n\n\n for item in product_list:\n for link in item.find_all('a', href=True):\n product_links.append(baseurl + link['href'])\n\n #test_link = \"https://www.cityplumbing.co.uk/Vokera-Vision-25C-Combi-Boiler-20097278/p/529743\"\n Product_final = []\n for link in product_links:\n r=requests.get(link, headers=header)\n\n soup = BeautifulSoup(r.content, 'html.parser')\n\n name=soup.find('h1', class_='tpProductTitle').text.strip()\n\n\n try:\n price =soup.find('span', class_='price_value').text.strip()\n price = price.replace('£','')\n price = price.replace(',','')\n price = price.replace(' ','')\n price = float(price)\n except:\n price = \"Price not found\"\n\n description1=soup.find('div', class_='summary-info').text.strip()\n description=description1.replace('Product Information\\n\\n', '')\n description=description.replace('More Info', '')\n\n product = {\n 'name': name,\n 'price': price,\n 'description': description,\n\n }\n\n\n print(\"Saving : \", product)\n Product_final.append(product)\n df = pd.DataFrame(Product_final)\n df = df.replace(r'\\n', ' ', regex=True)\n df.to_csv('boiler.csv', index=False)\n\n df_saved = pd.read_csv('boiler.csv')\n df_saved\n\n\n\n\n\ncity_plumbing()\n\n\n\n","repo_name":"MauriceMohamed/webScraper_v1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40348407402","text":"from glob import glob\nfrom PIL import Image\nimport re\n\n# pull first file that will act as template\nimg = Image.open(\"line_map_prev.png\")\n\n# pull metrics\nmaxWidth = img.width\nmaxHeight = img.height\n\nfiles = (glob(\"*.png\"))\nfor file in files:\n # Get file name \n fileName = re.sub(\".png\", \"\", file)\n\n # open and resize each file\n img = Image.open(file)\n newHeight = (maxHeight/ img.height) * img.height\n img = img.resize(size=[int(maxWidth), int(newHeight)])\n 
img.save(f\"{fileName}_resize.png\")\n\n","repo_name":"mxblsdl/shiny-server","sub_path":"gallery/www/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21484742103","text":"#!/usr/bin/env python\n# -*- coding=utf8 -*-\n\nimport pytest\n\n\nclass Solution:\n def reverse(self, x: int) -> int:\n val = abs(x)\n op = (x > 0) - (x < 0)\n new_str = ''\n for i, j in enumerate(str(val)):\n new_str = j+new_str\n new_int = int(new_str)*op\n if -2**31 <= new_int <= 2**31:\n return new_int\n return 0\n\n def reverse1(self, x: int) -> int:\n \"\"\"\n 第一步:求符号\n 第二步:step为-1切片,并求绝对值\n 第三步:op*val恢复原始符号,val<2**31绝对值判断是否溢出,溢出乘以0则返回值为0\n\n 切片操作:[start_index: stop_index: step]\n \"\"\"\n op = (x > 0) - (x < 0)\n val = int(str(x*op)[::-1])\n return op*val * (val < 2**31)\n\n\n@pytest.mark.parametrize(('param', 'ret'), [(123, 321),\n (-123, -321),\n (-120, -21),\n (120, 21)])\ndef test1(param, ret):\n solution = Solution()\n assert solution.reverse(param) == ret\n assert solution.reverse1(param) == ret\n","repo_name":"helloocc/algorithm","sub_path":"007_reverse-integer.py","file_name":"007_reverse-integer.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"42900708988","text":"#!/usr/bin/env python3\n\nimport click\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\ndef plot_proteomic_data(annotations, abundances, output):\n \"\"\"\n Plot the functional groups, genomic and abundance distribution for proteomic data\n \"\"\"\n\n figure, axs = plt.subplots(nrows=2, ncols=2, figsize=(8, 8))\n tem = plt.imread('../resources/sfgv_ob_tem.png')\n axs[0, 0].imshow(tem)\n axs[0, 0].axis(\"off\")\n sem = plt.imread('../resources/sfgv_ob_sem.png')\n axs[0, 1].imshow(sem)\n axs[0, 1].axis(\"off\")\n bars = sns.barplot(x=\"Annotation\", y=\"# Pfam Domains\",\n data=annotations, ax=axs[1, 0],\n palette=\"deep\")\n hist = sns.histplot(abundances, ax=axs[1, 1], color=\"black\", bins=10)\n plt.savefig(f\"{output}.png\")\n\n\n# Command line interface\n@click.command()\n@click.option(\"-a\", \"--annotations\",\n help=\"Pfam functional annotations\")\n@click.option(\"-p\", \"--protein_list\",\n help=\"Protein list with abundance values\")\n@click.option(\"-o\", \"--output\",\n help=\"Output file\")\ndef cli(annotations, protein_list, output):\n \"Command line interface\"\n\n names = [\"Protein\",\n \"InterProscan\",\n \"Description\",\n \"Annotation\"]\n ann = pd.read_csv(annotations,\n sep=\"\\t\",\n names=names)\n g = ann.groupby(\"Annotation\").size()\n # ann = list(zip(g.index, g.values))\n ann = pd.DataFrame(data={\"Annotation\": g.index, \"# Pfam Domains\": g.values})\n # ann = list(zip(g.index, g.values))\n\n df = pd.read_csv(protein_list)\n vp39 = float(df[df[\"Protein name\"] == \"Vp39\"][\"Average emPAI\"])\n abund = df[\"Average emPAI\"] * 100 / vp39\n abund = abund[abund <= 100]\n\n plot_proteomic_data(ann, abund, output)\n\n\nif __name__ == \"__main__\":\n cli()\n\n\n# def get_protein_locations(proteome):\n# \"\"\"\n# Parse protein positions from a fasta file and returns them as a list\n# \"\"\"\n\n# # Load proteome\n# seqs = SeqIO.parse(proteome, \"fasta\")\n# # Initialize coordinates list\n# locs = []\n# # Get location for each protein on the file\n# for seq in seqs:\n# loc = 
seq.description.split(\"[location=\")[1].split(\"]\")[0]\n# if loc.startswith(\"complement\"):\n# locs.append(int(loc[11:-1].split(\"..\")[1]))\n# else:\n# locs.append(int(loc.split(\"..\")[0]))\n\n# return locs\n","repo_name":"tomasMasson/sfgv_proteomics","sub_path":"scripts/plot_proteomic_data.py","file_name":"plot_proteomic_data.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16991645734","text":"nombres = [\"dave\", \"javiera\", \"paz\", \"patrick\", \"pablo\"]\nnumeros = [1,2,3,4,5,6]\n\n#For loop\nfor name in nombres:\n if name == \"patrick\":\n continue\n print(name)\n\n\n\n#for n in range(0, len(nombres)):\n# print(nombres[n])\n\n#for name in nombres:\n# print(name)\n\n#While loop\n#n = 0\nwhile len(nombres) > n:\n print(nombres[n])\n n = n + 1\n\nage = int(input(\"Cual es tu edad? \"))\n\nprint(type(age))\n\n\n\n\n\n\n\n","repo_name":"davejfranco/codingdojo-python-lessons","sub_path":"basico/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"23725404373","text":"import inspect\nimport multiprocessing\nimport warnings\nimport torch\nimport os\nimport numpy as np\n# import distmap\nimport cupy as cp\nfrom torch.utils.dlpack import to_dlpack\nfrom torch.utils.dlpack import from_dlpack\nfrom cucim.core.operations.morphology import distance_transform_edt as distance_transform_edt_cupy\nimport nibabel as nib\n\nfrom skimage.morphology import skeletonize_3d\nfrom scipy import ndimage\nfrom scipy.spatial import cKDTree\nfrom torch import autocast, nn\nfrom time import time, sleep\nfrom datetime import datetime\nfrom typing import Union, Tuple, List\nfrom torch import distributed as dist\nfrom torch.cuda import device_count\nfrom torch.cuda.amp import GradScaler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\n\nfrom nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer\nfrom nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager\nfrom nnunetv2.training.nnUNetTrainer.variants.network_architecture.PlainConvUNet_DC_CLDC_skeletonize import PlainConvUNet\nfrom nnunetv2.training.nnUNetTrainer.variants.network_architecture.skeletonize import Skeletonize\nfrom dynamic_network_architectures.architectures.unet import ResidualEncoderUNet\nfrom dynamic_network_architectures.building_blocks.helper import convert_dim_to_conv_op, get_matching_batchnorm\nfrom dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0, InitWeights_He\nfrom batchgenerators.utilities.file_and_folder_operations import join, load_json, isfile, save_json, maybe_mkdir_p\nfrom nnunetv2.training.dataloading.nnunet_dataset import nnUNetDataset\nfrom nnunetv2.configuration import ANISO_THRESHOLD, default_num_processes\nfrom nnunetv2.evaluation.evaluate_predictions import compute_metrics_on_folder\nfrom nnunetv2.inference.export_prediction import export_prediction_from_logits, resample_and_save\nfrom nnunetv2.inference.predict_from_raw_data_skeletonize import nnUNetPredictor\nfrom nnunetv2.inference.sliding_window_prediction import compute_gaussian\nfrom nnunetv2.paths import nnUNet_preprocessed, nnUNet_results\nfrom nnunetv2.training.dataloading.utils import get_case_identifiers, unpack_dataset\nfrom nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss\nfrom 
nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss, CE_loss, BCE_loss\nfrom nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper\nfrom nnunetv2.training.logging.nnunet_logger_vesselgrapher import nnUNetLogger_vesselgrapher\nfrom nnunetv2.utilities.collate_outputs import collate_outputs\nfrom nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA\nfrom nnunetv2.utilities.file_path_utilities import check_workers_busy\nfrom nnunetv2.utilities.get_network_from_plans import get_network_from_plans\nfrom nnunetv2.utilities.helpers import empty_cache, dummy_context\nfrom nnunetv2.utilities.label_handling.label_handling import convert_labelmap_to_one_hot, determine_num_input_channels\nfrom nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager\n \nclass nnUNetTrainer_CB_DICE(nnUNetTrainer):\n\n def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,\n device: torch.device = torch.device('cuda')):\n # From https://grugbrain.dev/. Worth a read ya big brains ;-)\n\n # apex predator of grug is complexity\n # complexity bad\n # say again:\n # complexity very bad\n # you say now:\n # complexity very, very bad\n # given choice between complexity or one on one against t-rex, grug take t-rex: at least grug see t-rex\n # complexity is spirit demon that enter codebase through well-meaning but ultimately very clubbable non grug-brain developers and project managers who not fear complexity spirit demon or even know about sometime\n # one day code base understandable and grug can get work done, everything good!\n # next day impossible: complexity demon spirit has entered code and very dangerous situation!\n\n # OK OK I am guilty. But I tried. http://tiny.cc/gzgwuz\n\n self.is_ddp = dist.is_available() and dist.is_initialized()\n self.local_rank = 0 if not self.is_ddp else dist.get_rank()\n\n self.device = device\n\n # print what device we are using\n if self.is_ddp: # implicitly it's clear that we use cuda in this case\n print(f\"I am local rank {self.local_rank}. {device_count()} GPUs are available. The world size is \"\n f\"{dist.get_world_size()}.\"\n f\"Setting device to {self.device}\")\n self.device = torch.device(type='cuda', index=self.local_rank)\n else:\n if self.device.type == 'cuda':\n # we might want to let the user pick this but for now please pick the correct GPU with CUDA_VISIBLE_DEVICES=X\n self.device = torch.device(type='cuda', index=0)\n print(f\"Using device: {self.device}\")\n\n # loading and saving this class for continuing from checkpoint should not happen based on pickling. This\n # would also pickle the network etc. Bad, bad. Instead we just reinstantiate and then load the checkpoint we\n # need. So let's save the init args\n self.my_init_kwargs = {}\n for k in inspect.signature(self.__init__).parameters.keys():\n self.my_init_kwargs[k] = locals()[k]\n\n ### Saving all the init args into class variables for later access\n self.plans_manager = PlansManager(plans)\n self.configuration_manager = self.plans_manager.get_configuration(\n configuration)\n self.configuration_name = configuration\n self.dataset_json = dataset_json\n self.fold = fold\n self.unpack_dataset = unpack_dataset\n\n ### Setting all the folder names. 
We need to make sure things don't crash in case we are just running\n # inference and some of the folders may not be defined!\n self.preprocessed_dataset_folder_base = join(nnUNet_preprocessed, self.plans_manager.dataset_name) \\\n if nnUNet_preprocessed is not None else None\n self.output_folder_base = join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" + configuration) \\\n if nnUNet_results is not None else None\n self.output_folder = join(self.output_folder_base, f'fold_{fold}')\n\n self.preprocessed_dataset_folder = join(self.preprocessed_dataset_folder_base,\n self.configuration_manager.data_identifier)\n # unlike the previous nnunet folder_with_segs_from_previous_stage is now part of the plans. For now it has to\n # be a different configuration in the same plans\n # IMPORTANT! the mapping must be bijective, so lowres must point to fullres and vice versa (using\n # \"previous_stage\" and \"next_stage\"). Otherwise it won't work!\n self.is_cascaded = self.configuration_manager.previous_stage_name is not None\n self.folder_with_segs_from_previous_stage = \\\n join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" +\n self.configuration_manager.previous_stage_name, 'predicted_next_stage', self.configuration_name) \\\n if self.is_cascaded else None\n\n ### Some hyperparameters for you to fiddle with\n self.initial_lr = 1e-2\n self.weight_decay = 3e-5\n self.oversample_foreground_percent = 0.33\n self.num_iterations_per_epoch = 250\n self.num_val_iterations_per_epoch = 50\n self.num_epochs = 1000\n self.current_epoch = 0\n\n ### Dealing with labels/regions\n self.label_manager = self.plans_manager.get_label_manager(dataset_json)\n # labels can either be a list of int (regular training) or a list of tuples of int (region-based training)\n # needed for predictions. We do sigmoid in case of (overlapping) regions\n\n self.num_input_channels = None # -> self.initialize()\n self.network = None # -> self._get_network()\n self.optimizer = self.lr_scheduler = None # -> self.initialize\n self.grad_scaler = GradScaler() if self.device.type == 'cuda' else None\n self.loss = None # -> self.initialize\n\n ### Simple logging. Don't take that away from me!\n # initialize log file. This is just our log for the print statements etc. 
Not to be confused with lightning\n # logging\n timestamp = datetime.now()\n maybe_mkdir_p(self.output_folder)\n self.log_file = join(self.output_folder, \"training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt\" %\n (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,\n timestamp.second))\n self.logger = nnUNetLogger_vesselgrapher()\n\n ### placeholders\n self.dataloader_train = self.dataloader_val = None # see on_train_start\n\n ### initializing stuff for remembering things and such\n self._best_ema = None\n\n ### inference things\n self.inference_allowed_mirroring_axes = None # this variable is set in\n # self.configure_rotation_dummyDA_mirroring_and_inital_patch_size and will be saved in checkpoints\n\n ### checkpoint saving stuff\n self.save_every = 2 # 50\n self.disable_checkpointing = False\n\n ## DDP batch size and oversampling can differ between workers and needs adaptation\n # we need to change the batch size in DDP because we don't use any of those distributed samplers\n self._set_batch_size_and_oversample()\n\n self.was_initialized = False\n\n self.print_to_log_file(\"\\n#######################################################################\\n\"\n \"Please cite the following paper when using nnU-Net:\\n\"\n \"Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). \"\n \"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. \"\n \"Nature methods, 18(2), 203-211.\\n\"\n \"#######################################################################\\n\",\n also_print_to_console=True, add_timestamp=False)\n \n def initialize(self):\n if not self.was_initialized:\n self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager,\n self.dataset_json)\n # print(\"self.dataset_json: \", self.dataset_json)\n # max_label_value = max(self.dataset_json[\"labels\"].values())\n # self.dataset_json[\"labels\"][\"skel_vessel\"] = max_label_value + 1\n # self.dataset_json[\"labels\"][\"skel\"] = max_label_value + 2\n\n self.network = self.build_network_architecture(self.plans_manager, self.dataset_json,\n self.configuration_manager,\n self.num_input_channels,\n enable_deep_supervision=True).to(self.device)\n \n self.skeletonization_module_binary = Skeletonize(probabilistic=False, simple_point_detection='EulerCharacteristic').to(self.device)\n self.skeletonization_module_multi = Skeletonize(probabilistic=False, simple_point_detection='EulerCharacteristic').to(self.device)\n\n self.skeletonization_module = Skeletonize(probabilistic=False, simple_point_detection='EulerCharacteristic').to(self.device)\n\n # compile network for free speedup\n if ('nnUNet_compile' in os.environ.keys()) and (\n os.environ['nnUNet_compile'].lower() in ('true', '1', 't')):\n self.print_to_log_file('Compiling network...')\n self.network = torch.compile(self.network)\n\n self.optimizer, self.lr_scheduler = self.configure_optimizers()\n # if ddp, wrap in DDP wrapper\n if self.is_ddp:\n self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network)\n self.network = DDP(self.network, device_ids=[self.local_rank])\n\n # self.seg_loss = self._build_loss()\n self.seg_loss_0_bin, self.seg_loss_0_mul, self.seg_loss_weights, self.seg_loss_deep = self._build_loss()\n self.was_initialized = True\n else:\n raise RuntimeError(\"You have called self.initialize even though the trainer was already initialized. 
\"\n \"That should not happen.\")\n \n def _build_loss(self):\n # if self.label_manager.has_regions:\n # loss = DC_and_BCE_loss({},\n # {'batch_dice': self.configuration_manager.batch_dice,\n # 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp},\n # use_ignore_label=self.label_manager.ignore_label is not None,\n # dice_class=MemoryEfficientSoftDiceLoss)\n # else:\n loss_0_bin = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,\n 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=0.5, weight_dice=0.5,\n ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)\n loss_0_mul = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,\n 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=0.5, weight_dice=0.5,\n ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)\n loss_deep = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,\n 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=0.5, weight_dice=0.5,\n ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)\n\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases\n # this gives higher resolution outputs more weight in the loss\n weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])\n weights[-1] = 0\n\n # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1\n weights = weights / weights.sum()\n # now wrap the loss\n loss_deep = DeepSupervisionWrapper(loss_deep, weights[1:])\n\n return loss_0_bin, loss_0_mul, weights, loss_deep\n \n @staticmethod\n def build_network_architecture(plans_manager: PlansManager,\n dataset_json,\n configuration_manager: ConfigurationManager,\n num_input_channels,\n enable_deep_supervision: bool = True) -> nn.Module:\n num_stages = len(configuration_manager.conv_kernel_sizes)\n\n dim = len(configuration_manager.conv_kernel_sizes[0])\n conv_op = convert_dim_to_conv_op(dim)\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n\n # configuration_manager.UNet_class_name\n segmentation_network_class_name = 'PlainConvUNet'\n mapping = {\n 'PlainConvUNet': PlainConvUNet,\n 'ResidualEncoderUNet': ResidualEncoderUNet\n }\n kwargs = {\n 'PlainConvUNet': {\n 'conv_bias': True,\n 'norm_op': get_matching_batchnorm(conv_op),\n 'norm_op_kwargs': {'eps': 1e-5, 'affine': True},\n 'dropout_op': None, 'dropout_op_kwargs': None,\n 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},\n },\n 'ResidualEncoderUNet': {\n 'conv_bias': True,\n 'norm_op': get_matching_batchnorm(conv_op),\n 'norm_op_kwargs': {'eps': 1e-5, 'affine': True},\n 'dropout_op': None, 'dropout_op_kwargs': None,\n 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},\n }\n }\n assert segmentation_network_class_name in mapping.keys(), 'The network architecture specified by the plans file ' \\\n 'is non-standard (maybe your own?). 
Yo\\'ll have to dive ' \\\n 'into either this ' \\\n 'function (get_network_from_plans) or ' \\\n 'the init of your nnUNetModule to accomodate that.'\n network_class = mapping[segmentation_network_class_name]\n\n conv_or_blocks_per_stage = {\n 'n_conv_per_stage'\n if network_class != ResidualEncoderUNet else 'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder,\n 'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder\n }\n # network class name!!\n model = network_class(\n input_channels=num_input_channels,\n n_stages=num_stages,\n features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i,\n configuration_manager.unet_max_num_features) for i in range(num_stages)],\n conv_op=conv_op,\n kernel_sizes=configuration_manager.conv_kernel_sizes,\n strides=configuration_manager.pool_op_kernel_sizes,\n num_classes=label_manager.num_segmentation_heads,\n deep_supervision=enable_deep_supervision,\n **conv_or_blocks_per_stage,\n **kwargs[segmentation_network_class_name]\n )\n model.apply(InitWeights_He(1e-2))\n if network_class == ResidualEncoderUNet:\n model.apply(init_last_bn_before_add_to_0)\n return model\n\n def batch_id2pos_indexs_k(self, id, size):\n x, y, z = size\n\n pos_x = (id // (y * z)).type(torch.int32)\n pos_y = ((id // z) % y).type(torch.int32)\n pos_z = (id % z).type(torch.int32)\n\n pos = torch.stack((pos_x, pos_y, pos_z))\n\n return pos\n\n def combine_tensors(self, A, B, C):\n D = A.clone() # Clone A to D\n B_C = B * C # Element-wise multiply B and C\n mask = (A != 0) & (B != 0) # Non-zero mask for A and B\n\n D[mask] = B_C[mask] # Update D based on mask\n return D\n\n def get_radius_weights(self, y_true, skel_true, H, W, D):\n # dist_map_3d = distmap.euclidean_distance_transform(y_true)\n # https://docs.cupy.dev/en/stable/user_guide/interoperability.html\n y_true_cupy_array = cp.from_dlpack(to_dlpack(y_true))\n dist_map_3d_cupy_array = distance_transform_edt_cupy(y_true_cupy_array)\n dist_map_3d = from_dlpack(dist_map_3d_cupy_array.toDlpack())\n \n dist_map_3d[y_true == 0] = 0\n vessel_radius = dist_map_3d[skel_true == 1]\n\n if vessel_radius.shape[0] == 0 or vessel_radius.min() == vessel_radius.max():\n return y_true, skel_true.clone(), skel_true.clone(), skel_true.clone()\n\n smooth = 1e-7\n vessel_radius_max = vessel_radius.max()\n dist_map_3d[dist_map_3d > vessel_radius_max] = vessel_radius_max\n # print(\"vessel_radius_max: \", vessel_radius_max)\n # print(\"dist_map_3d_max: \", dist_map_3d.max())\n vessel_radius_0_1 = vessel_radius / vessel_radius_max\n vessel_radius_1_R2 = (1 + smooth) / (vessel_radius_0_1 ** 2 + smooth)\n vessel_radius_1_R = (1 + smooth) / (vessel_radius_0_1 + smooth)\n y_dist_map_norm = dist_map_3d / vessel_radius_max\n \n vessel_radius_weights_1_R2 = torch.zeros_like(skel_true, dtype=torch.float32)\n vessel_radius_weights_1_R = torch.zeros_like(skel_true, dtype=torch.float32)\n vessel_radius_weights_1 = torch.zeros_like(skel_true, dtype=torch.float32)\n N = H * W * D\n skel_N = skel_true.reshape(N)\n nodes = (skel_N == 1).nonzero(as_tuple=False).squeeze()\n nodes_pos = self.batch_id2pos_indexs_k(nodes, (H, W, D)).T\n\n vessel_radius_weights_1_R2[nodes_pos[:, 0], nodes_pos[:, 1], nodes_pos[:, 2]] = vessel_radius_1_R2\n vessel_radius_weights_1_R[nodes_pos[:, 0], nodes_pos[:, 1], nodes_pos[:, 2]] = vessel_radius_1_R\n vessel_radius_weights_1[nodes_pos[:, 0], nodes_pos[:, 1], nodes_pos[:, 2]] = vessel_radius_0_1\n\n return y_dist_map_norm, vessel_radius_weights_1_R2, vessel_radius_weights_1_R, 
vessel_radius_weights_1\n \n def train_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n data = data.to(self.device, non_blocking=True)\n\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n self.optimizer.zero_grad()\n\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n\n target_0 = target[0].clone()\n target_0[target_0 > 0] = 1\n skel_target_0 = self.skeletonization_module(target_0.float())\n\n output = self.network(data)\n output_0 = output[0]\n\n seg_binary_output_0 = output_0[:, -2:, :, :, :]\n seg_output_0 = output_0[:, :-2, :, :, :]\n\n loss_binary_dc = self.seg_loss_0_bin(seg_binary_output_0, target_0)\n \n loss_seg_0 = self.seg_loss_0_mul(seg_output_0, target[0])\n loss_seg_deep = self.seg_loss_deep(output[1:], target[1:])\n\n seg_loss_weight_0 = self.seg_loss_weights[0]\n seg_loss_weight_deep = self.seg_loss_weights[1:]\n \n results_prob = torch.softmax(seg_binary_output_0, 1)\n seg_pre = torch.argmax(results_prob, dim=1)\n skel_pred_binary = self.skeletonization_module_binary(seg_pre.unsqueeze(1).float()).squeeze(1)\n skel_true = skel_target_0.detach().squeeze(1)\n y_pred_binary = torch.where(seg_pre > 0, 1, 0)\n y_true = target_0.detach().squeeze(1)\n\n results_prob_fore = torch.softmax(seg_output_0, 1)\n seg_pre_fore = torch.argmax(results_prob_fore, dim=1)\n y_pred_multi = torch.where(seg_pre_fore > 0, 1, 0)\n #skel_pred_multi = self.skeletonization_module_multi(y_pred_multi.unsqueeze(1).float()).squeeze(1)\n skel_pred_multi = skel_pred_binary\n\n self.smooth = 1e-3\n Batch = y_true.shape[0]\n H, W, D = y_true.shape[1], y_true.shape[2], y_true.shape[3]\n\n if loss_seg_0 > 0.3:\n radii_weights_1_R2_true_multi = skel_true.float()\n radii_weights_1_R2_pred_multi = skel_pred_multi.float()\n radii_weights_1_R2_pred_binary = skel_pred_binary.float()\n radii_weights_1_R2_true_binary = skel_true.float()\n radii_weights_1_R_true_multi = skel_true.float()\n radii_weights_1_R_pred_multi = skel_pred_multi.float()\n radii_weights_1_R_pred_binary = skel_pred_binary.float()\n radii_weights_1_R_true_binary = skel_true.float()\n radii_weights_1_true_multi = skel_true.float()\n radii_weights_1_pred_multi = skel_pred_multi.float()\n radii_weights_1_pred_binary = skel_pred_binary.float()\n radii_weights_1_true_binary = skel_true.float()\n y_true_dist_map_norm = y_true.float()\n y_pred_multi_dist_map_norm = y_pred_multi.float()\n y_pred_binary_dist_map_norm = y_pred_binary.float()\n else:\n radii_weights_1_R2_true_multi = torch.zeros_like(skel_true).float()\n radii_weights_1_R2_pred_multi = torch.zeros_like(skel_pred_multi).float()\n radii_weights_1_R2_pred_binary = torch.zeros_like(skel_pred_binary).float()\n radii_weights_1_R_true_multi = torch.zeros_like(skel_true).float()\n radii_weights_1_R_pred_multi = torch.zeros_like(skel_pred_multi).float()\n radii_weights_1_R_pred_binary = torch.zeros_like(skel_pred_binary).float()\n radii_weights_1_true_multi = torch.zeros_like(skel_true).float()\n radii_weights_1_pred_multi = torch.zeros_like(skel_pred_multi).float()\n radii_weights_1_pred_binary = torch.zeros_like(skel_pred_binary).float()\n y_true_dist_map_norm = torch.zeros_like(y_true).float()\n y_pred_multi_dist_map_norm = torch.zeros_like(y_pred_multi).float()\n y_pred_binary_dist_map_norm = torch.zeros_like(y_pred_binary).float()\n\n for b_i in range(Batch):\n y_true_dist_map_norm[b_i], 
radii_weights_1_R2_true_multi[b_i], radii_weights_1_R_true_multi[b_i], radii_weights_1_true_multi[b_i] = self.get_radius_weights(y_true[b_i], skel_true[b_i], H, W, D)\n y_pred_multi_dist_map_norm[b_i], radii_weights_1_R2_pred_multi[b_i], radii_weights_1_R_pred_multi[b_i], radii_weights_1_pred_multi[b_i] = self.get_radius_weights(y_pred_multi[b_i], skel_pred_multi[b_i], H, W, D)\n y_pred_binary_dist_map_norm[b_i], radii_weights_1_R2_pred_binary[b_i], radii_weights_1_R_pred_binary[b_i], radii_weights_1_pred_binary[b_i] = self.get_radius_weights(y_pred_binary[b_i], skel_pred_binary[b_i], H, W, D)\n\n radii_weights_1_R_true_binary = radii_weights_1_R_true_multi\n radii_weights_1_R2_true_binary = radii_weights_1_R2_true_multi\n radii_weights_1_true_binary = radii_weights_1_true_multi\n \n weighted_tprec = (torch.sum(torch.multiply(radii_weights_1_R2_pred_binary, y_true_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_pred_binary, radii_weights_1_true_binary, radii_weights_1_R2_pred_binary))+self.smooth)\n weighted_tsens = (torch.sum(torch.multiply(radii_weights_1_R2_true_binary, y_pred_binary_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_true_binary, radii_weights_1_pred_binary, radii_weights_1_R2_true_binary))+self.smooth) \n skel_cl_dice = - 2.0 * (weighted_tprec * weighted_tsens) / (weighted_tprec + weighted_tsens)\n print(\"skel cl_dice: \", skel_cl_dice)\n\n weighted_tprec = (torch.sum(torch.multiply(radii_weights_1_R2_pred_multi, y_true_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_pred_multi, radii_weights_1_true_multi, radii_weights_1_R2_pred_multi))+self.smooth)\n weighted_tsens = (torch.sum(torch.multiply(radii_weights_1_R2_true_multi, y_pred_multi_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_true_multi, radii_weights_1_pred_multi, radii_weights_1_R2_true_multi))+self.smooth)\n seg_cl_dice = - 2.0 * (weighted_tprec * weighted_tsens) / (weighted_tprec + weighted_tsens)\n print(\"seg_output cl_dice: \", seg_cl_dice)\n\n loss_skel_dc_cldc = loss_binary_dc + 0.5 * skel_cl_dice\n\n loss_0_dc_cldc = loss_seg_0 + 0.5 * seg_cl_dice\n loss_seg_deep_dc_cldc = loss_seg_deep + np.sum(seg_loss_weight_deep) * 0.5 * seg_cl_dice\n\n l = loss_seg_deep_dc_cldc + seg_loss_weight_0 * loss_0_dc_cldc + seg_loss_weight_0 * loss_skel_dc_cldc\n print(\"loss_seg_deep_dc_cldc, loss_0_dc_cldc, loss_skel_dc_cldc, l: \", loss_seg_deep_dc_cldc, loss_0_dc_cldc, loss_skel_dc_cldc, l)\n\n if self.grad_scaler is not None:\n self.grad_scaler.scale(l).backward()\n self.grad_scaler.unscale_(self.optimizer)\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.grad_scaler.step(self.optimizer)\n self.grad_scaler.update()\n else:\n l.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.optimizer.step()\n \n return {'loss': l.detach().cpu().numpy()}\n\n def validation_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n data = data.to(self.device, non_blocking=True)\n\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n self.optimizer.zero_grad()\n\n # Autocast is a little bitch.\n # If the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. 
(this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n \n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n target_0 = target[0].clone()\n target_0[target_0 > 0] = 1\n skel_target_0 = self.skeletonization_module(target_0.float())\n\n output = self.network(data)\n output_0 = output[0]\n\n seg_binary_output_0 = output_0[:, -2:, :, :, :]\n seg_output_0 = output_0[:, :-2, :, :, :]\n\n loss_binary_dc = self.seg_loss_0_bin(seg_binary_output_0, target_0)\n \n loss_seg_0 = self.seg_loss_0_mul(seg_output_0, target[0])\n loss_seg_deep = self.seg_loss_deep(output[1:], target[1:])\n\n seg_loss_weight_0 = self.seg_loss_weights[0]\n seg_loss_weight_deep = self.seg_loss_weights[1:]\n \n results_prob = torch.softmax(seg_binary_output_0, 1)\n seg_pre = torch.argmax(results_prob, dim=1)\n skel_pred_binary = self.skeletonization_module_binary(seg_pre.unsqueeze(1).float()).squeeze(1)\n skel_true = skel_target_0.detach().squeeze(1)\n y_pred_binary = torch.where(seg_pre > 0, 1, 0)\n y_true = target_0.detach().squeeze(1)\n\n results_prob_fore = torch.softmax(seg_output_0, 1)\n seg_pre_fore = torch.argmax(results_prob_fore, dim=1)\n y_pred_multi = torch.where(seg_pre_fore > 0, 1, 0)\n skel_pred_multi = self.skeletonization_module_multi(y_pred_multi.unsqueeze(1).float()).squeeze(1)\n\n self.smooth = 1e-3\n Batch = y_true.shape[0]\n H, W, D = y_true.shape[1], y_true.shape[2], y_true.shape[3]\n\n if loss_seg_0 > 0.3:\n radii_weights_1_R2_true_multi = skel_true.float()\n radii_weights_1_R2_pred_multi = skel_pred_multi.float()\n radii_weights_1_R2_pred_binary = skel_pred_binary.float()\n radii_weights_1_R2_true_binary = skel_true.float()\n radii_weights_1_R_true_multi = skel_true.float()\n radii_weights_1_R_pred_multi = skel_pred_multi.float()\n radii_weights_1_R_pred_binary = skel_pred_binary.float()\n radii_weights_1_R_true_binary = skel_true.float()\n radii_weights_1_true_multi = skel_true.float()\n radii_weights_1_pred_multi = skel_pred_multi.float()\n radii_weights_1_pred_binary = skel_pred_binary.float()\n radii_weights_1_true_binary = skel_true.float()\n y_true_dist_map_norm = y_true.float()\n y_pred_multi_dist_map_norm = y_pred_multi.float()\n y_pred_binary_dist_map_norm = y_pred_binary.float()\n else:\n radii_weights_1_R2_true_multi = torch.zeros_like(skel_true).float()\n radii_weights_1_R2_pred_multi = torch.zeros_like(skel_pred_multi).float()\n radii_weights_1_R2_pred_binary = torch.zeros_like(skel_pred_binary).float()\n radii_weights_1_R_true_multi = torch.zeros_like(skel_true).float()\n radii_weights_1_R_pred_multi = torch.zeros_like(skel_pred_multi).float()\n radii_weights_1_R_pred_binary = torch.zeros_like(skel_pred_binary).float()\n radii_weights_1_true_multi = torch.zeros_like(skel_true).float()\n radii_weights_1_pred_multi = torch.zeros_like(skel_pred_multi).float()\n radii_weights_1_pred_binary = torch.zeros_like(skel_pred_binary).float()\n y_true_dist_map_norm = torch.zeros_like(y_true).float()\n y_pred_multi_dist_map_norm = torch.zeros_like(y_pred_multi).float()\n y_pred_binary_dist_map_norm = torch.zeros_like(y_pred_binary).float()\n \n for b_i in range(Batch):\n y_true_dist_map_norm[b_i], radii_weights_1_R2_true_multi[b_i], radii_weights_1_R_true_multi[b_i], radii_weights_1_true_multi[b_i] = self.get_radius_weights(y_true[b_i], skel_true[b_i], H, W, D)\n y_pred_multi_dist_map_norm[b_i], radii_weights_1_R2_pred_multi[b_i], radii_weights_1_R_pred_multi[b_i], radii_weights_1_pred_multi[b_i] = 
self.get_radius_weights(y_pred_multi[b_i], skel_pred_multi[b_i], H, W, D)\n y_pred_binary_dist_map_norm[b_i], radii_weights_1_R2_pred_binary[b_i], radii_weights_1_R_pred_binary[b_i], radii_weights_1_pred_binary[b_i] = self.get_radius_weights(y_pred_binary[b_i], skel_pred_binary[b_i], H, W, D)\n\n radii_weights_1_R_true_binary = radii_weights_1_R_true_multi\n radii_weights_1_R2_true_binary = radii_weights_1_R2_true_multi\n radii_weights_1_true_binary = radii_weights_1_true_multi\n \n weighted_tprec = (torch.sum(torch.multiply(radii_weights_1_R2_pred_binary, y_true_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_pred_binary, radii_weights_1_true_binary, radii_weights_1_R2_pred_binary))+self.smooth)\n weighted_tsens = (torch.sum(torch.multiply(radii_weights_1_R2_true_binary, y_pred_binary_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_true_binary, radii_weights_1_pred_binary, radii_weights_1_R2_true_binary))+self.smooth) \n skel_cl_dice = - 2.0 * (weighted_tprec * weighted_tsens) / (weighted_tprec + weighted_tsens)\n print(\"skel cl_dice: \", skel_cl_dice)\n\n weighted_tprec = (torch.sum(torch.multiply(radii_weights_1_R2_pred_multi, y_true_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_pred_multi, radii_weights_1_true_multi, radii_weights_1_R2_pred_multi))+self.smooth)\n weighted_tsens = (torch.sum(torch.multiply(radii_weights_1_R2_true_multi, y_pred_multi_dist_map_norm))+self.smooth)/(torch.sum(self.combine_tensors(radii_weights_1_R_true_multi, radii_weights_1_pred_multi, radii_weights_1_R2_true_multi))+self.smooth)\n seg_cl_dice = - 2.0 * (weighted_tprec * weighted_tsens) / (weighted_tprec + weighted_tsens)\n print(\"seg_output cl_dice: \", seg_cl_dice)\n\n loss_skel_dc_cldc = loss_binary_dc + 0.5 * skel_cl_dice\n\n if torch.sum(skel_true) > 0 and torch.sum(skel_pred_multi) == 0:\n cl_dice = 0\n else:\n cl_dice = -seg_cl_dice.detach().cpu().numpy()\n\n loss_0_dc_cldc = loss_seg_0 + 0.5 * seg_cl_dice\n loss_seg_deep_dc_cldc = loss_seg_deep + np.sum(seg_loss_weight_deep) * 0.5 * seg_cl_dice\n\n l = loss_seg_deep_dc_cldc + seg_loss_weight_0 * loss_0_dc_cldc + seg_loss_weight_0 * loss_skel_dc_cldc\n print(\"loss_seg_deep_dc_cldc, loss_0_dc_cldc, loss_skel_dc_cldc, l: \", loss_seg_deep_dc_cldc, loss_0_dc_cldc, loss_skel_dc_cldc, l)\n \n target = target[0]\n output = seg_output_0\n # the following is needed for online evaluation. 
Fake dice (green line)\n axes = [0] + list(range(2, len(output.shape)))\n\n if self.label_manager.has_regions:\n predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()\n else:\n # no need for softmax\n output_seg = output.argmax(1)[:, None]\n predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)\n predicted_segmentation_onehot.scatter_(1, output_seg, 1)\n del output_seg\n\n if self.label_manager.has_ignore_label:\n if not self.label_manager.has_regions:\n mask = (target != self.label_manager.ignore_label).float()\n # CAREFUL that you don't rely on target after this line!\n target[target == self.label_manager.ignore_label] = 0\n else:\n mask = 1 - target[:, -1:]\n # CAREFUL that you don't rely on target after this line!\n target = target[:, :-1]\n else:\n mask = None\n\n tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)\n\n tp_hard = tp.detach().cpu().numpy()\n fp_hard = fp.detach().cpu().numpy()\n fn_hard = fn.detach().cpu().numpy()\n if not self.label_manager.has_regions:\n # if we train with regions all segmentation heads predict some kind of foreground. In conventional\n # (softmax training) there needs tobe one output for the background. We are not interested in the\n # background Dice\n # [1:] in order to remove background\n tp_hard = tp_hard[1:]\n fp_hard = fp_hard[1:]\n fn_hard = fn_hard[1:]\n\n return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard, 'cl_dice': cl_dice}\n\n def on_validation_epoch_end(self, val_outputs: List[dict]):\n outputs_collated = collate_outputs(val_outputs)\n tp = np.sum(outputs_collated['tp_hard'], 0)\n fp = np.sum(outputs_collated['fp_hard'], 0)\n fn = np.sum(outputs_collated['fn_hard'], 0)\n mean_fg_cl_dice = np.mean(outputs_collated['cl_dice'])\n \n if self.is_ddp:\n world_size = dist.get_world_size()\n\n tps = [None for _ in range(world_size)]\n dist.all_gather_object(tps, tp)\n tp = np.vstack([i[None] for i in tps]).sum(0)\n\n fps = [None for _ in range(world_size)]\n dist.all_gather_object(fps, fp)\n fp = np.vstack([i[None] for i in fps]).sum(0)\n\n fns = [None for _ in range(world_size)]\n dist.all_gather_object(fns, fn)\n fn = np.vstack([i[None] for i in fns]).sum(0)\n\n losses_val = [None for _ in range(world_size)]\n dist.all_gather_object(losses_val, outputs_collated['loss'])\n loss_here = np.vstack(losses_val).mean()\n else:\n loss_here = np.mean(outputs_collated['loss'])\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(tp, fp, fn)]]\n mean_fg_dice = np.nanmean(global_dc_per_class)\n mean_fg_dc_cldc = 0.5 * mean_fg_dice + 0.5 * mean_fg_cl_dice\n self.logger.log('mean_fg_dice', mean_fg_dice, self.current_epoch)\n self.logger.log('mean_fg_cl_dice', mean_fg_cl_dice, self.current_epoch)\n self.logger.log('mean_fg_dc_cldc', mean_fg_dc_cldc, self.current_epoch)\n self.logger.log('dice_per_class_or_region',\n global_dc_per_class, self.current_epoch)\n self.logger.log('val_losses', loss_here, self.current_epoch)\n \n def on_epoch_end(self):\n self.logger.log('epoch_end_timestamps', time(), self.current_epoch)\n\n # todo find a solution for this stupid shit\n self.print_to_log_file('train_loss', np.round(self.logger.my_fantastic_logging['train_losses'][-1], decimals=4))\n self.print_to_log_file('val_loss', np.round(self.logger.my_fantastic_logging['val_losses'][-1], decimals=4))\n self.print_to_log_file('Pseudo dice', [np.round(i, decimals=4) for i in\n 
self.logger.my_fantastic_logging['dice_per_class_or_region'][-1]])\n self.print_to_log_file('Pseudo cl dice', np.round(self.logger.my_fantastic_logging['mean_fg_cl_dice'][-1], decimals=4))\n self.print_to_log_file('Pseudo dc_cldc', np.round(self.logger.my_fantastic_logging['mean_fg_dc_cldc'][-1], decimals=4))\n self.print_to_log_file(\n f\"Epoch time: {np.round(self.logger.my_fantastic_logging['epoch_end_timestamps'][-1] - self.logger.my_fantastic_logging['epoch_start_timestamps'][-1], decimals=2)} s\")\n\n # handling periodic checkpointing\n current_epoch = self.current_epoch\n if (current_epoch + 1) % self.save_every == 0 and current_epoch != (self.num_epochs - 1):\n self.save_checkpoint(join(self.output_folder, 'checkpoint_latest.pth'))\n\n # handle 'best' checkpointing. ema_fg_dice is computed by the logger and can be accessed like this\n if self._best_ema is None or self.logger.my_fantastic_logging['ema_fg_dc_cldc'][-1] > self._best_ema:\n self._best_ema = self.logger.my_fantastic_logging['ema_fg_dc_cldc'][-1]\n self.print_to_log_file(f\"Yayy! New best EMA pseudo Dice: {np.round(self._best_ema, decimals=4)}\")\n self.save_checkpoint(join(self.output_folder, 'checkpoint_best.pth'))\n\n if self.local_rank == 0:\n self.logger.plot_progress_png(self.output_folder)\n\n self.current_epoch += 1\n\n\n def perform_actual_validation(self, save_probabilities: bool = False):\n self.set_deep_supervision_enabled(False)\n self.network.eval()\n\n predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True,\n perform_everything_on_gpu=True, device=self.device, verbose=False,\n verbose_preprocessing=False, allow_tqdm=False)\n predictor.manual_initialization(self.network, self.plans_manager, self.configuration_manager, None,\n self.dataset_json, self.__class__.__name__,\n self.inference_allowed_mirroring_axes)\n\n with multiprocessing.get_context(\"spawn\").Pool(default_num_processes) as segmentation_export_pool:\n validation_output_folder = join(self.output_folder, 'validation')\n maybe_mkdir_p(validation_output_folder)\n\n # we cannot use self.get_tr_and_val_datasets() here because we might be DDP and then we have to distribute\n # the validation keys across the workers.\n _, val_keys = self.do_split()\n if self.is_ddp:\n val_keys = val_keys[self.local_rank:: dist.get_world_size()]\n\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n\n next_stages = self.configuration_manager.next_stage_names\n\n if next_stages is not None:\n _ = [maybe_mkdir_p(join(self.output_folder_base, 'predicted_next_stage', n)) for n in next_stages]\n\n results = []\n for k in dataset_val.keys():\n proceed = not check_workers_busy(segmentation_export_pool, results,\n allowed_num_queued=2 * len(segmentation_export_pool._pool))\n while not proceed:\n sleep(0.1)\n proceed = not check_workers_busy(segmentation_export_pool, results,\n allowed_num_queued=2 * len(segmentation_export_pool._pool))\n\n self.print_to_log_file(f\"predicting {k}\")\n data, seg, properties = dataset_val.load_case(k)\n\n if self.is_cascaded:\n data = np.vstack((data, convert_labelmap_to_one_hot(seg[-1], self.label_manager.foreground_labels,\n output_dtype=data.dtype)))\n with warnings.catch_warnings():\n # ignore 'The given NumPy array is not writable' warning\n warnings.simplefilter(\"ignore\")\n data = torch.from_numpy(data)\n\n output_filename_truncated = 
join(validation_output_folder, k)\n\n try:\n prediction = predictor.predict_sliding_window_return_logits(data)\n except RuntimeError:\n predictor.perform_everything_on_gpu = False\n prediction = predictor.predict_sliding_window_return_logits(data)\n predictor.perform_everything_on_gpu = True\n\n prediction = prediction.cpu()\n\n # this needs to go into background processes\n results.append(\n segmentation_export_pool.starmap_async(\n export_prediction_from_logits, (\n (prediction, properties, self.configuration_manager, self.plans_manager,\n self.dataset_json, output_filename_truncated, save_probabilities),\n )\n )\n )\n # for debug purposes\n # export_prediction(prediction_for_export, properties, self.configuration, self.plans, self.dataset_json,\n # output_filename_truncated, save_probabilities)\n\n # if needed, export the softmax prediction for the next stage\n if next_stages is not None:\n for n in next_stages:\n next_stage_config_manager = self.plans_manager.get_configuration(n)\n expected_preprocessed_folder = join(nnUNet_preprocessed, self.plans_manager.dataset_name,\n next_stage_config_manager.data_identifier)\n\n try:\n # we do this so that we can use load_case and do not have to hard code how loading training cases is implemented\n tmp = nnUNetDataset(expected_preprocessed_folder, [k],\n num_images_properties_loading_threshold=0)\n d, s, p = tmp.load_case(k)\n except FileNotFoundError:\n self.print_to_log_file(\n f\"Predicting next stage {n} failed for case {k} because the preprocessed file is missing! \"\n f\"Run the preprocessing for this configuration first!\")\n continue\n\n target_shape = d.shape[1:]\n output_folder = join(self.output_folder_base, 'predicted_next_stage', n)\n output_file = join(output_folder, k + '.npz')\n\n # resample_and_save(prediction, target_shape, output_file, self.plans_manager, self.configuration_manager, properties,\n # self.dataset_json)\n results.append(segmentation_export_pool.starmap_async(\n resample_and_save, (\n (prediction, target_shape, output_file, self.plans_manager,\n self.configuration_manager,\n properties,\n self.dataset_json),\n )\n ))\n\n _ = [r.get() for r in results]\n\n if self.is_ddp:\n dist.barrier()\n\n if self.local_rank == 0:\n metrics = compute_metrics_on_folder(join(self.preprocessed_dataset_folder_base, 'gt_segmentations'),\n validation_output_folder,\n join(validation_output_folder, 'summary.json'),\n self.plans_manager.image_reader_writer_class(),\n self.dataset_json[\"file_ending\"],\n self.label_manager.foreground_regions if self.label_manager.has_regions else\n self.label_manager.foreground_labels,\n self.label_manager.ignore_label, chill=True)\n self.print_to_log_file(\"Validation complete\", also_print_to_console=True)\n self.print_to_log_file(\"Mean Validation Dice: \", (metrics['foreground_mean'][\"Dice\"]), also_print_to_console=True)\n\n self.set_deep_supervision_enabled(True)\n compute_gaussian.cache_clear()\n\n\n","repo_name":"PengchengShi1220/cbDice","sub_path":"nnUNetTrainer/nnUNetTrainer_CB_DICE.py","file_name":"nnUNetTrainer_CB_DICE.py","file_ext":"py","file_size_in_byte":49385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"22631066648","text":"# Factorial\n# DFS의 대표적인 예제 - Stack 자료구조\n# 1 * 2 * 3 ....* (n-1) * n\n\n# 1. 반복적으로 구현한 방식\ndef factorial_normal(n):\n num = 1\n for i in range(1, n+1):\n num *= i\n return num\n\n\n# 2 . 
재귀적으로 구현한 방식\ndef factorial_recur(n):\n # 종료 조건\n # 수학적으로 0!, 1!의 값은 1로 같다는 성질을 이용\n # n이 1 이하가 되었을 때 \n if n <= 1: \n return 1\n\n # n! = n * (n-1)!을 그대로 코드로 작성\n return n * factorial_recur(n-1)\n\nprint(factorial_normal(6))\nprint(factorial_recur(6))","repo_name":"kiimy-git/Algorithm_Test","sub_path":"DFS_BFS/Factorial.py","file_name":"Factorial.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70032964153","text":"def plus_one(digits):\n for i in reversed(range(len(digits))):\n reminder = 0\n if digits[i] < 9:\n digits[i] += 1\n return digits\n else:\n # num = digits[i] + 1\n digits[i] = 0\n reminder = 1\n return digits if reminder == 0 else [1] + digits\n\n \nprint(plus_one([9,9,9,9]))","repo_name":"abijoy/leetcode","sub_path":"problems/plus-one-66.py","file_name":"plus-one-66.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17644471207","text":"\r\nimport logging\r\nimport numpy as np\r\nimport random\r\nimport os\r\nimport sys\r\nimport time\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nimport uuid\r\n\r\nfrom dataset import DemoAttrDataset, batchify\r\nfrom exp import Experiment\r\nfrom config import get_args\r\n\r\ndef run_experiment(args, logger):\r\n\r\n # generate a data loader for validation set\r\n if args.do_validation:\r\n eval_loader = DataLoader(\r\n dataset=DemoAttrDataset(\r\n logger=logger, \r\n data_type='valid',\r\n data_path=os.path.join(args.data_path, args.dataset),\r\n task_type=args.task_type,\r\n model_type=args.model_type),\r\n batch_size=args.batch_size,\r\n shuffle=False,\r\n num_workers=args.num_workers,\r\n collate_fn=batchify)\r\n else:\r\n eval_loader = DataLoader(\r\n dataset=DemoAttrDataset(\r\n logger=logger, \r\n data_type='test',\r\n data_path=os.path.join(args.data_path, args.dataset),\r\n task_type=args.task_type,\r\n model_type=args.model_type),\r\n batch_size=args.batch_size,\r\n shuffle=False,\r\n num_workers=args.num_workers,\r\n collate_fn=batchify)\r\n\r\n train_dataset = DemoAttrDataset(\r\n logger=logger,\r\n data_type='train',\r\n data_path=os.path.join(args.data_path, args.dataset),\r\n task_type=args.task_type,\r\n model_type=args.model_type)\r\n train_loader = DataLoader(\r\n dataset=train_dataset,\r\n batch_size=args.batch_size,\r\n shuffle=True,\r\n num_workers=args.num_workers,\r\n collate_fn=batchify)\r\n\r\n exp = Experiment(args, logger)\r\n\r\n max_score = max_loss = stop_cnt = 0\r\n max_macP = max_macR = max_macF1 = max_wP = max_wR = max_wF1 = 0\r\n pre_wR = 0\r\n for epoch in range(args.max_epoch):\r\n logger.info(\"++ Epoch : {} ++ \\n\".format(epoch+1))\r\n\r\n tr_t0 = time.clock()\r\n tr_loss, tr_hm, \\\r\n tr_macP, tr_macR, tr_macF1, tr_wP, tr_wR, tr_wF1 = \\\r\n exp.run_epoch(epoch, train_loader, args.dataset, trainable=True)\r\n tr_t1 = time.clock()\r\n\r\n eval_t0 = time.clock()\r\n eval_loss, eval_hm, \\\r\n eval_macP, eval_macR, eval_macF1, eval_wP, eval_wR, eval_wF1 = \\\r\n exp.run_epoch(epoch, eval_loader, args.dataset, trainable=False)\r\n eval_t1 = time.clock()\r\n\r\n # print training scores\r\n logger.info(\"### Training # Loss={:5.3f}, time:{:5.2}, Hamming={:2.3f}\"\r\n .format(tr_loss, tr_t1-tr_t0, tr_hm))\r\n logger.info(\"# macro - macP:{:2.3f}, macR:{:2.3f}, macF1:{:2.3f}\"\r\n .format(tr_macP, tr_macR, tr_macF1))\r\n logger.info(\"# weighted - wP:{:2.3f}, wR:{:2.3f}, wF1:{:2.3f} 
\\n\"\r\n .format(tr_wP, tr_wR, tr_wF1))\r\n\r\n # print val/test scores\r\n logger.info(\"%%% Evaluation % Loss={:5.3f}, time:{:5.2}, Hamming={:2.3f}\"\r\n .format(eval_loss, eval_t1-eval_t0, eval_hm))\r\n logger.info(\"% macro - macP:{:2.3f}, macR:{:2.3f}, macF1:{:2.3f}\"\r\n .format(eval_macP, eval_macR, eval_macF1))\r\n logger.info(\"% weighted - wP:{:2.3f}, wR:{:2.3f}, wF1:{:2.3f} \\n\"\r\n .format(eval_wP, eval_wR, eval_wF1))\r\n\r\n # early stop\r\n if max_score < eval_wF1:\r\n max_epoch = epoch+1\r\n max_score = eval_wF1\r\n max_loss = eval_loss\r\n max_hm = eval_hm\r\n max_macP = eval_macP\r\n max_macR = eval_macR\r\n max_macF1 = eval_macF1\r\n max_wP = eval_wP\r\n max_wR = eval_wR\r\n max_wF1 = eval_wF1\r\n #model_params = exp.model.item_emb.weight\r\n stop_cnt = 0\r\n else:\r\n # lr decay\r\n exp.adjust_lr()\r\n stop_cnt += 1\r\n if args.model_type == 'POP': break\r\n\r\n if stop_cnt >= 5 and not args.no_early_stop:\r\n return max_epoch, max_loss, max_hm, \\\r\n max_macP, max_macR, max_macF1, \\\r\n max_wP, max_wR, max_wF1\r\n return max_epoch, max_loss, max_hm, \\\r\n max_macP, max_macR, max_macF1, \\\r\n max_wP, max_wR, max_wF1\r\n\r\n\r\ndef main():\r\n # get all arguments\r\n args = get_args()\r\n\r\n # set random seeds\r\n #np.random.seed(args.rand_seed)\r\n #random.seed(args.rand_seed)\r\n #torch.manual_seed(args.rand_seed)\r\n\r\n # set a logger\r\n model_id = time.strftime(\"%Y%m%d-\") + str(uuid.uuid4())[:8]\r\n formatter = logging.Formatter('%(asctime)s: %(message)s ', '%m/%d/%Y %I:%M:%S %p')\r\n logger = logging.getLogger(model_id)\r\n logger.setLevel(logging.INFO)\r\n streamHandler = logging.StreamHandler()\r\n streamHandler.setFormatter(formatter)\r\n logger.addHandler(streamHandler)\r\n if args.save_log:\r\n fileHandler = logging.FileHandler('./save/log/'+model_id+'.log')\r\n fileHandler.setFormatter(formatter)\r\n logger.addHandler(fileHandler)\r\n logger.info('log file : ./save/log/'+model_id+'.log')\r\n logger.info(args)\r\n\r\n ep, loss, hm, macP, macR, macF1, wP, wR, wF1 = run_experiment(args, logger)\r\n logger.info(\"[Final score - ep:{}] Loss={:5.3f}, Hamming={:2.3f}\"\r\n .format(ep, loss, hm))\r\n logger.info(\"[ macro ] macP:{:2.3f}, macR:{:2.3f}, macF1:{:2.3f}\"\r\n .format(macP, macR, macF1))\r\n logger.info(\"[ weighted ] wP:{:2.3f}, wR:{:2.3f}, wF1:{:2.3f}\"\r\n .format(wP, wR, wF1))\r\n if args.save_log:\r\n logger.info('log file : ./save/log/'+model_id+'.log')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"dmis-lab/demographic-prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"95"} +{"seq_id":"5652701320","text":"from cursive import exception as cursive_exception\nimport subject_store\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom oslo_utils import encodeutils\nfrom oslo_utils import excutils\nimport webob.exc\n\nimport subject.api.policy\nfrom subject.common import exception\nfrom subject.common import trust_auth\nfrom subject.common import utils\nfrom subject.common import wsgi\nimport subject.db\nimport subject.gateway\nfrom subject.i18n import _, _LE, _LI\nimport subject.notifier\n\n\nLOG = logging.getLogger(__name__)\n\nCONF = cfg.CONF\n\n\nclass SubjectDataController(object):\n def __init__(self, db_api=None, store_api=None,\n policy_enforcer=None, notifier=None,\n gateway=None):\n if gateway is None:\n db_api = db_api or subject.db.get_api()\n store_api = 
store_api or subject_store\n policy = policy_enforcer or subject.api.policy.Enforcer()\n notifier = notifier or subject.notifier.Notifier()\n gateway = subject.gateway.Gateway(db_api, store_api,\n notifier, policy)\n self.gateway = gateway\n\n def _restore(self, subject_repo, subject):\n \"\"\"\n Restore the subject to queued status.\n\n :param subject_repo: The instance of SubjectRepo\n :param subject: The subject will be restored\n \"\"\"\n try:\n if subject_repo and subject:\n subject.status = 'queued'\n subject_repo.save(subject)\n except Exception as e:\n msg = (_LE(\"Unable to restore subject %(subject_id)s: %(e)s\") %\n {'subject_id': subject.subject_id,\n 'e': encodeutils.exception_to_unicode(e)})\n LOG.exception(msg)\n\n def _delete(self, subject_repo, subject):\n \"\"\"Delete the subject.\n\n :param subject_repo: The instance of SubjectRepo\n :param subject: The subject that will be deleted\n \"\"\"\n try:\n if subject_repo and subject:\n subject.status = 'killed'\n subject_repo.save(subject)\n except Exception as e:\n msg = (_LE(\"Unable to delete subject %(subject_id)s: %(e)s\") %\n {'subject_id': subject.subject_id,\n 'e': encodeutils.exception_to_unicode(e)})\n LOG.exception(msg)\n\n @utils.mutating\n def upload(self, req, subject_id, data, size):\n subject_repo = self.gateway.get_repo(req.context)\n subject = None\n refresher = None\n cxt = req.context\n try:\n subject = subject_repo.get(subject_id)\n subject.status = 'saving'\n try:\n if CONF.data_api == 'subject.db.registry.api':\n # create a trust if backend is registry\n try:\n # request user plugin for current token\n user_plugin = req.environ.get('keystone.token_auth')\n roles = []\n # use roles from request environment because they\n # are not transformed to lower-case unlike cxt.roles\n for role_info in req.environ.get(\n 'keystone.token_info')['token']['roles']:\n roles.append(role_info['name'])\n refresher = trust_auth.TokenRefresher(user_plugin,\n cxt.tenant,\n roles)\n except Exception as e:\n LOG.info(_LI(\"Unable to create trust: %s \"\n \"Use the existing user token.\"),\n encodeutils.exception_to_unicode(e))\n\n subject_repo.save(subject, from_state='queued')\n subject.set_data(data, size)\n\n try:\n subject_repo.save(subject, from_state='saving')\n except exception.NotAuthenticated:\n if refresher is not None:\n # request a new token to update an subject in database\n cxt.auth_token = refresher.refresh_token()\n subject_repo = self.gateway.get_repo(req.context)\n subject_repo.save(subject, from_state='saving')\n else:\n raise\n\n try:\n # release resources required for re-auth\n if refresher is not None:\n refresher.release_resources()\n except Exception as e:\n LOG.info(_LI(\"Unable to delete trust %(trust)s: %(msg)s\"),\n {\"trust\": refresher.trust_id,\n \"msg\": encodeutils.exception_to_unicode(e)})\n\n except (subject_store.NotFound,\n exception.SubjectNotFound,\n exception.Conflict):\n msg = (_(\"Subject %s could not be found after upload. \"\n \"The subject may have been deleted during the \"\n \"upload, cleaning up the chunks uploaded.\") %\n subject_id)\n LOG.warn(msg)\n # NOTE(sridevi): Cleaning up the uploaded chunks.\n try:\n subject.delete()\n except exception.SubjectNotFound:\n # NOTE(sridevi): Ignore this exception\n pass\n raise webob.exc.HTTPGone(explanation=msg,\n request=req,\n content_type='text/plain')\n except exception.NotAuthenticated:\n msg = (_(\"Authentication error - the token may have \"\n \"expired during file upload. 
Deleting subject data for \"\n \"%s.\") % subject_id)\n LOG.debug(msg)\n try:\n subject.delete()\n except exception.NotAuthenticated:\n # NOTE: Ignore this exception\n pass\n raise webob.exc.HTTPUnauthorized(explanation=msg,\n request=req,\n content_type='text/plain')\n except ValueError as e:\n LOG.debug(\"Cannot save data for subject %(id)s: %(e)s\",\n {'id': subject_id,\n 'e': encodeutils.exception_to_unicode(e)})\n self._restore(subject_repo, subject)\n raise webob.exc.HTTPBadRequest(\n explanation=encodeutils.exception_to_unicode(e))\n\n except subject_store.StoreAddDisabled:\n msg = _(\"Error in store configuration. Adding subjects to store \"\n \"is disabled.\")\n LOG.exception(msg)\n self._restore(subject_repo, subject)\n raise webob.exc.HTTPGone(explanation=msg, request=req,\n content_type='text/plain')\n\n except exception.InvalidSubjectStatusTransition as e:\n msg = encodeutils.exception_to_unicode(e)\n LOG.exception(msg)\n raise webob.exc.HTTPConflict(explanation=e.msg, request=req)\n\n except exception.Forbidden as e:\n msg = (\"Not allowed to upload subject data for subject %s\" %\n subject_id)\n LOG.debug(msg)\n raise webob.exc.HTTPForbidden(explanation=msg, request=req)\n\n except exception.NotFound as e:\n raise webob.exc.HTTPNotFound(explanation=e.msg)\n\n except subject_store.StorageFull as e:\n msg = _(\"Subject storage media \"\n \"is full: %s\") % encodeutils.exception_to_unicode(e)\n LOG.error(msg)\n self._restore(subject_repo, subject)\n raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,\n request=req)\n\n except exception.StorageQuotaFull as e:\n msg = _(\"Subject exceeds the storage \"\n \"quota: %s\") % encodeutils.exception_to_unicode(e)\n LOG.error(msg)\n self._restore(subject_repo, subject)\n raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,\n request=req)\n\n except exception.SubjectSizeLimitExceeded as e:\n msg = _(\"The incoming subject is \"\n \"too large: %s\") % encodeutils.exception_to_unicode(e)\n LOG.error(msg)\n self._restore(subject_repo, subject)\n raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,\n request=req)\n\n except subject_store.StorageWriteDenied as e:\n msg = _(\"Insufficient permissions on subject \"\n \"storage media: %s\") % encodeutils.exception_to_unicode(e)\n LOG.error(msg)\n self._restore(subject_repo, subject)\n raise webob.exc.HTTPServiceUnavailable(explanation=msg,\n request=req)\n\n except cursive_exception.SignatureVerificationError as e:\n msg = (_LE(\"Signature verification failed for subject %(id)s: %(e)s\")\n % {'id': subject_id,\n 'e': encodeutils.exception_to_unicode(e)})\n LOG.error(msg)\n self._delete(subject_repo, subject)\n raise webob.exc.HTTPBadRequest(explanation=msg)\n\n except webob.exc.HTTPGone as e:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE(\"Failed to upload subject data due to HTTP error\"))\n\n except webob.exc.HTTPError as e:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE(\"Failed to upload subject data due to HTTP error\"))\n self._restore(subject_repo, subject)\n\n except Exception as e:\n with excutils.save_and_reraise_exception():\n LOG.exception(_LE(\"Failed to upload subject data due to \"\n \"internal error\"))\n self._restore(subject_repo, subject)\n\n def download(self, req, subject_id):\n subject_repo = self.gateway.get_repo(req.context)\n try:\n subject = subject_repo.get(subject_id)\n if subject.status == 'deactivated' and not req.context.is_admin:\n msg = _('The requested subject has been deactivated. 
'\n 'Subject data download is forbidden.')\n raise exception.Forbidden(message=msg)\n except exception.NotFound as e:\n raise webob.exc.HTTPNotFound(explanation=e.msg)\n except exception.Forbidden as e:\n LOG.debug(\"User not permitted to download subject '%s'\", subject_id)\n raise webob.exc.HTTPForbidden(explanation=e.msg)\n\n return subject\n\n\nclass RequestDeserializer(wsgi.JSONRequestDeserializer):\n\n def upload(self, request):\n try:\n request.get_content_type(('application/octet-stream',))\n except exception.InvalidContentType as e:\n raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg)\n\n subject_size = request.content_length or None\n return {'size': subject_size, 'data': request.body_file}\n\n\nclass ResponseSerializer(wsgi.JSONResponseSerializer):\n\n def download(self, response, subject):\n offset, chunk_size = 0, None\n range_val = response.request.get_content_range()\n\n if range_val:\n # NOTE(flaper87): if not present, both, start\n # and stop, will be None.\n if range_val.start is not None:\n offset = range_val.start\n\n if range_val.stop is not None:\n chunk_size = range_val.stop - offset\n\n response.headers['Content-Type'] = 'application/octet-stream'\n\n try:\n # NOTE(markwash): filesystem store (and maybe others?) cause a\n # problem with the caching middleware if they are not wrapped in\n # an iterator very strange\n response.app_iter = iter(subject.get_data(offset=offset,\n chunk_size=chunk_size))\n except subject_store.NotFound as e:\n raise webob.exc.HTTPNoContent(explanation=e.msg)\n except subject_store.RemoteServiceUnavailable as e:\n raise webob.exc.HTTPServiceUnavailable(explanation=e.msg)\n except (subject_store.StoreGetNotSupported,\n subject_store.StoreRandomGetNotSupported) as e:\n raise webob.exc.HTTPBadRequest(explanation=e.msg)\n except exception.Forbidden as e:\n LOG.debug(\"User not permitted to download subject '%s'\", subject)\n raise webob.exc.HTTPForbidden(explanation=e.msg)\n # NOTE(saschpe): \"response.app_iter = ...\" currently resets Content-MD5\n # (https://github.com/Pylons/webob/issues/86), so it should be set\n # afterwards for the time being.\n if subject.checksum:\n response.headers['Content-MD5'] = subject.checksum\n # NOTE(markwash): \"response.app_iter = ...\" also erroneously resets the\n # content-length\n response.headers['Content-Length'] = str(subject.size)\n\n def upload(self, response, result):\n response.status_int = 204\n\n\ndef create_resource():\n \"\"\"Subject data resource factory method\"\"\"\n deserializer = RequestDeserializer()\n serializer = ResponseSerializer()\n controller = SubjectDataController()\n return wsgi.Resource(controller, deserializer, serializer)\n","repo_name":"laoyigrace/subject","sub_path":"subject/api/v1/subject_data.py","file_name":"subject_data.py","file_ext":"py","file_size_in_byte":13798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39399555555","text":"import csv\nfrom random import *\nimport math\nimport time\n\nheader = ['question title', 'author', 'assignment/topic', 'date posted', 'message content', 'number of reactions for subscribe to this', 'number of reactions for this is important', 'answer 1 content', 'answer 1 number of reactions for this is helpful']\ndata = ['aTitle', 'anAuthor', 'assignment1', 'date123455', 'my message here', '123', '3456', 'here is my answer', '04902393293942']\n\ntitles = ['Harry', 'Ron', 'Hermione', 'Penelope', 'Fred', 'George', 'Luna', 'Pavarti', 'Padma', 'Percy']\n\n\ndef 
load_words():\n with open('words_dictionary.json') as word_file:\n valid_words = list(word_file.read().split())\n \n return valid_words\n\ndef get_random_word(words):\n random = randint(0, 200000)\n rawWord = words[2*random + 1];\n return rawWord[1: math.floor(len(rawWord) - 2)]\n\ndef generate_phrase(words, numWords):\n s = get_random_word(words)\n for i in range(1, numWords):\n s += \" \" + get_random_word(words)\n return s\n\ndef get_random_author():\n return titles[randint(0, 9)]\n\nif __name__ == '__main__':\n with open('questions.csv', 'w', encoding='UTF8') as f:\n writer = csv.writer(f)\n \n words = load_words()\n\n # write the header\n writer.writerow(header)\n \n for i in range(0, 100000):\n title = generate_phrase(words, 3) \n author = get_random_author()\n assignment = randint(1, 20)\n date = int(time.time()) + randint(-100000, 0) \n message = generate_phrase(words,12)\n numSubscribe = randint(1, 50)\n numImportant = randint(1, 50)\n answer1content = generate_phrase(words, 12)\n numHelpful = randint(1, 50)\n \n data = [title, author, assignment, date, message, numSubscribe, numImportant, answer1content, numHelpful]\n writer.writerow(data)\n","repo_name":"BlakeSanders10497/Project3","sub_path":"Data_Collection/makewordlist.py","file_name":"makewordlist.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40475283995","text":"from mipac.errors import APIError, NoSuchNoteError\nfrom pyrogram import Client, filters\nfrom pyrogram.types import Message\n\nfrom misskey_init import get_misskey_bot\nfrom models.filters import timeline_filter\n\n\n@Client.on_message(\n filters.incoming\n & filters.text\n & filters.reply\n & timeline_filter\n & filters.command(\"delete\")\n)\nasync def delete_command(_: Client, message: Message):\n \"\"\"\n 删除\n \"\"\"\n if not message.reply_to_message:\n return\n if not message.reply_to_message.reply_markup:\n return\n try:\n url = message.reply_to_message.reply_markup.inline_keyboard[0][0].url\n note_id = url.split(\"/\")[-1]\n except (IndexError, AttributeError):\n return\n try:\n misskey_bot = get_misskey_bot(message.from_user.id)\n await misskey_bot.core.api.note.action.delete(note_id)\n await message.reply(\"删除成功\", quote=True)\n except NoSuchNoteError:\n await message.reply(\"该嘟文不存在\", quote=True)\n except APIError as e:\n await message.reply(f\"删除失败 {e}\", quote=True)\n","repo_name":"Xtao-Labs/misskey2telegram","sub_path":"modules/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"28892240699","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.4.2\n# kernelspec:\n# display_name: Python 2\n# language: python\n# name: python2\n# ---\n\n# %%\nimport ipyvolume.pylab as p3\nimport numpy as np\n\n# %%\ns = 1/2**0.5\n# 4 vertices for the tetrahedron\nx = np.array([1., -1, 0, 0])\ny = np.array([0, 0, 1., -1])\nz = np.array([-s, -s, s, s])\n# and 4 surfaces (triangles), where the number refer to the vertex index\ntriangles = [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1,3,2)]\n\n# %%\np3.figure()\n# we draw the tetrahedron\np3.plot_trisurf(x, y, z, triangles=triangles, color='orange')\n# and also mark the vertices\np3.scatter(x, y, z, marker='sphere', color='blue')\np3.xyzlim(-2, 2)\np3.show()\n\n# %%\n\n# f(u, v) -> (u, v, 
u*v**2)\na = np.arange(-5, 5)\nU, V = np.meshgrid(a, a)\nX = U\nY = V\nZ = X*Y**2\np3.figure()\np3.plot_surface(X, Z, Y, color=\"orange\")\np3.plot_wireframe(X, Z, Y, color=\"black\")\np3.show()\n\n# %%\nX = np.arange(-5, 5, 0.25*1)\nY = np.arange(-5, 5, 0.25*1)\nX, Y = np.meshgrid(X, Y)\nR = np.sqrt(X**2 + Y**2)\nZ = 0.1*np.sin(R)\n\n# %%\n\nfrom matplotlib import cm\ncolormap = cm.coolwarm\nznorm = Z - Z.min()\nznorm /= znorm.ptp()\nznorm.min(), znorm.max()\ncolor = colormap(znorm)\n\n# %%\np3.figure()\nmesh = p3.plot_surface(X, Z, Y, color=color[...,0:3])\np3.show()\n\n# %%\n","repo_name":"underworld-community/quagmire-examples-and-workflows","sub_path":"Examples/Unsupported/_Obsolete_Matplotlib3Dplots.py","file_name":"_Obsolete_Matplotlib3Dplots.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31770067266","text":"# Advent of Code 2018, Day 18\n# (c) blu3r4y\n\nimport numpy as np\n\nOPEN, TREE, LUMBER = 0, 1, 2\nNUMBERS = {'.': OPEN, '|': TREE, '#': LUMBER}\nSYMBOLS = {v: k for k, v in NUMBERS.items()}\n\n# adjacent cell offsets\nGRID_OFFSETS = [(-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0)]\n\n\ndef solve(grid, total_minutes):\n # store hashes of seen grids (key) and at which minute (value)\n backlog = {hash(grid.tostring()): 0}\n\n minute = 0\n while minute < total_minutes:\n\n reference = np.copy(grid)\n for (x, y), cell in np.ndenumerate(grid):\n num_tree = num_symbol(neighbors(x, y, reference), TREE)\n num_lumber = num_symbol(neighbors(x, y, reference), LUMBER)\n\n # apply transformation rules\n if cell == OPEN and num_tree >= 3:\n grid[x, y] = TREE\n elif cell == TREE and num_lumber >= 3:\n grid[x, y] = LUMBER\n elif cell == LUMBER and not (num_lumber > 0 and num_tree > 0):\n grid[x, y] = OPEN\n\n minute += 1\n\n # store the hash of this grid\n key = hash(grid.tostring())\n if key not in backlog:\n backlog[key] = minute\n else:\n # cycle-break forward if we have already seen this grid hash before\n delta = (total_minutes - minute) % (minute - backlog[key])\n minute = total_minutes - delta\n\n # count tree and lumber resources\n return num_symbol(grid.ravel(), TREE) * num_symbol(grid.ravel(), LUMBER)\n\n\ndef num_symbol(cells, symbol):\n return sum(1 for c in cells if c == symbol)\n\n\ndef neighbors(x, y, grid):\n return (grid[x + dxy[0]][y + dxy[1]]\n for dxy in GRID_OFFSETS\n if (0 <= x + dxy[0] < grid.shape[0]) and (0 <= y + dxy[1] < grid.shape[1]))\n\n\ndef parse(lines):\n shape = (len(lines), len(lines[0].strip()))\n grid = np.zeros(shape, dtype=int)\n for x in range(shape[0]):\n for y in range(shape[1]):\n grid[x, y] = NUMBERS[lines[x][y]]\n return grid\n\n\nif __name__ == \"__main__\":\n print(solve(parse(open(r\"../assets/day18_demo.txt\").readlines()), 10))\n print(solve(parse(open(r\"../assets/day18.txt\").readlines()), 10))\n\n print(solve(parse(open(r\"../assets/day18_demo.txt\").readlines()), 1000000000))\n print(solve(parse(open(r\"../assets/day18.txt\").readlines()), 1000000000))\n","repo_name":"blu3r4y/AdventOfCode2018","sub_path":"src/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"22498565445","text":"'''Local Use Settings File'''\ntry:\n from .base import *\nexcept ImportError as e:\n raise Exception(\"A base.py file is required to run this project\")\n\n#from django.utils.translation import 
gettext_lazy as _\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: don't run with debug turned on in production!\n\nALLOWED_HOSTS += ['127.0.0.1', 'localhost']\n\n# Django Mail Backend Service to Use With Development\nEMAIL_BACKEND = env.str(\"APP_EMAIL_BACKEND\")\nEMAIL_SUBJECT_PREFIX = env.str(\"APP_EMAIL_SUBJECT_PREFIX\")\nDEFAULT_FROM_EMAIL = env.str(\"APP_DEFAULT_FROM_EMAIL\")\nEMAIL_HOST_USER = env.str(\"APP_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env.str(\"APP_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"APP_EMAIL_USE_TLS\")\nEMAIL_PORT = env.int(\"APP_EMAIL_PORT\")\n\nprint('Using locally native server (sqlite3)')\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(RUN_ROOT, 'db.sqlite3'),\n 'CONN_MAX_AGE': 600, # 10 Minutes\n },\n}\n\nINSTALLED_APPS += [\n 'health_check', # required\n 'health_check.db', # stock Django health checkers\n 'health_check.cache',\n]\n\nCORS_ALLOW_CREDENTIALS = False\nCORS_ORIGIN_ALLOW_ALL = True\n# CORS_URLS_REGEX = r'^/api.*$'\n# CORS_ORIGIN_WHITELIST = ('*',)\n# CORS_ORIGIN_WHITELIST = (\n# 'http://localhost:3000',\n# 'http://localhost:8080',\n# )\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_AGE = 1800 # set just 30 minutes to test\nSESSION_SAVE_EVERY_REQUEST = True\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSECURE_SSL_REDIRECT = False\nSESSION_COOKIE_SECURE = False\nCSRF_COOKIE_SECURE = False\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = 'DENY'\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\n\nNOCAPTCHA = True\n# RECAPTCHA_PROXY = 'http://127.0.0.1:8000'\n#RECAPTCHA_PUBLIC_KEY = CONFIG('RECAPTCHA_PUBLIC_KEY')\n#RECAPTCHA_PRIVATE_KEY = CONFIG('RECAPTCHA_PRIVATE_KEY')\n\nBROKER_URL = 'amqp://user:password@localhost:5672/myvhost'","repo_name":"pssalman/django-base","sub_path":"src/conf/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17039051683","text":"\"\"\"\r\nTokenizers\r\nUsage: python tokenizer.py < corpus.txt\r\n\"\"\"\r\n__author__ = \"Pierre Nugues\"\r\n\r\nimport sys\r\nimport regex as re\r\nimport os\r\n\r\ntext = open('Selma.txt').read()\r\ntext2 = \"\"\"hejsan mitt namn är Gustav, vad heter du? Hej! Jag är 34 år gammal och en väldigt glad mäniiska!\r\nÄr du också glad? 
Det hoppas jag.\"\"\"\r\n\r\n\r\ndef tokenize(text):\r\n \"\"\"uses the nonletters to break the text into words\r\n returns a list of words\"\"\"\r\n # words = re.split('[\\s\\-,;:!?.’\\'«»()–...&‘’“”*—]+', text)\r\n # words = re.split('[^a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’\\-]+', text)\r\n # words = re.split('\\W+', text)\r\n words = re.split('\\P{L}+', text)\r\n words.remove('')\r\n return words\r\n\r\n\r\ndef tokenize2(text):\r\n \"\"\"uses the letters to break the text into words\r\n returns a list of words\"\"\"\r\n # words = re.findall('[a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’\\-]+', text)\r\n # words = re.findall('\\w+', text)\r\n words = re.findall('\\p{L}+', text)\r\n return words\r\n\r\n\r\ndef tokenize3(text):\r\n \"\"\"uses the punctuation and nonletters to break the text into words\r\n returns a list of words\"\"\"\r\n # text = re.sub('[^a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’'()\\-,.?!:;]+', '\\n', text)\r\n # text = re.sub('([,.?!:;)('-])', r'\\n\\1\\n', text)\r\n text = re.sub(r'[^\\p{L}\\p{P}]+', '\\n', text)\r\n text = re.sub(r'(\\p{P})', r'\\n\\1\\n', text)\r\n text = re.sub(r'\\n+', '\\n', text)\r\n return text.split()\r\n\r\n\r\ndef tokenize4(text):\r\n \"\"\"uses the punctuation and symbols to break the text into words\r\n returns a list of words\"\"\"\r\n spaced_tokens = re.sub('([\\p{S}\\p{P}])', r' \\1 ', text)\r\n one_token_per_line = re.sub('\\s+', '\\n', spaced_tokens)\r\n tokens = one_token_per_line.split()\r\n return tokens\r\n\r\n\r\nif __name__ == '__main__':\r\n \"\"\"words = tokenize(text)\r\n for word in words:\r\n print(word)\r\n words = tokenize2(text)\r\n print(words)\"\"\"\r\n # os.system('wc -w Selma.txt')\r\n words = tokenize(text)\r\n print(tokenize(text2))\r\n print(tokenize2(text2))\r\n print(tokenize3(text2))\r\n print(tokenize4(text2))\r\n count = 0\r\n count1 = 0\r\n for word in words:\r\n count1 += 1\r\n if word == 'gick':\r\n count += 1\r\n print(count)\r\n print(count1)\r\n","repo_name":"elt14gfa/EDAN20","sub_path":"Lab2/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5755887082","text":"from flyapi import app\nimport unittest\n\n\"\"\"\nSimple Flask API Server Unit Test\n\nhttps://docs.python.org/3/library/unittest.html#test-discovery\n\"\"\"\n\nclass TheAPITest(unittest.TestCase):\n \"\"\"\n The simplest unit test for the simplest Flask app\n \"\"\"\n def test_api_out(self):\n \"\"\"\n Test getting JSON out from an API\n \"\"\"\n client = app.test_client()\n r = client.get('/example/api/endpoint')\n assert r.status_code==200\n\n\n def test_api_in(self):\n \"\"\"\n Test getting JSON in from an API\n \"\"\"\n client = app.test_client()\n data = {\n 'color':'red'\n }\n r = client.post('/example/api/receiver', data=data)\n assert r.status_code==200\n\n","repo_name":"charlesreid1/flaskadillo","sub_path":"api/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"14155689103","text":"import gym\nimport numpy as np\n\nfrom gym.spaces import Box, Discrete\n\n\nclass DiscretizedObservationWrapper(gym.ObservationWrapper):\n def __init__(self, env, n_bins=10, low=None, high=None):\n super().__init__(env)\n assert isinstance(env.observation_space, Box)\n\n low = self.observation_space.low if low is None else low\n high = self.observation_space.high if 
high is None else high\n\n low = np.array(low)\n high = np.array(high)\n\n self.n_bins = n_bins\n self.val_bins = [np.linspace(l, h, n_bins + 1) for l, h in\n zip(low.flatten(), high.flatten())]\n self.ob_shape = self.observation_space.shape\n\n print(\"New ob space:\", Discrete((n_bins + 1) ** len(low)))\n self.observation_space = Discrete(n_bins ** len(low))\n\n def _convert_to_one_number(self, digits):\n return sum([d * ((self.n_bins + 1) ** i) for i, d in enumerate(digits)])\n\n def observation(self, observation):\n digits = [np.digitize([x], bins)[0]\n for x, bins in zip(observation.flatten(), self.val_bins)]\n return self._convert_to_one_number(digits)\n","repo_name":"lilianweng/deep-reinforcement-learning-gym","sub_path":"playground/utils/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"95"} +{"seq_id":"3682147253","text":"# coding: utf-8\n\n\"\"\" Base graph :\nFICLITDL, E21 Person\n\"\"\"\n\nfrom rdflib import Dataset, URIRef, Literal, Namespace\nfrom rdflib.namespace import RDF, RDFS, XSD, DCTERMS, OWL, PROV\nimport csv\nimport re\n\n# Create an empty Dataset\nd = Dataset()\n\nefrbroo = Namespace('http://erlangen-crm.org/efrbroo/')\necrm = Namespace('http://erlangen-crm.org/current/')\nficlitdl = Namespace('https://w3id.org/ficlitdl/')\nficlitdlo = Namespace('https://w3id.org/ficlitdl/ontology/')\nnp = Namespace('http://www.nanopub.org/nschema#')\nprism = Namespace('http://prismstandard.org/namespaces/basic/2.0/')\npro = Namespace(\"http://purl.org/spar/pro/\")\nseq = Namespace('http://www.ontologydesignpatterns.org/cp/owl/sequence.owl#')\nti = Namespace(\"http://www.ontologydesignpatterns.org/cp/owl/timeinterval.owl#\")\ntvc = Namespace(\"http://www.essepuntato.it/2012/04/tvc/\")\n\n\n\n# Add a namespace prefix to it, just like for Graph\nd.bind('dcterms', DCTERMS)\nd.bind('ecrm', ecrm)\nd.bind(\"efrbroo\", efrbroo)\nd.bind('ficlitdl', ficlitdl)\nd.bind('ficlitdlo', ficlitdlo)\nd.bind('np', np)\nd.bind('ficlitdl-np', URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base/'))\nd.bind(\"owl\", OWL)\nd.bind('prov', PROV)\nd.bind(\"pro\", pro)\nd.bind('rdfs', RDFS)\nd.bind('prism', prism)\nd.bind('seq', seq)\nd.bind('ti', ti)\nd.bind('tvc', tvc)\n\n\n# Declare a base URI for the Giuseppe Raimondi Fonds \nbase_uri = 'https://w3id.org/giuseppe-raimondi-lod/'\n\n# Declare a URI for the nanopub\nnanopub = URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base/')\n\n# Declare a Graph URI to be used to identify a Graph\ngraph_base = URIRef(nanopub + 'assertion')\n\n# Add an empty Graph, identified by graph_base, to the Dataset\nd.graph(identifier=graph_base)\n\nwith open('../../input/quaderni.csv', mode='r') as csv_file:\n\tcsv_reader = csv.DictReader(csv_file , delimiter=';')\n\tfor row in csv_reader:\n\n\t\tinventario = re.findall('(.+?) *$', row['\\ufeffInventario'])\n\t\tsezione = row['Sezione']\n\t\tcollocazione = row['Collocazione']\n\t\tspecificazione = row['Specificazione']\n\t\tsequenza = row['Sequenza']\n\t\tidentificativo = row['Id.']\n\t\tdescrizione_isbd = row['Descrizione isbd']\n\t\tlegami = re.findall(\"(.+?) 
*$\", row[\"Legami con titoli superiori o supplementi\"])\n\t\t\n\t\trecord = URIRef(base_uri + 'notebook/' + inventario[0].lower().replace(' ', '') + '/')\n\n\t\t# physical notebook URI\n\n\t\trec_object = URIRef(record + 'object')\n \t\t\n \t\t# expression URI\n\n\t\trec_expression = URIRef(record + 'text')\n\n\t\t# person URI\n\n\t\tperson = URIRef('https://w3id.org/ficlitdl/' + 'person/') \n\n\n\n# Add quads to base-graph\n\n\t\t# Nanopublication\n\t\td.add((URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base'), RDF.type, np.Nanopublication, URIRef(nanopub + 'head')))\n\t\td.add((URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base'), np.hasAssertion, URIRef(nanopub + 'assertion'), URIRef(nanopub + 'head')))\n\t\td.add((URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base'), np.hasProvenance, URIRef(nanopub + 'provenance'), URIRef(nanopub + 'head')))\n\t\td.add((URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base'), np.hasPublicationInfo, URIRef(nanopub + 'pubinfo'), URIRef(nanopub + 'head')))\n\n\t\t# Provenance of the assertions\n\t\td.add((URIRef(nanopub + 'assertion'), PROV.generatedAtTime, Literal('1993-03' , datatype=XSD.date), URIRef(nanopub + 'provenance')))\n\t\td.add((URIRef(nanopub + 'assertion'), PROV.wasAttributedTo, URIRef('https://w3id.org/ficlitdl/org/sab-ero'), URIRef(nanopub + 'provenance')))\n\n\t\t# Publication info\n\t\td.add((URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base'), PROV.generatedAtTime, Literal('2022-02-28' , datatype=XSD.date), URIRef(nanopub + 'pubinfo')))\n\t\td.add((URIRef('https://w3id.org/ficlitdl/nanopub/nanopub-base'), PROV.wasAttributedTo, URIRef('https://orcid.org/0000-0001-6007-9118'), URIRef(nanopub + 'pubinfo')))\n\n\n\t\t###########################\n\t\t# #\n\t\t# Giuseppe Raimondi #\n\t\t# #\n\t\t###########################\n\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), RDF.type, ecrm.E21_Person, graph_base))\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), RDFS.label, Literal('Giuseppe Raimondi' , lang='it'), graph_base))\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), RDFS.label, Literal('Giuseppe Raimondi' , lang='en'), graph_base))\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), OWL.sameAs, URIRef('http://viaf.org/viaf/7457679'), graph_base))\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), OWL.sameAs, URIRef('https://www.worldcat.org/identities/lccn-n79021749'), graph_base))\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), OWL.sameAs, URIRef('https://www.wikidata.org/wiki/Q3771293'), graph_base))\n\t\td.add((URIRef(person + 'giuseppe-raimondi'), pro.holdsRoleInTime, URIRef(rec_expression + '/author'), graph_base))\n\n\n# persone menzionate nella descrizione isbd (persone menzionate nel testo TODO con trascrizioni)\n\nmy_dict = {}\n\nwith open('../../input/quaderni_ner_person.tsv', mode='r') as csv_file:\n\tcsv_reader = csv.DictReader(csv_file , delimiter='\\t')\n\tfor row in csv_reader:\n\n\t\tinventario = row[\"inventario\"].lower().replace(' ', '')\n\t\twd = row[\"wikidata\"]\n\t\twd_code = row[\"wikidata_code\"]\n\n\t\tmy_dict[inventario] = list()\n\t\tmy_dict[inventario].append((wd, wd_code))\n\n\t\trecord = URIRef(base_uri + 'notebook/' + inventario + '/')\n\n \t\t# expression URI\n\t\trec_expression = URIRef(record + 'text')\n\n\t\t# person URI\n\t\tperson = URIRef('https://w3id.org/ficlitdl/' + 'person/')\n\n\t\tfor item in my_dict[inventario]:\n\t\t\tmentioned_person = URIRef(person + item[0].lower().replace(' ', '-').replace('.', '').replace(',', '').replace('è', 'e').replace('é', 
'e').replace('à', 'a').replace('á', 'a').replace('ö', 'o').replace('ç', 'c'))\n\t\t\td.add((mentioned_person, RDF.type, ecrm.E21_Person, graph_base))\n\t\t\td.add((mentioned_person, RDFS.label, Literal(item[0], lang='it'), graph_base))\n\t\t\td.add((mentioned_person, RDFS.label, Literal(item[0], lang='en'), graph_base))\n\t\t\tif 'Q' in item[1]:\n\t\t\t\td.add((mentioned_person, OWL.sameAs, URIRef('https://www.wikidata.org/wiki/' + item[1]), graph_base))\n\t\t\telif 'viaf' in item[1]:\n\t\t\t\td.add((mentioned_person, OWL.sameAs, URIRef(item[1]), graph_base))\n\n\n\n\n\n# TriG\nd.serialize(destination=\"../../dataset/trig/quaderni_base-graph-E21.trig\", format='trig')\n\n# N-Quads\nd.serialize(destination=\"../../dataset/nquads/quaderni_base-graph-E21.nq\", format='nquads')","repo_name":"fgiovannetti/giuseppe-raimondi-lod","sub_path":"scripts/quaderni/base-graph-E21.py","file_name":"base-graph-E21.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"37712261197","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 6 22:05:43 2018\n\n\n@author: Ringo\n\"\"\"\nimport plotly\nimport plotly.graph_objs as go\nfrom helpers import build_info_arrow, is_fiction, black_rgba, white_rgba\n\nfic_color = 'rgba(51, 107, 135, 1)' # orange\nnonfic_color = 'rgba(255, 120, 80, 1)' # blue\n\ndef arrow(books):\n '''\n Builds a chart with vertical arrows pointing from my rating to average\n user rating. Arrows are colored by fiction/non-fiction and ordered in\n the x dimension by date read at.\n '''\n books.sort(key=lambda x: x['read_at'])\n x = [i+1 for i in range(len(books))]\n my_ratings = [book['rating'] for book in books]\n book_info = [build_info_arrow(book) for book in books]\n shapes = []\n build_arrows(books, shapes)\n trace_me = build_scatter(x, my_ratings, book_info, 'circle', white_rgba())\n trace_legend = build_legend()\n trace_avgs = build_avg_ratings(books)\n data = [trace_me, trace_legend]\n data.extend(trace_avgs)\n layout = go.Layout(\n title='My Ratings vs. Average User Ratings',\n xaxis=build_axis(0, 71, None, {}),\n yaxis=build_axis(\n 2,\n 5,\n title='Rating',\n titlefont=dict(\n family='Open Sans, monospace',\n size=14,\n color='rgba(0, 0, 0, 1)',\n )\n ),\n font=dict(\n family='Open Sans, monospace',\n ),\n showlegend=False,\n shapes=shapes,\n )\n fig = go.Figure(\n data=data,\n layout=layout,\n )\n fig = plotly.offline.plot(fig, filename='arrows.html')\n\ndef build_avg_ratings(books):\n '''\n Build average rating traces separately because we need the marker \n symbols to be either triangle up or triangle down and either fic_color\n or nonfic_color which gives four combinations. 
Just use four traces.\n '''\n x_avg = {\n \"up\": {\"fic\": [], \"nonfic\": []}, \n \"down\": {\"fic\": [], \"nonfic\": []},\n }\n ratings_avg = {\n \"up\": {\"fic\": [], \"nonfic\": []}, \n \"down\": {\"fic\": [], \"nonfic\": []},\n }\n for i in range(len(books)):\n book = books[i]\n relevant_x, relevant_rating = None, None\n if book['rating'] > book['average_rating']:\n relevant_x = x_avg['down']\n relevant_rating = ratings_avg['down']\n if book['rating'] < book['average_rating']:\n relevant_x = x_avg['up']\n relevant_rating = ratings_avg['up']\n if is_fiction(book['genre']):\n relevant_x = relevant_x['fic']\n relevant_rating = relevant_rating['fic']\n if not is_fiction(book['genre']):\n relevant_x = relevant_x['nonfic']\n relevant_rating = relevant_rating['nonfic']\n relevant_x.append(i+1)\n relevant_rating.append(book['average_rating'])\n trace_avg_up_nonfic = build_scatter(\n x_avg['up']['nonfic'], \n ratings_avg['up']['nonfic'], \n None, \n 'triangle-up',\n nonfic_color[:-2] + '.5)',\n )\n trace_avg_up_fic = build_scatter(\n x_avg['up']['fic'], \n ratings_avg['up']['fic'], \n None, \n 'triangle-up',\n fic_color[:-2] + '.5)',\n )\n trace_avg_down_nonfic = build_scatter(\n x_avg['down']['nonfic'], \n ratings_avg['down']['nonfic'], \n None, \n 'triangle-down',\n nonfic_color,\n )\n trace_avg_down_fic = build_scatter(\n x_avg['down']['fic'], \n ratings_avg['down']['fic'], \n None, \n 'triangle-down',\n fic_color,\n )\n return [trace_avg_up_nonfic, trace_avg_up_fic, trace_avg_down_nonfic, \\\n trace_avg_down_fic]\ndef build_legend():\n '''\n Builds a legend using a scatter plot data trace.\n '''\n return go.Scatter(\n x=[15, 15],\n y=[3, 2.9],\n marker = dict(\n size = [10, 10],\n color = [fic_color, nonfic_color],\n ),\n mode='markers+text',\n name='Markers and Text',\n text=['Fiction', 'Non-Fiction'],\n textposition='right',\n textfont=dict(\n family='Open Sans, monospace',\n size=14,\n color=black_rgba(),\n ),\n hoverinfo='none',\n )\n\ndef build_axis(range_min, range_max, title, titlefont):\n '''\n Returns an axis configuration dictionary for use in a plotly layout.\n '''\n return dict(\n range=[range_min, range_max],\n title=title,\n titlefont=titlefont,\n showgrid=False,\n zeroline = False,\n showline=False,\n autotick=True,\n ticks='',\n showticklabels=False\n )\n \ndef build_scatter(x, y, text, symbol, color):\n '''\n Return a data trace with certain parameters set to the input arguments.\n '''\n return go.Scatter(\n x=x,\n y=y,\n mode='markers',\n text=text,\n hoverinfo=\"text\", # this disables the coordinate info\n marker=dict(\n size = 10,\n symbol = symbol,\n color = color,\n ),\n )\n\ndef build_arrows(books, shapes):\n '''\n Construct the arrows using shapes.\n '''\n for i in range(len(books)):\n book = books[i]\n arrow_color = fic_color if is_fiction(book['genre']) else nonfic_color\n # upward arrows have a lower opacity for easier reading of the chart\n if book['rating'] < book['average_rating']:\n arrow_color = arrow_color[:-2] + '.5)'\n shapes.append({\n 'type': 'line',\n 'x0': i+1,\n 'y0': book['average_rating'],\n 'x1': i+1,\n 'y1': book['rating'],\n 'line': {\n 'color': arrow_color,\n 'width': 2,\n },\n })\n\n \n \n \n","repo_name":"cheungringo/reading_challenge_2017","sub_path":"arrow.py","file_name":"arrow.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72602814394","text":"n = int(input(\"Digite a quantidade: \"))\ne = 0\nu = 0\n\nwhile (e != n+1):\n e = e + 
1\n while (u != e+1 ):\n print(u * str(e), end = \" \")\n u = u + 1\nprint()","repo_name":"Thiago309/CDD-4.0--Aulas-","sub_path":"Codigos_Python/AULA 06/Exercicio_04.py","file_name":"Exercicio_04.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"23744853153","text":"\"\"\"\nGiven two strings s and t, determine if they are isomorphic.\n\nTwo strings are isomorphic if the characters in s can be replaced to get t.\n\nAll occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character but a character may map to itself.\n\nExample 1:\n Input: s = \"egg\", t = \"add\"\n Output: true\n\nExample 2:\n Input: s = \"foo\", t = \"bar\"\n Output: false\n\nExample 3:\n Input: s = \"paper\", t = \"title\"\n Output: true\n\nNote:\n You may assume both s and t have the same length.\n\"\"\"\n\n\ndef isIsomorphic(s, t): # 28ms, faster than 89.57%.\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n mapping, length = {}, len(s)\n for i in range(length):\n if s[i] not in mapping:\n if t[i] not in mapping.values(): # ***\n mapping[s[i]] = t[i]\n else:\n return False\n elif mapping[s[i]] != t[i]:\n return False\n return True\n\n\ndef isIsomorphic2(s, t): # 28ms, faster than 89.57%. Best solution from the submissions.\n if len(set(s))!=len(set(t)):\n return False\n s_dic={}\n for counter,i in enumerate(s):\n print(counter, i, s[counter], t[counter])\n if i not in s_dic:\n s_dic[i]=t[counter]\n else:\n if s_dic[i]!=t[counter]:\n return False\n return True\n\n\nprint(isIsomorphic(\"egg\", \"add\"))\nprint(isIsomorphic(\"ab\", \"aa\"))\n","repo_name":"XunzhaoYu/LeetCode","sub_path":"easy/205.Isomorphic Strings.py","file_name":"205.Isomorphic Strings.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29431663711","text":"import nonebot\nfrom nonebot.adapters.mirai2 import Adapter as MIRAI2Adapter \n# 初始化 NoneBot\nnonebot.init()\n\n# 注册适配器\ndriver = nonebot.get_driver()\ndriver.register_adapter(MIRAI2Adapter)\n\n# 在这里加载插件\nnonebot.load_builtin_plugins(\"echo\") # 内置插件\nnonebot.load_plugins(\"girlfriend-assistant\\plugins\") \n\nif __name__ == \"__main__\":\n nonebot.run()","repo_name":"li1553770945/girlfriend-assistant","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"72945075514","text":"from django.urls import path\nfrom . 
import views\n\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\n\nurlpatterns = [\n path(\"restaurants/\", views.RestaurantListCreateView.as_view(), name=\"restaurant\"),\n path(\"menus/\", views.MenuListCreateView.as_view(), name=\"menu\"),\n path(\"votes/\", views.VoteCreateView.as_view(), name=\"vote\"),\n path(\"employees/\", views.EmployeeListCreateView.as_view(), name=\"employee-list\"),\n path(\"token/\", TokenObtainPairView.as_view(), name=\"token_obtain_pair\"),\n path(\"token/refresh/\", TokenRefreshView.as_view(), name=\"token_refresh\"),\n]\n","repo_name":"andwellbas/LunchDecisionAPI","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38022964325","text":"import random\n\n# Definir la clase de Proceso Minero\nclass ProcesoMinero:\n def __init__(self, id_proceso, duracion, beneficio, componentes_40):\n self.id_proceso = id_proceso\n self.duracion = duracion\n self.beneficio = beneficio\n self.componentes_40 = componentes_40\n\n# Definir los procesos mineros\nprocesos_mineros = [\n ProcesoMinero(0, 10, 100, [\"Automatización y control inteligente\"]), # Exploración\n ProcesoMinero(1, 8, 80, [\"Monitoreo en tiempo real\"]), # Perforación\n ProcesoMinero(2, 6, 60, [\"Análisis de datos y big data\"]), # Voladura\n ProcesoMinero(3, 7, 70, []), # Carga\n ProcesoMinero(4, 5, 50, [\"Automatización y control inteligente\"]), # Transporte\n ProcesoMinero(5, 4, 40, []), # Trituración\n ProcesoMinero(6, 6, 60, [\"Automatización y control inteligente\", \"Ciberseguridad\"]), # Separación\n ProcesoMinero(7, 9, 90, []), # Refinación\n ProcesoMinero(8, 7, 70, []), # Desarrollo de infraestructuras\n ProcesoMinero(9, 5, 50, []), # Rehabilitación ambiental\n ProcesoMinero(10, 6, 60, [\"Monitoreo en tiempo real\", \"Análisis de datos y big data\"]), # Proceso adicional 1\n ProcesoMinero(11, 4, 40, [\"Automatización y control inteligente\"]), # Proceso adicional 2\n ProcesoMinero(12, 7, 70, [\"Simulación y modelado\"]), # Proceso adicional 3\n ProcesoMinero(13, 8, 80, [\"Automatización y control inteligente\"]), # Proceso adicional 4\n ProcesoMinero(14, 5, 50, [\"Ciberseguridad\"]) # Proceso adicional 5\n]\n\n# Definir la función de evaluación multiobjetivo\ndef evaluar(cromosoma):\n rendimiento_economico = 0\n beneficio_trabajadores = 0\n duracion_total = 0\n componentes_40_presentes = set()\n for i, gen in enumerate(cromosoma):\n proceso = procesos_mineros[gen]\n duracion_total += proceso.duracion\n rendimiento_economico += proceso.duracion * proceso.beneficio\n beneficio_trabajadores += proceso.beneficio\n componentes_40_presentes.update(proceso.componentes_40)\n if i > 0:\n # Aplicar restricciones adicionales basadas en las relaciones entre los procesos\n proceso_anterior = procesos_mineros[cromosoma[i - 1]]\n if \"Automatización y control inteligente\" in proceso.componentes_40 and \\\n \"Automatización y control inteligente\" not in proceso_anterior.componentes_40:\n rendimiento_economico *= 0.5 # Penalizar si no hay continuidad en la automatización y control inteligente\n return rendimiento_economico, beneficio_trabajadores, duracion_total, list(componentes_40_presentes)\n\n# Definir las operaciones genéticas\ndef seleccion(cromosomas, fitness):\n cromosomas_seleccionados = []\n while len(cromosomas_seleccionados) < len(cromosomas):\n idx = random.randint(0, len(cromosomas) - 1)\n if es_no_dominada(fitness[idx], 
cromosomas, fitness):\n cromosomas_seleccionados.append(cromosomas[idx])\n return cromosomas_seleccionados\n\ndef es_no_dominada(fitness, cromosomas, fitness_cromosomas):\n for i, cromosoma in enumerate(cromosomas):\n if fitness_cromosomas[i] != fitness:\n if (fitness_cromosomas[i][0] >= fitness[0] and fitness_cromosomas[i][1] >= fitness[1]) or \\\n (fitness_cromosomas[i][0] > fitness[0] and fitness_cromosomas[i][1] > fitness[1]):\n return False\n return True\n\ndef cruza(cromosoma_actual, otro_cromosoma):\n punto_cruza = random.randint(1, len(cromosoma_actual) - 1)\n nuevo_cromosoma = cromosoma_actual[:punto_cruza] + otro_cromosoma[punto_cruza:]\n return nuevo_cromosoma\n\ndef mutacion(cromosoma, prob_mutacion):\n for i in range(len(cromosoma)):\n if random.random() < prob_mutacion:\n nuevo_gen = random.randint(0, len(procesos_mineros) - 1)\n cromosoma[i] = nuevo_gen\n return cromosoma\n\n# Definir los parámetros del algoritmo genético\ntamano_poblacion = 50\nnum_generaciones = 50\nprob_mutacion = 0.1\ntasa_elitismo = 0.1 # Tasa de elitismo del 10%\n\n# Inicializar la población inicial\npoblacion = []\nfor _ in range(tamano_poblacion):\n cromosoma = [random.randint(0, len(procesos_mineros) - 1) for _ in range(len(procesos_mineros))]\n poblacion.append(cromosoma)\n\n# Ejecutar el algoritmo genético\nfor generacion in range(num_generaciones):\n fitness = []\n for cromosoma in poblacion:\n fitness.append(evaluar(cromosoma))\n\n # Obtener las mejores soluciones de la generación actual\n mejores_soluciones = []\n for cromosoma, fit in zip(poblacion, fitness):\n if es_no_dominada(fit, poblacion, fitness):\n mejores_soluciones.append((cromosoma, fit))\n\n # Ordenar las mejores soluciones por su dominancia\n mejores_soluciones.sort(key=lambda x: x[1])\n\n # Obtener las soluciones elitistas\n num_elitismo = int(tamano_poblacion * tasa_elitismo)\n soluciones_elitistas = [sol[0] for sol in mejores_soluciones[:num_elitismo]]\n\n nueva_generacion = []\n\n # Agregar las soluciones elitistas a la nueva generación sin cambios\n nueva_generacion.extend(soluciones_elitistas)\n\n # Generar el resto de la nueva generación mediante selección, cruza y mutación\n while len(nueva_generacion) < tamano_poblacion:\n cromosoma_actual = random.choice(poblacion)\n otro_cromosoma = random.choice(poblacion)\n nuevo_cromosoma = cruza(cromosoma_actual, otro_cromosoma)\n nuevo_cromosoma = mutacion(nuevo_cromosoma, prob_mutacion)\n nueva_generacion.append(nuevo_cromosoma)\n\n poblacion = nueva_generacion\n\n # Obtener el mejor cromosoma de la generación actual y mostrar resultados\n fitness_generacion = [evaluar(cromosoma) for cromosoma in poblacion]\n mejor_cromosoma_generacion = poblacion[fitness_generacion.index(max(fitness_generacion))]\n\n print(\"Generación:\", generacion)\n print(\"Mejor solución encontrada hasta ahora:\")\n print(\"Cromosoma:\", mejor_cromosoma_generacion)\n print(\"Rendimiento Económico:\", evaluar(mejor_cromosoma_generacion)[0])\n print(\"Beneficio Trabajadores:\", evaluar(mejor_cromosoma_generacion)[1])\n print(\"Duración Total:\", evaluar(mejor_cromosoma_generacion)[2])\n print(\"Componentes 4.0 presentes:\", evaluar(mejor_cromosoma_generacion)[3])\n print(\"Fitness:\", max(fitness_generacion, key=lambda x: x[0]))\n print(\"============================================\")\n\n# Obtener el mejor cromosoma de la última generación\nfitness = [evaluar(cromosoma) for cromosoma in poblacion]\nmejor_cromosoma = poblacion[fitness.index(max(fitness))]\nmejor_rendimiento_economico, 
mejor_beneficio_trabajadores, mejor_duracion_total, componentes_40_presentes = evaluar(mejor_cromosoma)\n\n# Verificar reglas básicas de dominancia\ncromosomas_seleccionados = [] # Variable para almacenar las soluciones seleccionadas\nfor solucion in poblacion:\n es_no_dominante = True\n for otra_solucion in poblacion:\n if otra_solucion != solucion:\n if (otra_solucion[0] >= solucion[0] and otra_solucion[1] >= solucion[1]) or \\\n (otra_solucion[0] > solucion[0] and otra_solucion[1] > solucion[1]):\n es_no_dominante = False\n break\n if es_no_dominante:\n cromosomas_seleccionados.append(solucion) # Agregar la solución no dominada\n\n# Verificar que las soluciones seleccionadas sean no dominadas\nfor solucion in cromosomas_seleccionados:\n for otra_solucion in cromosomas_seleccionados:\n if otra_solucion != solucion:\n assert not ((otra_solucion[0] >= solucion[0] and otra_solucion[1] >= solucion[1]) or \\\n (otra_solucion[0] > solucion[0] and otra_solucion[1] > solucion[1])), \\\n \"Error: una solución dominante fue seleccionada en lugar de una no dominada\"\n\n# Imprimir los resultados\nprint(\"Mejor solución encontrada:\")\nprint(\"Cromosoma:\", mejor_cromosoma)\nprint(\"Rendimiento Económico:\", mejor_rendimiento_economico)\nprint(\"Beneficio Trabajadores:\", mejor_beneficio_trabajadores)\nprint(\"Duración Total:\", mejor_duracion_total)\nprint(\"Componentes 4.0 presentes:\", componentes_40_presentes)\nprint(\"Fitness:\", max(fitness, key=lambda x: x[0]))\n\n\n","repo_name":"gustavoalcantara1993/Algoritmo-genetico-multiobjetivo-python","sub_path":"AG Multiobjetivo FUNCIONANDO.py","file_name":"AG Multiobjetivo FUNCIONANDO.py","file_ext":"py","file_size_in_byte":8200,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74348250871","text":"import broadbean as bb\nfrom os import listdir\nfrom os.path import isfile, join\nfrom pathlib import Path\nimport pathlib\nfrom PyQt5.QtCore import QCoreApplication,Qt\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PyQt5.QtWidgets import QApplication, QWidget, QFrame,QMainWindow, QPushButton, QAction, QMessageBox, QLineEdit, QLabel, QSizePolicy\nfrom PyQt5.QtWidgets import QCheckBox,QDialog,QTableWidget,QTableWidgetItem,QVBoxLayout,QHBoxLayout,QComboBox,QGridLayout\nfrom broadbean.plotting import plotter\nfrom pulsequantum.dftable import QTableWidgetDF\nfrom pulsequantum.annotateshape import annotateshape\nfrom pulsequantum.elem_from_plot import elem_on_plot\nfrom pulsequantum.elem_from_plot import elem_from_lists\n\nnchans=2;\n\nramp = bb.PulseAtoms.ramp # Globally defined ramp, element, and sequence\ngelem = bb.Element()\ngseq = bb.Sequence()\n\n\nclass Gelem():\n def __init__(self, AWG=None, gelem=None, libpath = 'pulselib/'):\n self.gelem = bb.Element()\n self.table = QTableWidgetDF()\n self.awgclock=1.2e9\n self.libpath = join(pathlib.Path(__file__).parents[0], libpath)\n self.seq_files = [f for f in listdir(self.libpath) if isfile(join(self.libpath, f))]\n self.corrDflag=0\n self.w = None\n self.ch_x = None\n self.ch_y = None\n self.ramp = None\n\n def generateElement(self):\n # Make element from pulse table\n self.gelem = bb.Element()\n h = int((self.table.columnCount()-2)/3)\n prevlvl = 0\n v = self.table.rowCount()\n for col in range(2,h+2):\n chno=int(self.table.horizontalHeaderItem(col).text()[2]);\n gp = bb.BluePrint()\n gp.setSR(self.awgclock);\n for row in range(v):\n nm=self.table.verticalHeaderItem(row).text();\n dr=(float(self.table.item(row,0).text()))*1e-6;\n 
rmp=int(self.table.item(row,1).text());\n lvl=(float(self.table.item(row,col).text()))*self.divider_ch[col-2]*1e-3;\n mkr1=int(self.table.item(row,h+2).text());\n mkr2=int(self.table.item(row,h+3).text());\n if rmp==0:\n gp.insertSegment(row, ramp, (lvl, lvl), name=nm, dur=dr);\n if rmp==1:\n if row==0:\n gp.insertSegment(row, ramp, (0, lvl), name=nm, dur=dr);\n else:\n gp.insertSegment(row, ramp, (prevlvl, lvl), name=nm, dur=dr);\n if mkr1==1:\n gp.setSegmentMarker(nm, (0,dr), 1);\n if mkr2==1:\n gp.setSegmentMarker(nm, (0,dr), 2);\n prevlvl=lvl;\n self.gelem.addBluePrint(chno, gp);\n h=h+2;\n self.gelem.validateDurations();\n\n def coordinates_from_plot(self, id: int) -> None:\n self.ch_x, self.ch_y, self.ramp = elem_on_plot(id)\n\n def elem_from_lists_update_table(self,\n duration: float = 1e-6, dac_a: float = 0, dac_b: float = 0,\n divider_a: float = 1.0, divider_b: float = 1.0,\n SR: float = 1e9,\n chx: int = 1, chy: int = 2) -> None: \n self.gelem = elem_from_lists(self.ch_x, self.ch_y, self.ramp, duration, dac_a, dac_b,\n divider_a, divider_b, SR, chx, chy)\n self.from_element()\n\n#############################################################################################\n# The correction D pulse keeps the centre of gravity of the pulse at the DC value (voltage\n# seen by the same when there is no pulsing. Not always used or needed.\n#############################################################################################\n def correctionD(self):\n if self.corrDflag==1:\n print(\"Correction D pulse already exists.\")\n return;\n self.corrDflag=1;\n awgclockinus=self.awgclock/1e6;\n tottime=0;\n dpos=1;#position of correction D pulse, hardcoded for now\n self.table.addPulse('corrD',dpos);\n #Set D pulse time to 60% of total pulse cycle time\n for row in range(self.table.rowCount()):\n nm=self.table.verticalHeaderItem(row).text();\n if nm!='corrD':\n tottime=tottime+(float(self.table.item(row,0).text()));\n timeD=round(tottime/1.65*(awgclockinus))/awgclockinus;\n self.table.setItem(dpos,0, QTableWidgetItem(\"%f\"%timeD));\n \n #Correct all voltages in a loop\n for column in range(6):\n tottimevolt=0;\n colnm=self.table.horizontalHeaderItem(column).text();\n for row in range(self.table.rowCount()):\n rownm=self.table.verticalHeaderItem(row).text();\n rmp=int(self.table.item(row,1).text());\n if (rownm!='corrD') and (colnm=='CH1' or colnm=='CH2' or colnm=='CH3' or colnm=='CH4'):\n if rmp==0:\n tottimevolt=tottimevolt+((float(self.table.item(row,0).text()))*(float(self.table.item(row,column).text())));\n if rmp==1:\n if row==0:\n tottimevolt=tottimevolt+((float(self.table.item(row,0).text()))*(float(self.table.item(row,column).text()))/2);\n else:\n tottimevolt=tottimevolt+((float(self.table.item(row,0).text()))*((float(self.table.item(row,column).text()))+(float(self.table.item(row-1,column).text())))/2);\n voltD=-tottimevolt/timeD;\n if (column!=0) and (column!=1) and (colnm=='CH1' or colnm=='CH2' or colnm=='CH3' or colnm=='CH4'):\n self.table.setItem(dpos,column, QTableWidgetItem(\"%f\"%voltD));\n \n\n\n############################################################################################ \n\n def write_element(self, path:str,SR:float = 1e9,SeqAmp:float = 10e-3,SeqOffset:float = 0) -> None:\n if self.gelem.SR == None:\n self.gelem.setSR(SR)\n seqtmp = bb.Sequence()\n seqtmp.addElement(1, self.gelem)\n for ch in self.gelem.channels:\n seqtmp.setChannelAmplitude(ch, SeqAmp)\n seqtmp.setChannelOffset(ch, 0)\n seqtmp.setSR(self.gelem.SR)\n seqtmp.write_to_json(path)\n\n def 
saveElement(self,path: str) -> None:\n #self.gelem.write_to_json(path)\n self.write_element(path)\n self.seq_files = [f for f in listdir(self.libpath) if isfile(join(self.libpath, f))] \n\n\n # From Element\n def from_element(self):\n \n elem_description = self.gelem.description\n seg_name = []\n seg_durations = []\n seg_ramp = []\n values = []\n marker1 = []\n marker2 = []\n chan_names = list(elem_description.keys())\n for chan in chan_names:\n ch_values = []\n channels_marker1 = []\n channels_marker2 = []\n marker1_rel = elem_description[chan]['marker1_rel']\n marker2_rel = elem_description[chan]['marker2_rel']\n seg_mar_list = list(elem_description[chan].keys())\n seg_list = [s for s in seg_mar_list if 'segment' in s]\n for i, seg in enumerate(seg_list):\n seg_digt = elem_description[chan][seg]\n tmp_name = seg_digt['name']\n tmp_durations = seg_digt[\"durations\"]\n if tmp_name not in seg_name:\n seg_name.append(tmp_name)\n seg_durations.append(tmp_durations)\n if seg_digt['arguments']['start'] != seg_digt['arguments']['stop']:\n seg_ramp.append(1)\n else:\n seg_ramp.append(0)\n ch_values.append(seg_digt['arguments']['stop'])\n if marker1_rel[i] == (0,0):\n channels_marker1.append(0)\n else:\n channels_marker1.append(1)\n \n if marker2_rel[i] == (0,0):\n channels_marker2.append(0)\n else:\n channels_marker2.append(1) \n values.append(ch_values)\n marker1.append(channels_marker1)\n marker2.append(channels_marker2)\n \n self.nchans = len(values)\n nsegs = len(values[0])\n\n\n self.table.setColumnCount((self.nchans*3)+2)\n self.table.setRowCount(nsegs)\n \n #Set horizontal headers\n h=self.nchans+1;\n self.table.setHorizontalHeaderItem(0, QTableWidgetItem(\"Time (us)\"));\n self.table.setHorizontalHeaderItem(1, QTableWidgetItem(\"Ramp? 1=Yes\"));\n for i in range(self.nchans): # TODO use the correct channel number as name\n self.table.setHorizontalHeaderItem(i+2, QTableWidgetItem(\"CH{}\".format(chan_names[i])))\n self.table.setHorizontalHeaderItem(h+1, QTableWidgetItem(\"CH{}M1\".format(chan_names[i])))\n self.table.setHorizontalHeaderItem(h+2, QTableWidgetItem(\"CH{}M2\".format(chan_names[i])))\n h=h+2;\n \n #Set vertical headers\n #nlist= seg_name\n for i, name in enumerate(seg_name):\n self.table.setVerticalHeaderItem(i, QTableWidgetItem(name));\n \n \n for seg in range(nsegs):\n duration = str(seg_durations[seg]/1e-6)\n self.table.setItem(seg,0, QTableWidgetItem(duration))\n ramp_yes = str(seg_ramp[seg])\n self.table.setItem(seg,0, QTableWidgetItem(duration))\n self.table.setItem(seg,1, QTableWidgetItem(ramp_yes))\n for ch in range(self.nchans):\n val = str(values[ch][seg]/(self.divider_ch[ch]*1e-3))\n mark1 = str(marker1[ch][seg])\n mark2 = str(marker2[ch][seg])\n self.table.setItem(seg,ch+2, QTableWidgetItem(val))\n self.table.setItem(seg,ch*2+4, QTableWidgetItem(mark1))\n self.table.setItem(seg,ch*2+5, QTableWidgetItem(mark2))\n\n def loadElement(self, path):\n seq = bb.Sequence.init_from_json(path)\n self.gelem = seq.element(1)\n self.from_element()\n self.generateElement() # TODO IS THIS NEEDED \n \n def plotElement(self,plotid,gatex,gatey,channelx,channely,dividerx,dividery):\n #if self.w is None:\n # self.w = PlotWindow(pulse=self.gelem)\n # self.w.show()\n self.generateElement()\n plotter(self.gelem)\n if plotid != 0:\n annotateshape(plotid,gatex,gatey,self.gelem,channelx,channely,dividerx,dividery)\n \n\n#############################################################################################\n\n\n\n\n\n \n 
","repo_name":"qdev-dk/pulsequantum","sub_path":"pulsequantum/pulsebuilding.py","file_name":"pulsebuilding.py","file_ext":"py","file_size_in_byte":10754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"14865295019","text":"print('Importing SiSo Wrapper')\r\nimport os\r\nimport cv2 \r\nimport numpy as np\r\n\r\n# IMPORT additional modules\r\nimport sys\r\nimport time\r\nfrom datetime import datetime\r\nimport os\r\n\r\n#tensorflow\r\nimport tensorflow as tf\r\nfrom time import sleep\r\nfrom tensorflow.python.platform import gfile\r\n\r\ntf.compat.v1.disable_eager_execution()\r\n\r\ntry:\r\n sys.path.append(os.path.join(os.environ['SISODIR5'],\r\n 'SDKWrapper/PythonWrapper/python36/lib/'))\r\n sys.path.append(os.path.join(os.environ['SISODIR5'],\r\n 'SDKWrapper/PythonWrapper/python36/bin/'))\r\n\r\n import SiSoPyInterface as s\r\nexcept ImportError:\r\n raise ImportError('SiSo module not loaded successfully')\r\n \r\n\r\n# for \"s.getArrayFrom\", to handle grabbed image as NumPy array\r\nprint('Importing NumPy', end='')\r\nimport numpy as np\r\nprint('Version', np.__version__)\r\n\r\n#%% Kamera Ayarları\r\n\r\ndef getNrOfBoards():\r\n\tnrOfBoards = 0\r\n\t(err, buffer, buflen) = s.Fg_getSystemInformation(None, s.INFO_NR_OF_BOARDS, s.PROP_ID_VALUE, 0)\r\n\tif (err == s.FG_OK):\r\n\t\tnrOfBoards = int(buffer)\r\n \r\n\treturn nrOfBoards\r\n\r\ndef selectBoardDialog():\r\n\tmaxNrOfboards = 10\r\n\tnrOfBoardsFound = 0\r\n\tnrOfBoardsPresent = getNrOfBoards()\r\n\tmaxBoardIndex = -1\r\n\tminBoardIndex = None\r\n\r\n\tif (nrOfBoardsPresent <= 0):\r\n\t\tprint(\"No Boards found!\")\r\n\t\treturn -1\r\n\t\r\n\tprint('Found', nrOfBoardsPresent, 'Board(s)')\r\n\t\r\n\tfor i in range(0, maxNrOfboards):\r\n\t\tskipIndex = False\r\n\t\tboardType = s.Fg_getBoardType(i);\r\n\t\tif boardType == s.PN_MICROENABLE4AS1CL:\r\n\t\t\tboardName = \"MicroEnable IV AS1-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE4AD1CL:\r\n\t\t\tboardName = \"MicroEnable IV AD1-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE4VD1CL:\r\n\t\t\tboardName = \"MicroEnable IV VD1-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE4AD4CL:\r\n\t\t\tboardName = \"MicroEnable IV AD4-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE4VD4CL:\r\n\t\t\tboardName = \"MicroEnable IV VD4-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE4AQ4GE:\r\n\t\t\tboardName = \"MicroEnable IV AQ4-GE\"\r\n\t\telif boardType == s.PN_MICROENABLE4VQ4GE:\r\n\t\t\tboardName = \"MicroEnable IV VQ4-GE\"\r\n\t\telif boardType == s.PN_MICROENABLE5AQ8CXP6B:\r\n\t\t\tboardName = \"MicroEnable V AQ8-CXP\"\r\n\t\telif boardType == s.PN_MICROENABLE5VQ8CXP6B:\r\n\t\t\tboardName = \"MicroEnable V VQ8-CXP\"\r\n\t\telif boardType == s.PN_MICROENABLE5VD8CL:\r\n\t\t\tboardName = \"MicroEnable 5 VD8-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE5AD8CL:\r\n\t\t\tboardName = \"MicroEnable 5 AD8-CL\"\r\n\t\telif boardType == s.PN_MICROENABLE5AQ8CXP6D:\r\n\t\t\tboardName = \"MicroEnable 5 AQ8-CXP6D\"\r\n\t\telif boardType == s.PN_MICROENABLE5VQ8CXP6D:\r\n\t\t\tboardName = \"MicroEnable 5 VQ8-CXP6D\"\r\n\t\telif boardType == s.PN_MICROENABLE5AD8CLHSF2:\r\n\t\t\tboardName = \"MicroEnable 5 AD8-CLHS-F2\"\r\n\t\telif boardType == s.PN_MICROENABLE5_LIGHTBRIDGE_ACL:\r\n\t\t\tboardName = \"MicroEnable 5 LB-ACL\"\r\n\t\telif boardType == s.PN_MICROENABLE5_LIGHTBRIDGE_VCL:\r\n\t\t\tboardName = \"MicroEnable 5 LB-VCL\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_ACL:\r\n\t\t\tboardName = \"MicroEnable 5 MA-ACL\"\r\n\t\telif boardType == 
s.PN_MICROENABLE5_MARATHON_ACX_SP:\r\n\t\t\tboardName = \"MicroEnable 5 MA-ACX-SP\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_ACX_DP:\r\n\t\t\tboardName = \"MicroEnable 5 MA-ACX-DP\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_ACX_QP:\r\n\t\t\tboardName = \"MicroEnable 5 MA-ACX-QP\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_AF2_DP:\r\n\t\t\tboardName = \"MicroEnable 5 MA-AF2-DP\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_VCL:\r\n\t\t\tboardName = \"MicroEnable 5 MA-VCL\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_VCX_QP:\r\n\t\t\tboardName = \"MicroEnable 5 MA-VCX-QP\"\r\n\t\telif boardType == s.PN_MICROENABLE5_MARATHON_VF2_DP:\r\n\t\t\tboardName = \"MicroEnable 5 MA-VF2-DP\"\r\n\t\telse:\r\n\t\t\tboardName = \"Unknown / Unsupported Board\"\r\n\t\t\tskipIndex = True\r\n\t\t\r\n\t\tif not skipIndex:\r\n\t\t\tsys.stdout.write(\"Board ID \" + str(i) + \": \" + boardName + \" 0x\" + format(boardType, '02X') + \"\\n\")\r\n\t\t\tnrOfBoardsFound = nrOfBoardsFound + 1\r\n\t\t\tmaxBoardIndex = i\r\n\t\t\tif minBoardIndex == None: minBoardIndex = i\r\n\t\t\t\r\n\t\tif nrOfBoardsFound >= nrOfBoardsPresent:\r\n\t\t\tbreak\r\n\r\n\t\tif nrOfBoardsFound < 0:\r\n\t\t\tbreak\r\n\t\r\n\tif nrOfBoardsFound <= 0:\r\n\t\tprint(\"No Boards found!\")\r\n\t\treturn -1\r\n\t\r\n\tinStr = \"=====================================\\n\\nPlease choose a board[{0}-{1}]: \".format(minBoardIndex, maxBoardIndex)\r\n\tuserInput = input(inStr)\r\n\r\n\twhile (not userInput.isdigit()) or (int(userInput) > maxBoardIndex):\r\n\t\tinStr = \"Invalid selection, retry[{0}-{1}]: \".format(minBoardIndex, maxBoardIndex)\r\n\t\tuserInput = input(inStr)\r\n\r\n\treturn int(userInput)\r\n\r\nboard_id = selectBoardDialog()\r\nprint(board_id)\r\n\r\nif board_id < 0:\r\n print(\"not selected board !!\")\r\n exit(1)\r\n\r\n\r\n# definition of resolution\r\nwidth = 1024\r\nheight = 1024\r\nsamplePerPixel = 1\r\nbytePerSample = 1\r\nisSlave = False\r\nuseCameraSimulator = True\r\ncamPort = s.PORT_A\r\n\r\n# number of buffers for acquisition\r\nnbBuffers = 4\r\ntotalBufferSize = width * height * samplePerPixel * bytePerSample * nbBuffers\r\n\r\n# number of image to acquire\r\nnrOfPicturesToGrab = 100\r\nframeRate = 10\r\n\r\n# initialize hub\r\nhub_path = \"median_blop.hap\"\r\nfg = s.Fg_InitEx(hub_path, board_id, 0);\r\n\r\n# error handling\r\nerr = s.Fg_getLastErrorNumber(fg)\r\nmes = s.Fg_getErrorDescription(err)\r\n\r\nif err < 0:\r\n\tprint(\"Error\", err, \":\", mes)\r\n\tsys.exit()\r\nelse:\r\n\tprint(\"ok\")\r\n\r\n# allocating memory\r\nmemHandle = s.Fg_AllocMemEx(fg, totalBufferSize, nbBuffers)\r\n\r\n\r\n# Set Applet Parameters\r\nerr = s.Fg_setParameterWithInt(fg, s.FG_WIDTH, width, camPort)\r\nif (err < 0):\r\n\tprint(\"Fg_setParameter(FG_WIDTH) failed: \", s.Fg_getLastErrorDescription(fg))\r\n\ts.Fg_FreeMemEx(fg, memHandle)\r\n\ts.Fg_FreeGrabber(fg)\r\n\texit(err)\r\n\r\nerr = s.Fg_setParameterWithInt(fg, s.FG_HEIGHT, height, camPort)\r\nif (err < 0):\r\n\tprint(\"Fg_setParameter(FG_HEIGHT) failed: \", s.Fg_getLastErrorDescription(fg))\r\n\ts.Fg_FreeMemEx(fg, memHandle)\r\n\ts.Fg_FreeGrabber(fg)\r\n\texit(err)\r\n\r\n# Read back settings\r\n(err, oWidth) = s.Fg_getParameterWithInt(fg, s.FG_WIDTH, camPort)\r\nif (err == 0):\r\n\tprint('Width =', oWidth)\r\n(err, oHeight) = s.Fg_getParameterWithInt(fg, s.FG_HEIGHT, camPort)\r\nif (err == 0):\r\n\tprint('Height =', oHeight)\r\n(err, oString) = s.Fg_getParameterWithString(fg, s.FG_HAP_FILE, camPort)\r\nif (err == 0):\r\n\tprint('Hap File 
=', oString)\r\n\r\n\r\n# create a display window\r\ndispId0 = s.CreateDisplay(8 * bytePerSample * samplePerPixel, width, height)\r\ns.SetBufferWidth(dispId0, width, height)\r\n\r\n\r\n\r\ncur_pic_nr = 0\r\nlast_pic_nr = 0\r\nimg = \"will point to last grabbed image\"\r\nnImg = \"will point to Numpy image/matrix\"\r\n\r\nwin_name_img = \"Source Image (SiSo Runtime)\"\r\nwin_name_res = \"Result Image (openCV)\"\r\n\r\n# start acquisition - görüntü almaya başlıoruz.\r\nerr = s.Fg_AcquireEx(fg, camPort, nrOfPicturesToGrab, s.ACQ_STANDARD, memHandle)\r\nif (err != 0):\r\n print('Fg_AcquireEx() failed:', s.Fg_getLastErrorDescription(fg))\r\n s.Fg_FreeMemEx(fg, memHandle)\r\n s.CloseDisplay(dispId0)\r\n s.Fg_FreeGrabber(fg)\r\n exit(err)\r\n\r\n\r\n# definition of resolution\r\nwidth = 1024\r\nheight = 1024\r\nsamplePerPixel = 1\r\nbytePerSample = 1\r\nisSlave = False\r\nuseCameraSimulator = True\r\ncamPort = s.PORT_A\r\n\r\n# number of buffers for acquisition\r\nnbBuffers = 4\r\ntotalBufferSize = width * height * samplePerPixel * bytePerSample * nbBuffers\r\n\r\n# number of image to acquire\r\nnrOfPicturesToGrab = 1000\r\nframeRate = 10\r\n\r\n# initialize hub\r\nhub_path = \"median_blop.hap\"\r\nfg = s.Fg_InitEx(hub_path, board_id, 0);\r\n\r\n# error handling\r\nerr = s.Fg_getLastErrorNumber(fg)\r\nmes = s.Fg_getErrorDescription(err)\r\n\r\nif err < 0:\r\n\tprint(\"Error\", err, \":\", mes)\r\n\tsys.exit()\r\nelse:\r\n\tprint(\"ok\")\r\n\r\n# allocating memory\r\nmemHandle = s.Fg_AllocMemEx(fg, totalBufferSize, nbBuffers)\r\n\r\n\r\n# Set Applet Parameters\r\nerr = s.Fg_setParameterWithInt(fg, s.FG_WIDTH, width, camPort)\r\nif (err < 0):\r\n\tprint(\"Fg_setParameter(FG_WIDTH) failed: \", s.Fg_getLastErrorDescription(fg))\r\n\ts.Fg_FreeMemEx(fg, memHandle)\r\n\ts.Fg_FreeGrabber(fg)\r\n\texit(err)\r\n\r\nerr = s.Fg_setParameterWithInt(fg, s.FG_HEIGHT, height, camPort)\r\nif (err < 0):\r\n\tprint(\"Fg_setParameter(FG_HEIGHT) failed: \", s.Fg_getLastErrorDescription(fg))\r\n\ts.Fg_FreeMemEx(fg, memHandle)\r\n\ts.Fg_FreeGrabber(fg)\r\n\texit(err)\r\n\r\n# Read back settings\r\n(err, oWidth) = s.Fg_getParameterWithInt(fg, s.FG_WIDTH, camPort)\r\nif (err == 0):\r\n\tprint('Width =', oWidth)\r\n(err, oHeight) = s.Fg_getParameterWithInt(fg, s.FG_HEIGHT, camPort)\r\nif (err == 0):\r\n\tprint('Height =', oHeight)\r\n(err, oString) = s.Fg_getParameterWithString(fg, s.FG_HAP_FILE, camPort)\r\nif (err == 0):\r\n\tprint('Hap File =', oString)\r\n\r\n\r\n# create a display window\r\ndispId0 = s.CreateDisplay(8 * bytePerSample * samplePerPixel, width, height)\r\ns.SetBufferWidth(dispId0, width, height)\r\n\r\n\r\n\r\ncur_pic_nr = 0\r\nlast_pic_nr = 0\r\nimg = \"will point to last grabbed image\"\r\nnImg = \"will point to Numpy image/matrix\"\r\n\r\nwin_name_img = \"Source Image (SiSo Runtime)\"\r\nwin_name_res = \"Result Image (openCV)\"\r\n\r\nprint(\"Acquisition started\")\r\n#total_time = []\r\none_image_time = []\r\n\r\n# start acquisition - görüntü almaya başlıoruz.\r\nerr = s.Fg_AcquireEx(fg, camPort, nrOfPicturesToGrab, s.ACQ_STANDARD, memHandle)\r\nif (err != 0):\r\n print('Fg_AcquireEx() failed:', s.Fg_getLastErrorDescription(fg))\r\n s.Fg_FreeMemEx(fg, memHandle)\r\n s.CloseDisplay(dispId0)\r\n s.Fg_FreeGrabber(fg)\r\n exit(err)\r\n\r\nwhile cur_pic_nr < nrOfPicturesToGrab:\r\n cur_pic_nr = s.Fg_getLastPicNumberBlockingEx(fg, last_pic_nr + 1, camPort, 5, memHandle)\r\n print(cur_pic_nr)\r\n if (cur_pic_nr < 0):\r\n print(\"Fg_getLastPicNumberBlockingEx(\", (last_pic_nr + 1), \") failed: \", 
(s.Fg_getLastErrorDescription(fg)))\r\n # s.g_stopAcquire(fg, camPort)\r\n # s.g_FreeMemEx(fg, memHandle)\r\n # s.loseDisplay(dispId0)\r\n s.g_FreeGrabber(fg)\r\n exit(cur_pic_nr)\r\n \r\n last_pic_nr = cur_pic_nr\r\n \r\n # get image pointer\r\n img = s.Fg_getImagePtrEx(fg, last_pic_nr, camPort, memHandle)\r\n\r\n # handle this as Numpy array (using same memory, NO copy)\r\n frame = s.getArrayFrom(img, width, height)\r\n \r\n img_width=frame.shape[1]\r\n img_height=frame.shape[0]\r\n frame_blob = cv2.dnn.blobFromImage(frame,1/255,(416,416),swapRB=True,crop=False) # Görüntüyü 4 boyutlu tensöre çevirme işlemi.\r\n\r\n labels = [\"mask\",\"no mask\"]\r\n\r\n\r\n colors=[\"0,255,255\",\"255,0,0\",\"255,255,0\",\"0,255,0\"]\r\n colors=[np.array(color.split(\",\")).astype(\"int\") for color in colors]\r\n colors=np.array(colors) # Tek bir array de tuttuk.\r\n colors=np.tile(colors,(18,1)) # Büyütme işlemi yapıyoruz.\r\n \r\n \r\n cfg=\"yolov4_tiny.cfg\"\r\n weights=\"yolov4_tiny_detector_last.weights\"\r\n model=cv2.dnn.readNetFromDarknet(cfg,weights)\r\n \r\n model.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\r\n model.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\r\n\r\n \r\n layers=model.getLayerNames()\r\n output_layer=[layers[layer[0]-1] for layer in model.getUnconnectedOutLayers()] # Modelde ki çıktı katmanlarını aldık.\r\n\r\n model.setInput(frame_blob)\r\n\r\n detection_layers=model.forward(output_layer)\r\n\r\n #----------- Non Maximum Supression Operation-1 ----------\r\n ids_list=[]\r\n boxes_list=[]\r\n confidence_list=[]\r\n #------------ End Of Opertation 1 -------------\r\n\r\n for detection_layer in detection_layers:\r\n for object_detection in detection_layer:\r\n scores=object_detection[5:]\r\n predicted_id=np.argmax(scores)\r\n confidence=scores[predicted_id]\r\n if confidence > 0.30:\r\n label=labels[predicted_id]\r\n bounding_box=object_detection[0:4] * np.array([img_width,img_height,img_width,img_height])\r\n (box_center_x,box_center_y,box_width,box_height)=bounding_box.astype(\"int\")\r\n\r\n start_x=int(box_center_x-(box_width/2))\r\n start_y =int(box_center_y - (box_height / 2))\r\n\r\n # ----------- Non Maximum Supression Operation-2 ----------\r\n ids_list.append(predicted_id)\r\n confidence_list.append(float(confidence))\r\n boxes_list.append([start_x,start_y,int(box_width),int(box_height)])\r\n # ------------ End Of Opertation 2 -------------\r\n\r\n # ----------- Non Maximum Supression Operation-3 ----------\r\n max_ids=cv2.dnn.NMSBoxes(boxes_list,confidence_list,0.5,0.4)\r\n \r\n for max_id in max_ids:\r\n max_class_id = max_id[0]\r\n box = boxes_list[max_class_id]\r\n\r\n start_x = box[0]\r\n start_y = box[1]\r\n box_width = box[2]\r\n box_height = box[3]\r\n\r\n predicted_id = ids_list[max_class_id]\r\n label = labels[predicted_id]\r\n confidence = confidence_list[max_class_id]\r\n # ------------ End Of Opertation 3 -------------\r\n\r\n end_x = start_x + box_width\r\n end_y = start_y + box_height\r\n\r\n box_color = colors[predicted_id]\r\n box_color = [int(each) for each in box_color]\r\n \r\n label = \"{}: {:.2f}%\".format(label, confidence * 100)\r\n print(\"Predicted_object: \", label)\r\n \r\n cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), box_color, 3)\r\n cv2.putText(frame, label, (start_x, start_y - 10), cv2.FONT_ITALIC, 0.6, box_color, 2)\r\n\r\n t, _ = model.getPerfProfile()\r\n text = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())\r\n cv2.putText(frame, text, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)\r\n 
cv2.imshow(\"Detection\",frame)\r\n # out.write(frame)\r\n \r\n \r\n \r\n # if cv2.waitKey(1) & 0xff == ord(\"q\"):\r\n # camera.StopGrabbing()\r\n # # camera.Release()\r\n # camera.Close()\r\n # # out.release()\r\n # break\r\n\r\n\r\n\r\ncv2.destroyAllWindows()","repo_name":"Fomkar/Python_OpenCv_-mage_processing","sub_path":"yüz tespiti işlemleri/Tiny_yolo_face/tiny_cameralink.py","file_name":"tiny_cameralink.py","file_ext":"py","file_size_in_byte":13997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24156746001","text":"def solution(arr1, arr2):\n answer = []\n for ar1 in arr1:\n temp = []\n for j in range(len(arr2[0])):\n temp.append(sum(list(map(lambda x:x[0]*x[1], zip(ar1, [x[j] for x in arr2])))))\n answer.append(temp)\n return answer\n \n\nprint(solution([\n [2, 3, 2], \n [4, 2, 4], \n [3, 1, 4]],\n \n [[5, 4, 3], \n [2, 4, 1], \n [3, 1, 1]]))\n","repo_name":"songyw0517/Algorithm","sub_path":"programmers_algorithm/python/LEVEL_2/행렬의 곱셈.py","file_name":"행렬의 곱셈.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29885095492","text":"import sys\nimport os\nproject_root = os.path.dirname(os.path.dirname(__file__)) \nsys.path.append(project_root)\n\nfrom functions.database.selecting_data import *\n\nimport unittest\n\nclass Test_selecting_data(unittest.TestCase):\n\tdef test_select_data(self):\n df = pd.DataFrame({\"RJ\": [1, 2, 3]})\n\t\t\n expected_output = \"etnia invalida\"\n self.assertEqual(select_data(df, \"caju\"), expected_output)\n\n expected_output = \"a etnia precisa de ser uma string\"\n self.assertEqual(select_data(df, 5), expected_output)\n\t\n\nif __name__ == \"__main__\":\n\tunittest.main()","repo_name":"ddanieldma/LP-2023-A1","sub_path":"tests/test_selecting_data.py","file_name":"test_selecting_data.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"73177804153","text":"from flask import Flask,jsonify,Blueprint,request,current_app\nfrom flask_cors import *\nfrom flask_k8s.k8s_decode import MyEncoder\nfrom flask_k8s.util import *\nfrom .cluster import get_event_list_by_name\n\nfrom kubernetes import client,config\nfrom kubernetes.client.rest import ApiException\n\n# 导入蓝图\nfrom flask_k8s.k8s import k8s\n\n\n@k8s.route('/get_statefulset_list',methods=('GET','POST'))\ndef get_statefulset_list():\n data = json.loads(request.get_data().decode(\"utf-8\"))\n namespace = handle_input(data.get(\"namespace\"))\n current_app.logger.debug(\"接收到的数据:{}\".format(namespace))\n myclient = client.AppsV1Api()\n # statefulsets = myclient.list_stateful_set_for_all_namespaces()\n try:\n if namespace == \"\" or namespace == \"all\":\n statefulsets = myclient.list_stateful_set_for_all_namespaces()\n else:\n statefulsets = myclient.list_namespaced_stateful_set(namespace=namespace)\n except ApiException as e:\n if isinstance(e.body,dict):\n body = json.loads(e.body)\n message = body['message']\n else:\n body = e.body\n message = body\n msg = {\"status\": e.status, \"reason\": e.reason, \"message\": message}\n current_app.logger.debug(msg)\n return jsonify({'error': '获取列表失败', \"msg\": msg})\n\n i = 0\n statefulset_list = []\n for statefulset in statefulsets.items:\n if (i>=0):\n # print(statefulset)\n meta = statefulset.metadata\n name = meta.name\n create_time = time_to_string(meta.creation_timestamp)\n cluster_name = 
meta.cluster_name\n labels = meta.labels\n namespace = meta.namespace \n spec = statefulset.spec\n template = spec.template\n template_spec = template.spec \n \n replicas = spec.replicas\n # selector = spec.selector\n service_name = spec.service_name\n # update_strategy = spec.update_strategy\n # affinity = template_spec.affinity\n containers = template_spec.containers\n container_list = []\n for container in containers:\n image = container.image\n volume_mounts = container.volume_mounts\n env = container.env\n mycontainer = {\"image\":image,\"volume_mounts\":volume_mounts,\"env\":env}\n container_list.append(mycontainer)\n host_network = template_spec.host_network\n # node_selector = template_spec.node_selector\n \n tolerations = template_spec.tolerations\n \n pvc_list = []\n pvc_templates = spec.volume_claim_templates\n #bug TypeError: 'NoneType' object is not iterable\n if pvc_templates != None:\n for pvc_template in pvc_templates: \n pvc_annotations= pvc_template.metadata.annotations\n pvc_name = pvc_template.metadata.name\n pvc_access_mode = pvc_template.spec.access_modes[0]\n pvc_capacity = pvc_template.spec.resources.requests['storage']\n pvc_status = pvc_template.status.phase\n my_pvc = {\"pvc_name\":pvc_name,\"pvc_access_mode\":pvc_access_mode,\"pvc_capacity\":pvc_capacity,\"pvc_status\":pvc_status,\"pvc_annotations\":pvc_annotations}\n pvc_list.append(my_pvc)\n\n info = {}\n info[\"replicas\"] = replicas\n info[\"labels\"] = labels\n info[\"service_name\"] = service_name\n info[\"host_network\"] = host_network\n info[\"tolerations\"] = tolerations\n \n my_state = {}\n my_state[\"name\"] = name\n my_state[\"namespace\"] = namespace\n my_state[\"info\"] = info\n my_state[\"container_list\"] =container_list\n my_state[\"pvc_list\"] = pvc_list\n my_state[\"create_time\"] = create_time\n \n statefulset_list.append(my_state)\n \n i = i +1 \n return json.dumps(statefulset_list,indent=4,cls=MyEncoder)\n\n\n@k8s.route('/delete_statefulset',methods=('GET','POST'))\ndef delete_statefulset():\n data = json.loads(request.get_data().decode(\"utf-8\"))\n current_app.logger.debug(\"k8s接收到的数据:{}\".format(data))\n name = handle_input(data.get('name'))\n namespace = handle_input(data.get(\"namespace\"))\n \n if namespace == '' or namespace == 'all':\n return simple_error_handle(\"namespace不能为空,并且不能选择all\")\n myclient = client.AppsV1Api()\n # myclient = client.AppsV1beta1Api()\n try:\n # body=client.V1DeleteOptions(propagation_policy='Foreground',grace_period_seconds=5)\n result = myclient.delete_namespaced_stateful_set(namespace=namespace,name=name)\n except ApiException as e:\n body = json.loads(e.body)\n msg={\"status\":e.status,\"reason\":e.reason,\"message\":body['message']}\n return jsonify({'error': '删除statefulset异常',\"msg\":msg})\n return jsonify({\"ok\":\"删除成功\"})","repo_name":"cmlfxz/flask-k8s","sub_path":"flask_k8s/k8s/statefulSet.py","file_name":"statefulSet.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33830246877","text":"'''\nCreated on 08-Dec-2018\n\n@author: tanumoy\n'''\nfrom basudebpur_agro_erp.view.template import template\nfrom basudebpur_agro_erp.jinja_template import jinja_template\nfrom django.http.response import HttpResponse\nimport requests\nfrom basudebpur_agro_erp.external_urls import PURCHASE_TRANSACTION\nfrom basudebpur_agro_erp.permission.purchase_permissions import hasUpdatePurchaseRecordAccess\nimport json\nfrom django.shortcuts import redirect\n\nclass 
purchase_close_view(template):\n '''\n classdocs\n '''\n\n\n def get(self, request, transaction_number):\n r = requests.get(url = PURCHASE_TRANSACTION, params = {'transaction_number':transaction_number}) \n if r.status_code is 200:\n json_data = r.json()\n \n if hasUpdatePurchaseRecordAccess(request.user):\n data = json_data['purchase_trx_details'][0]\n data['last_updated_by'] = request.user.username\n if 'receipt_details' in data.keys():\n data['order_status'] = 'CANCELLED'\n for line in data['receipt_details']:\n if line['receipt_header_status'] == 'COMPLETE':\n data['order_status'] = 'COMPLETE'\n break\n \n else:\n data['order_status'] = 'CANCELLED'\n \n if 'transaction_header_id' in data.keys():\n data.pop('transaction_header_id')\n if 'transaction_date' in data.keys():\n data.pop('transaction_date')\n if 'buyer_id' in data.keys():\n data.pop('buyer_id')\n if 'buyer_name' in data.keys():\n data.pop('buyer_name')\n if 'supplier_id' in data.keys():\n data.pop('supplier_id')\n if 'purchase_trx_lines' in data.keys():\n data.pop('purchase_trx_lines')\n if 'amount' in data.keys():\n data.pop('amount')\n if 'created_by' in data.keys():\n data.pop('created_by')\n if 'creation_date' in data.keys():\n data.pop('creation_date')\n if 'last_update_date' in data.keys():\n data.pop('last_update_date')\n if 'receipt_details' in data.keys():\n data.pop('receipt_details')\n jsondata = json.dumps(data)\n r = requests.put('{}'.format(PURCHASE_TRANSACTION), json = jsondata) \n if r.status_code is 200:\n return redirect('/purchase/')\n \n elif r.status_code == 422:\n to_json = json.loads(r.content)['errors']\n return HttpResponse(json.dumps(to_json), status = 422)\n \n else:\n template = jinja_template.get_template('internal_server_error.html')\n return HttpResponse(template.render(request))\n else:\n template = jinja_template.get_template('access_denied.html')\n return HttpResponse(template.render(request))\n else:\n template = jinja_template.get_template('internal_server_error.html')\n return HttpResponse(template.render(request))\n ","repo_name":"tanumoychakraborty/basudebpur_agro_erp","sub_path":"purchase/view/purchase_close_view.py","file_name":"purchase_close_view.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"75017191031","text":"from functools import reduce\nimport sys\n\n\nN = int(input())\nnums = []\nfor _ in range(N):\n\tnums.append(int(sys.stdin.readline()))\nnums.sort()\ndiffs = []\n\nfor i in range(1,len(nums)):\n\tdiff = nums[i]-nums[i-1]\n\tdiffs.append(diff)\n\ndef get_gcd(a,b):\n\tif b == 0 :\n\t\treturn a\n\treturn get_gcd(b,a%b)\n\ngcd = reduce(get_gcd,diffs)\ntemp = []\n\nfor i in range(2,int(gcd**(1/2)+1)):\n\tif gcd%i==0:\n\t\tprint(i,end=\" \")\n\t\ttemp.append(i)\n\nfor i in temp[::-1]:\n\tif gcd//i not in temp:\n\t\tprint(gcd//i,end=\" \")\n\nprint(gcd)\n","repo_name":"bellamyee/algorithms","sub_path":"baekjoon/2981(sol).py","file_name":"2981(sol).py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26256352816","text":"import torch\nimport torch.nn as nn\nfrom torch_geometric.nn import SAGEConv\n\nclass GCNConvNet(nn.Module):\n\n def __init__(self, in_channels=3, out_channels=6):\n super(GCNConvNet, self).__init__()\n\n self.conv1 = SAGEConv(in_channels, out_channels=128, aggr=\"max\")\n self.conv2 = SAGEConv(in_channels=128, out_channels=128, aggr=\"max\")\n self.conv3 = 
SAGEConv(in_channels=128, out_channels=128, aggr=\"max\")\n self.dropout = nn.Dropout(0.25)\n self.lin1 = torch.nn.Linear(128, 128)\n self.lin2 = torch.nn.Linear(128, 64)\n self.lin3 = torch.nn.Linear(64, out_channels)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, batch):\n\n x, edge_index, batch = batch.x, batch.edge_index, batch.batch\n\n x = self.conv1(x, edge_index)\n x = self.conv2(x, edge_index)\n x = self.conv3(x, edge_index)\n\n x = torch.relu(self.lin1(x))\n x = self.dropout(x)\n x = torch.relu(self.lin2(x))\n x = self.lin3(x)\n x = self.sigmoid(x)\n return x\n","repo_name":"jbecker7326/GNN4CHEM","sub_path":"models/GNN_GS.py","file_name":"GNN_GS.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36317684552","text":"def traverse(a, row, col):\n if row < 0 or row >= len(a) or col < 0 or col >= len(a[0]) or a[row][col] != 0:\n return 0\n\n size = 1\n a[row][col] = -1\n\n for dr in range(-1, 2):\n for dc in range(-1, 2):\n size += traverse(a, row+dr, col+dc)\n\n return size\n\ndef pond_sizes(a):\n counts = []\n for row in range(len(a)):\n for col in range(len(a[row])):\n if a[row][col] == 0:\n c = traverse(a, row, col)\n counts.append(c)\n return counts\n\na = [[0, 2, 1, 0],\n [0, 1, 0, 1],\n [1, 1, 0, 1],\n [0, 1, 0, 1]]\n\nprint(pond_sizes(a))","repo_name":"nderkach/algorithmic-challenges","sub_path":"pond_sizes.py","file_name":"pond_sizes.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21228086518","text":"from datetime import datetime as dt\nimport os\n\nimport numpy as np\nimport sklearn\nimport pytest\n\nfrom yatsm.algorithms.yatsm import YATSM\n\nhere = os.path.dirname(__file__)\n\n\n# REAL DATA\n@pytest.fixture(scope='session')\ndef unmasked_ts(request):\n \"\"\" Return dict of an unmasked example timeseries\n\n Dict contains:\n dates: ordinal dates\n Y: observations of 7 bands + Fmask\n X: design matrix\n design_str: Patsy design specification\n design_dict: Patsy column name indices for design matrix X\n\n Mask based on Fmask values (retain 0 and 1) and optical data min/max\n of 0 to 10,000.\n \"\"\"\n f = os.path.join(here, 'data', 'example_timeseries.npz')\n return np.load(f)\n\n\n@pytest.fixture(scope='session')\ndef masked_ts(request):\n \"\"\" Return dict of a masked example timeseries\n\n Dict contains:\n dates: ordinal dates\n Y: observations of 7 bands + Fmask\n X: design matrix\n design_str: Patsy design specification\n design_dict: Patsy column name indices for design matrix X\n\n Mask based on Fmask values (retain 0 and 1) and optical data min/max\n of 0 to 10,000.\n \"\"\"\n f = os.path.join(here, 'data', 'example_timeseries_masked.npz')\n return np.load(f)\n\n\n# SIMULATED DATA\ndef setup_dummy_YATSM(X, Y, dates, i_breaks):\n \"\"\" Setup a dummy YATSM model\n\n Args:\n X (np.ndarray): n x p features\n Y (np.ndarray): n_series x n independent data\n dates (np.ndarray): n dates\n i_breaks (iterable): indices of ``dates`` representing break dates\n (can be zero or nonzero, but len(i_breaks) is len(yatsm.record))\n\n Returns:\n YATSM model\n \"\"\"\n n = dates.size\n yatsm = YATSM()\n yatsm.X, yatsm.Y, yatsm.dates = X, Y, dates\n yatsm.n_coef, yatsm.n_series = X.shape[1], Y.shape[0]\n yatsm.models = np.array([sklearn.clone(yatsm.estimator)\n for i in range(yatsm.n_series)])\n yatsm.test_indices = np.arange(yatsm.n_series)\n n_models = len(i_breaks)\n 
yatsm.record = np.hstack([yatsm.record_template] * n_models)\n\n def populate_record(yatsm, i_rec, i_start, i_end, i_break):\n yatsm.record[i_rec]['start'] = yatsm.dates[i_start]\n yatsm.record[i_rec]['end'] = yatsm.dates[i_end]\n yatsm.record[i_rec]['break'] = (yatsm.dates[i_break] if i_break\n else i_break)\n yatsm.fit_models(X[i_start:i_end, :], Y[:, i_start:i_end])\n for i, m in enumerate(yatsm.models):\n yatsm.record[i_rec]['coef'][:, i] = m.coef\n yatsm.record[i_rec]['rmse'][i] = m.rmse\n return yatsm\n\n i_start = 0\n i_end = i_breaks[0] - 1 if i_breaks[0] else n - 1\n i_break = i_breaks[0]\n yatsm = populate_record(yatsm, 0, i_start, i_end, i_break)\n\n for idx, i_break in enumerate(i_breaks[1:]):\n i_start = i_breaks[idx] + 1\n i_end = i_break - 1 if i_break else n - 1\n yatsm = populate_record(yatsm, idx + 1, i_start, i_end, i_break)\n\n return yatsm\n\n\ndef _sim_no_change_data():\n \"\"\" Return a simulated timeseries with no change\n \"\"\"\n np.random.seed(123456789)\n dates = np.arange(dt.strptime('2000-01-01', '%Y-%m-%d').toordinal(),\n dt.strptime('2005-01-01', '%Y-%m-%d').toordinal(),\n 16)\n n = dates.size\n X = np.column_stack((np.ones(n), dates)) # n x p\n _y = np.linspace(0, 10, n) + np.random.standard_normal(n)\n Y = np.array([_y] * 2) # nseries x n\n return X, Y, dates\n\n\n@pytest.fixture(scope='module')\ndef sim_nochange(request):\n \"\"\" Return a dummy YATSM model container with a no-change dataset\n\n \"No-change\" dataset is simply a timeseries drawn from samples of one\n standard normal.\n \"\"\"\n X, Y, dates = _sim_no_change_data()\n return setup_dummy_YATSM(X, Y, dates, [0])\n\n\n@pytest.fixture(scope='module')\ndef sim_no_real_change_1(request):\n \"\"\" Return a dummy YATSM model container with a spurious change\n\n \"Spurious\" dataset is simply a timeseries drawn from samples of one\n standard normal, but with a record indicating that there was a change.\n \"\"\"\n X, Y, dates = _sim_no_change_data()\n n = dates.size\n # Put a break somewhere in the middle\n return setup_dummy_YATSM(X, Y, dates, [n // 2, 0])\n\n\n@pytest.fixture(scope='module')\ndef sim_no_real_change_2(request):\n \"\"\" Return a dummy YATSM model container with two spurious changes\n\n \"Spurious\" dataset is simply a timeseries drawn from samples of one\n standard normal, but with a record indicating that there was a change.\n \"\"\"\n X, Y, dates = _sim_no_change_data()\n n = dates.size\n # Put two breaks somewhere in the middle\n return setup_dummy_YATSM(X, Y, dates, [n // 4, n // 2, 0])\n\n\n@pytest.fixture(scope='module')\ndef sim_real_change(request):\n \"\"\" Return a dummy YATSM model container with a real change\n\n \"Real change\" dataset is simply a timeseries drawn from samples of two\n normal distributions with greatly different mean values.\n \"\"\"\n np.random.seed(123456789)\n dates = np.arange(dt.strptime('2000-01-01', '%Y-%m-%d').toordinal(),\n dt.strptime('2005-01-01', '%Y-%m-%d').toordinal(),\n 16)\n n = dates.size\n X = np.column_stack((np.ones(n), dates)) # n x p\n n_1, n_2 = n // 2, n - n // 2\n _y1 = np.linspace(0, 10, n_1) + np.random.standard_normal(n_1)\n _y2 = np.linspace(20, 10, n_2) + np.random.standard_normal(n_2)\n Y = np.array([\n np.concatenate((_y1, _y2)),\n np.concatenate((_y1, _y2))\n ]) # nseries x n\n\n # Put a break somewhere in the middle\n return setup_dummy_YATSM(X, Y, dates, [n_1, 
0])\n","repo_name":"ceholden/yatsm","sub_path":"tests/algorithms/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"95"} +{"seq_id":"24193130941","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Globale Funktion:\ndef f(r, x_n):\n return r * (x_n - x_n**2)\n\n\n# Simulation der Fixpunktbestimmung:\ndef Spinnennetz(r, x0, n, Dateiname):\n x = np.linspace(0, 1, 1000)\n N = int(n)\n xn = np.zeros(N+1)\n xn[0] = x0\n \n plt.figure(figsize=(10, 7.5))\n plt.plot(x, f(r, x), \"-b\")\n plt.plot(x, x, \"-k\")\n plt.plot([x0, x0], [0., x0], \"--r\") #Verbindungslinie zwischen x-Achse und Gerade bei x0\n \n for i in range(0, N):\n xn[i+1] = f(r, xn[i])\n plt.plot([xn[i], xn[i]], [xn[i], xn[i+1]], \"--r\") #Vertikale Linien\n plt.plot([xn[i], xn[i+1]], [xn[i+1], xn[i+1]], \"--r\") #Horizontale Linien\n \n plt.xlabel(r\"$x$\", fontsize=20)\n plt.ylabel(r\"$f(x)$\", fontsize=20)\n plt.title(r\"Simulation des Systems mit $r={}$\".format(r), fontsize=20)\n plt.tick_params(labelsize=16)\n plt.grid(True)\n #plt.savefig(Dateiname + \".png\")\n plt.show()\n print(\"Fixpunkt:\", xn[N]) #Zeigt den ermittelten Fixpunkt.\n return\n\n\n# Methodenaufrufe:\nSpinnennetz(1.4, 0.2, 100, \"A4.1\")\nSpinnennetz(2.5, 0.125, 100, \"A4.2\")\nSpinnennetz(2.8, 0.75, 100, \"A4.3\")\nSpinnennetz(3.7, 0.4, 100, \"A4.4\")\n\n\n#------------------------------------------------------------------------------\n\n\n# Berechnet einen Fixpunkt, solange r im Intervall (1,3) liegt:\ndef Spiderweb(r, x0, n):\n N = int(n)\n xn = np.zeros(N+1)\n xn[0] = x0\n for i in range(0, N):\n xn[i+1] = f(r, xn[i])\n \n return xn[N]\n\n\n# Plotet die berechneten Fixpunkte in Abhaengigkeit von r:\nr = np.linspace(0, 4, 10000)\nxS = np.zeros(10000)\nfor i in range(0, 10000):\n xS[i] = Spiderweb(r[i], 0.9, 100)\nplt.figure(figsize=(10, 7.5))\nplt.plot(r, xS, \".b\")\nplt.xlabel(r\"$r$\", fontsize=20)\nplt.ylabel(r\"$x_n$\", fontsize=20)\nplt.title(u\"Feigenbaum-Bifurkationsdiagramm\", fontsize=20)\nplt.tick_params(labelsize=16)\nplt.grid(True)\n#plt.savefig(\"Feigenbaum.png\")\nplt.show()","repo_name":"Br0Fi/NumMethKomp","sub_path":"WS/Zettel5/A4.py","file_name":"A4.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20334134820","text":"import random\n\nsample_data = []\nwith open('IntegerArray.txt', 'r') as f:\n for line in f:\n sample_data.append(int(line))\n\n\ndef random_selet(array, target):\n array_len = len(array)\n if array_len == 1:\n return array[0]\n pivot = int(random.random() * array_len)\n array[0], array[pivot] = array[pivot], array[0]\n divier_index = 1\n for i in range(1, len(array)):\n if array[i] < array[0]:\n array[i], array[divier_index] = array[divier_index], array[i]\n divier_index += 1\n inposition = divier_index - 1\n array[0], array[inposition] = array[inposition], array[0]\n if inposition == target:\n return array[inposition]\n elif inposition > target:\n return random_selet(array[:inposition], target)\n elif inposition < target:\n return random_selet(array[inposition + 1:], target - inposition - 1)\n\nfor j in range(5):\n print(random_selet(sample_data, 
2))\n","repo_name":"JiyanBlack/Algorithms-Excercises","sub_path":"random_select.py","file_name":"random_select.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24242473656","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 21 17:43:27 2019\n\n@author: vishant\n\"\"\"\n\ndef selection(a):\n for j in range(len(a)):#scanning element\n min=j;\n for k in range(j+1,len(a)): # checking min element from list\n if a[min]> a[k]:\n min=k\n a[j],a[min]=a[min],a[j]\n \n\n\na=list(map(int,input().split()))\nprint(\"unsorted:\",a) \nselection(a) \nprint(\"sorted:\",a) \n","repo_name":"vishant16/Algorithms","sub_path":"sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"74665311679","text":"import tensorflow as tf\n\n\ndef hard_negative_mining(loss, gt_confs, neg_ratio):\n \"\"\" Hard negative mining algorithm\n to pick up negative examples for back-propagation\n base on classification loss values\n Args:\n loss: list of classification losses of all default boxes (B, num_default)\n gt_confs: classification targets (B, num_default)\n neg_ratio: negative / positive ratio\n Returns:\n conf_loss: classification loss\n loc_loss: regression loss\n \"\"\"\n # loss: B x N\n # gt_confs: B x N\n pos_idx = gt_confs > 0\n num_pos = tf.reduce_sum(tf.dtypes.cast(pos_idx, tf.int32), axis=1)\n num_neg = num_pos * neg_ratio\n\n rank = tf.argsort(loss, axis=1, direction='DESCENDING')\n rank = tf.argsort(rank, axis=1)\n neg_idx = rank < tf.expand_dims(num_neg, 1)\n\n return pos_idx, neg_idx\n\n\nclass SSDLosses(object):\n \"\"\" Class for SSD Losses\n Attributes:\n neg_ratio: negative / positive ratio\n num_classes: number of classes\n \"\"\"\n\n def __init__(self, neg_ratio, num_classes):\n self.neg_ratio = neg_ratio\n self.num_classes = num_classes\n\n def __call__(self, confs, locs, gt_confs, gt_locs):\n \"\"\" Compute losses for SSD\n regression loss: smooth L1\n classification loss: cross entropy\n Args:\n confs: outputs of classification heads (B, num_default, num_classes)\n locs: outputs of regression heads (B, num_default, 4)\n gt_confs: classification targets (B, num_default)\n gt_locs: regression targets (B, num_default, 4)\n Returns:\n conf_loss: classification loss\n loc_loss: regression loss\n \"\"\"\n cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')\n\n # compute classification losses\n # without reduction\n temp_loss = cross_entropy(\n gt_confs, confs)\n pos_idx, neg_idx = hard_negative_mining(\n temp_loss, gt_confs, self.neg_ratio)\n\n # classification loss will consist of positive and negative examples\n\n cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='sum')\n smooth_l1_loss = tf.keras.losses.Huber(reduction='sum')\n\n conf_loss = cross_entropy(\n gt_confs[tf.math.logical_or(pos_idx, neg_idx)],\n confs[tf.math.logical_or(pos_idx, neg_idx)])\n\n # regression loss only consist of positive examples\n loc_loss = smooth_l1_loss(\n # tf.boolean_mask(gt_locs, pos_idx),\n # tf.boolean_mask(locs, pos_idx))\n gt_locs[pos_idx],\n locs[pos_idx])\n\n num_pos = tf.reduce_sum(tf.dtypes.cast(pos_idx, tf.float32))\n\n conf_loss = conf_loss / num_pos\n loc_loss = loc_loss / num_pos\n\n return conf_loss, loc_loss\n\n\ndef 
create_losses(neg_ratio, num_classes):\n criterion = SSDLosses(neg_ratio, num_classes)\n\n return criterion\n\n","repo_name":"ChunML/ssd-tf2","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"97"} +{"seq_id":"12529987615","text":"from os import walk\n\nimport cv2\n\n\ndef get_image_filename_list(path):\n if path is None:\n return []\n image_filename_list = []\n for (dirpath, dirnames, filenames) in walk(path):\n for i in range(0, len(filenames)):\n filenames[i] = dirpath + '\\\\' + filenames[i]\n image_filename_list.extend(filenames)\n break\n\n return image_filename_list\n\n\ndef get_image_filename_list2(path):\n from os import listdir\n from os.path import isfile, join\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n return onlyfiles\n\n\ndef load_image_filename_list(filename_list, gray=False):\n image_list = []\n for filename in filename_list:\n image = cv2.imread(filename)\n if gray:\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n image_list.append(image)\n\n return image_list\n","repo_name":"Cry0g3n/CNNImageFiltration","sub_path":"utils/storage/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"28476049379","text":"'''\nThis file is part of the Ristra portage project.\nPlease see the license file at the root of this repository, or at:\n https://github.com/laristra/portage/blob/master/LICENSE\n'''\nimport ingen\nfrom ingen import gwiz, csv, altair\nfrom ingen.materials import material\n\ninnerCaseRadius = 1.75\nslugDX = 0.05\n\ndef suBlock(f,mat):\n # HACK: names is internal\n uf=altair.uFrame([getattr(f,a) for a in f.names],mat)\n # FIXME: without factor, this crashed with close generators\n uf.dxStipple(altair.voronoiMesh,f.dx()*1.1,100)\n return uf.block()\n\ndef block3(iMin,jMin,jMax,rule=altair.copyDistrib(),material=None,feather=None):\n # HACK: only copyDistrib() supported\n f=altair.frame3(iMin,jMin,jMax,material)\n f.copy()\n return suBlock(f,material),f.iMax\n\ndef block2(jMin,jMax,rule,material,feather=None):\n # HACK: only squareDistrib() supported\n f=altair.frame2(jMin,jMax,material)\n f.equal(f.dx())\n return suBlock(f,material)\n\n# For the original structured mesh:\n# from ingen.altair import block2,block3\n\ningen.init(globals(),None)\ngwiz.loadDirectory('contours',[('*',gwiz.noRule(),gwiz.dxDistribRule(0.005))],cntr)\nmat.pb = material(1)\nmat.plastic = material(2)\nmat.void = material(3)\n\nseg.slug_fin_jMin = altair.segment(cntr.slug,cntr.slug.pnt.p14,cntr.slug.pnt.p13).equalArcDistrib(slugDX)\nseg.slug_fin_jMax = altair.segment(cntr.slug,cntr.slug.pnt.p12,cntr.slug.pnt.p11).equalArcDistrib(slugDX)\nseg.slug_fin_iMin = altair.segment(cntr.slug,cntr.slug.pnt.p14,cntr.slug.pnt.p12).equalArcDistrib(slugDX)\nblk.slug_fin,sfiMax = block3(seg.slug_fin_iMin,seg.slug_fin_jMin,seg.slug_fin_jMax,material=mat.pb)\n\nseg.slug_hp = altair.segment(cntr.slug,cntr.slug.pnt.p7,cntr.slug.pnt.p8).equalArcDistrib(slugDX)\nseg.slug_nose = altair.segment(cntr.slug,cntr.slug.pnt.p8,cntr.slug.pnt.p9).equalArcDistrib(slugDX)\nseg.slug_base = altair.segment(cntr.slug,cntr.slug.pnt.p6,cntr.slug.pnt.p13).equalArcDistrib(slugDX)\n\nseg.slug_body_fore = altair.segment(cntr.slug,cntr.slug.pnt.p9,cntr.slug.pnt.p10).equalArcDistrib(slugDX)\nseg.slug_body_aft = 
altair.segment(cntr.slug,cntr.slug.pnt.p10,cntr.slug.pnt.p11).equalArcDistrib(slugDX)\nsbiMin=seg.slug_body_aft+seg.slug_body_fore\nblk.slug_body,_ = block3(sbiMin,\n seg.slug_base+sfiMax,\n seg.slug_hp+seg.slug_nose,\n material=mat.pb,\n rule=altair.copyDistrib(),\n feather=altair.fthr())\n\ndef avgDX(*ss): return sum(s.dx() for s in ss)/len(ss)\ncntr.sabot_outer = gwiz.rLine(innerCaseRadius,cntr.slug.pnt.p12.z+0.25,cntr.slug.pnt.p9.z+0.25)\nseg.sabotJMin=sbiMin+seg.slug_fin_jMax\nseg.sabotJMin.slide('ss_slide')\nseg.sabot_outer = altair.segment(cntr.sabot_outer)\\\n .equalArcDistrib(len(seg.sabotJMin))\nseg.sabotIMax=altair.segment(None,cntr.slug.pnt.p9,cntr.sabot_outer[-1])\\\n .equalArcDistrib(avgDX(seg.sabot_outer,seg.sabotJMin))\n\ncntr.wad_aft = gwiz.zLine(3.5,innerCaseRadius)\ncntr.wad_fore = gwiz.zLine(cntr.slug.pnt.p14.z,cntr.slug.pnt.p14.r)\nseg.wad_fore = altair.segment(cntr.wad_fore).equalArcDistrib(slugDX)\nseg.wadJMax=seg.wad_fore+seg.slug_fin_iMin\nseg.wadJMax.slide('wf_slide')\nseg.wad_aft = altair.segment(cntr.wad_aft).equalArcDistrib(seg.wadJMax.dx())\nseg.slug_base.slide('wf_slide')\nwadDX=avgDX(seg.wad_aft,seg.wadJMax)\nseg.wadIMax=altair.segment(None,cntr.wad_aft[-1],\n cntr.sabot_outer[0]).equalArcDistrib(wadDX)\nseg.wadIMin=altair.segment(None,cntr.wad_aft[0],\n cntr.wad_fore[0]).equalArcDistrib(wadDX)\n\ncntr.cush_aft = gwiz.zLine(2.75,innerCaseRadius)\nseg.cush_aft = altair.segment(cntr.cush_aft).equalArcDistrib(seg.wad_aft)\nblk.cushion = block2(seg.cush_aft,seg.wad_aft,material=mat.void,rule=altair.squareDistrib())\nseg.wad_aft.slide('cw-slide')\n\ndef interp(a,b,t): return a*(1-t)+b*t\ngenWadR,genWadZ=int(20/slugDX),int(80/slugDX)\ncorner=cntr.slug.pnt.p12\ngens=[gwiz.rz((i+.5)*innerCaseRadius/genWadR,(3.5+corner.z)/2)\n for i in xrange(genWadR)]+\\\n [gwiz.rz((corner.r+innerCaseRadius)/2,\n interp(corner.z,cntr.slug.pnt.p9.z,(i+.5)/genWadZ))\n for i in xrange(genWadZ)]\n#for p in gens: pnt().add(p,\"gen\")\nblk.wad_sabot=altair.uFrame([seg.wad_fore+seg.slug_fin_iMin,seg.sabotJMin,\n seg.sabotIMax,seg.sabot_outer+seg.wadIMax,\n seg.wad_aft,seg.wadIMin],mat.plastic)\\\n .rule(altair.voronoiMesh(gens,1000)).block()\n\naltair.finalize()\naltair.writeGMV('mesh/shotshell-v.gmv')\naltair.writeX3D('mesh/shotshell-v.x3d')\n","repo_name":"laristra/portage","sub_path":"test_data/ingen_scripts/shotshell-v/shotshell.py","file_name":"shotshell.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"97"} +{"seq_id":"10201308498","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nimport glob\nimport os\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import UserMixin\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database/vark_db1.db'\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\nclass Subject(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), unique=True, nullable=False)\n chapters = db.relationship('Chapter', backref='subject', lazy=True)\n def __repr__(self):\n return f\"Subject('{self.name}')\"\n\nclass Chapter(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n number = db.Column(db.Integer, nullable=False)\n subject_id = db.Column(db.Integer, db.ForeignKey('subject.id'), nullable=False)\n\n topics = db.relationship('Topic', backref='chapter', lazy=True)\n\n def __repr__(self):\n return f\"Chapter('{self.name}', '{self.number}')\"\n\nclass 
Topic(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n number = db.Column(db.String, nullable=False)\n chapter_id = db.Column(db.Integer, db.ForeignKey('chapter.id'), nullable=False)\n contents = db.relationship('Content', backref='topic', lazy=True)\n\n def __repr__(self):\n return f\"Topic('{self.name}', '{self.number}')\"\n\nclass Content(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n file_name = db.Column(db.String(250), nullable=False)\n c_type = db.Column(db.String(10), nullable=False)\n topic_id = db.Column(db.Integer, db.ForeignKey('topic.id'), nullable=False)\n \n def __repr__(self):\n return f\"Content('{self.file_name}','{self.c_type}','{self.topic_id}')\"\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n gender = db.Column(db.String(10), unique=False, nullable=False)\n firstname = db.Column(db.String(20), unique=False, nullable=False)\n lastname = db.Column(db.String(20), unique=False, nullable=False)\n age = db.Column(db.String(10), unique=False, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n user_type = db.Column(db.String(120), unique=False, nullable=False)\n password = db.Column(db.String(60), nullable=False)\n def __repr__(self):\n return f\"User('{self.gender}', '{self.firstname}', '{self.lastname}', '{self.age}', '{self.email}')\"\n \ndef create_varkdb():\n\n# Table 1 Subject\n path = \"static/การสร้างสื่อดิจิทัล/\"\n\n# Table 2 Chapter\n # {1: chapter1 name, 2: chapter2 name, ...}\n chapter = dict()\n ch_id = 1\n for ch in glob.glob(path+\"*\"):\n ch_name = os.path.basename(os.path.normpath(ch))\n chapter[ch_id] = ch_name\n ch_id += 1\n\n# Table 3 Topic\n # [{Pretest : name, 1.1 : name}]\n topic = []\n for ch in glob.glob(path+\"*\"):\n ch_topic = dict()\n for tp in glob.glob(ch+\"/*\"):\n tp_folder = os.path.basename(os.path.normpath(tp))\n tp_folder = tp_folder.split(\"_\")[1]\n\n tp_id = \"\"\n tp_name = \"\"\n if tp_folder == \"แบบทดสอบก่อนเรียน\":\n tp_id = \"P\"\n tp_name = tp_folder\n elif tp_folder == \"แบบทดสอบหลังเรียน\":\n tp_id = \"T\"\n tp_name = tp_folder\n else:\n \n tp_id = tp_folder.split()[0]\n topic_name = tp_folder.split()[1:]\n for pn in range(len(topic_name)):\n tp_name += topic_name[pn]\n if pn+1 < len(topic_name):\n tp_name += \" \"\n\n ch_topic[tp_id] = tp_name\n\n topic.append(ch_topic)\n \n# Table 4 Content\n # [{'P': path},\n # {'V' : path, 'A' : path, 'R': path , 'K' : path, 'E' : path},\n # {'T' : path },\n # ]\n content = []\n for ch in glob.glob(path+\"*\"):\n for tp in glob.glob(ch+\"/*\"):\n sub_content = dict()\n k = []\n for cont in glob.glob(tp+\"/*\"):\n file_name = os.path.basename(os.path.normpath(cont))\n file_path = cont.replace(\"\\\\\",\"/\")\n if 'Ans' not in file_name:\n if 'PreCh' in file_name:\n sub_content['P'] = file_path\n elif 'V' in file_name:\n sub_content['V'] = file_path\n elif 'A' in file_name:\n sub_content['A'] = file_path\n elif 'R' in file_name:\n sub_content['R'] = file_path\n elif 'K' in file_name:\n k.append(file_path)\n elif 'Exercise' in file_name:\n for ex in glob.glob(cont+\"/*\"):\n ex_name = os.path.basename(os.path.normpath(ex))\n if 'Exer' in ex_name and 'Ans' not in ex_name:\n sub_content['E'] = ex.replace(\"\\\\\",\"/\")\n elif 'PostCh' in file_name:\n sub_content['T'] = file_path\n\n # sorted swf file. 
\n if k:\n sub_content['K'] = sorted(k)\n\n # add content of each topic to list.\n type_sequence = ['V', 'A', 'R', 'K', 'E']\n if 'P' in sub_content or 'T' in sub_content:\n content.append(sub_content)\n else:\n sorted_content = dict()\n for seq in type_sequence:\n sorted_content[seq] = sub_content[seq]\n content.append(sorted_content)\n\n\n# Commit to Database\n\n # Table 1 Subject\n db.create_all()\n subject = Subject(name=\"การสร้างสื่อดิจิทัล\")\n db.session.add(subject)\n\n # Table 2 Chapter\n subject = Subject.query.filter_by(name='การสร้างสื่อดิจิทัล').first()\n for c in chapter:\n db.session.add(Chapter(name=chapter[c], number=c, subject_id=subject.id))\n \n # Table 3 Topic\n chapter = Chapter.query.all()\n index = 0\n for c in chapter:\n for t in topic[index]:\n db.session.add(Topic(name=topic[index][t], number=t, chapter_id=c.id))\n index += 1\n \n # Table 4 Content\n topic = Topic.query.all()\n index = 0\n for t in topic:\n for c in content[index]:\n if c == 'K':\n for part in content[index][c]:\n db.session.add(Content(file_name=part, c_type=c, topic_id=t.id))\n else:\n \n db.session.add(Content(file_name=content[index][c], c_type=c, topic_id=t.id))\n index += 1\n \n print(Content.query.all())\n\n # Table 5 User\n hashed_password = bcrypt.generate_password_hash('Am-1234').decode('utf-8')\n user = User(gender='M', firstname='Admin', \n lastname='Admin', age='21', email='vark_admin@gmail.com', password=hashed_password, user_type='Admin')\n db.session.add(user)\n\n db.session.commit()\n\ndef get_content():\n db.create_all()\n subject = Subject.query.first()\n chapter = Chapter.query.all()\n topic = Topic.query.all()\n chapters = dict()\n topics = []\n contents = []\n for c in chapter:\n chapters[c.number] = c.name\n dict_top = dict()\n for t in c.topics:\n dict_top[t.number] = t.name\n dict_cont = dict()\n k = 1\n for ct in t.contents:\n if ct.c_type == 'K':\n dict_cont[ct.c_type+str(k)] = ct.file_name\n k += 1\n else:\n dict_cont[ct.c_type] = ct.file_name\n contents.append(dict_cont)\n \n topics.append(dict_top)\n\n return chapters, topics, contents\n\n#create_varkdb()\n\ndef update_content_path():\n for c in Content.query.all():\n c.file_name = c.file_name.replace('การสร้างสื่อดิจิทัล', 'digital_media_creation')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 1 รู้จักกับการตัดต่อภาพยนตร์', 'chapter1')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 2 ขั้นตอนการทำงานในงานตัดต่อวิดีโอ', 'chapter2')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 3 การทำงานกับโปรเจ็กต์', 'chapter3')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 4 จัดการซีเควนซืและการใช้งานคลิป', 'chapter4')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 5 การตัดต่อบน Timeline', 'chapter5')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 6 การสร้างทรานซิชัน เอฟเฟกต์และภาพเคลื่อนไหว', 'chapter6')\n db.session.commit()\n for c in Content.query.all():\n c.file_name = c.file_name.replace('บทที่ 7 การใส่เสียงและการเผยแพร่ผลงาน', 'chapter7')\n db.session.commit()\n \n for c in Content.query.all():\n if 'แบบทดสอบก่อนเรียน' in c.file_name.split('/')[3]:\n c.file_name = c.file_name.replace('แบบทดสอบก่อนเรียน', 'Pretest')\n elif 'แบบทดสอบหลังเรียน' in c.file_name.split('/')[3]:\n c.file_name = c.file_name.replace('แบบทดสอบหลังเรียน', 
'Posttest')\n else:\n c.file_name = c.file_name.replace(c.file_name.split('/')[3], c.file_name.split('/')[3].split(' ')[0])\n db.session.commit()\n for c in Content.query.all():\n if 'Exercise' in c.file_name.split('/')[4]:\n c.file_name = c.file_name.replace(c.file_name.split('/')[4], c.file_name.split('/')[4].replace(' ', '_'))\n db.session.commit()\n for c in Content.query.all():\n print(c.file_name)\n","repo_name":"FITM-KMUTNB/VARK_Learning","sub_path":"varkapp/create_varkDB.py","file_name":"create_varkDB.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"26156580787","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.contrib.auth.forms import UserCreationForm\nfrom rest_framework import status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.test import APITestCase\nfrom snippets.models import Snippet, User\nfrom rest_framework import serializers\nfrom snippets.test_utils import run_pyflakes_for_package, run_pep8_for_package\n\n\nclass Setup(TestCase):\n def setUp(self):\n User.objects.create_user(username='test--user')\n User.objects.create_user(username='second--user')\n Snippet.objects.create(title='Snippet Title', code='print hello world', owner_id='1')\n Snippet.objects.create(title='Second Snippet Title', code='print world hellow', owner_id='2')\n\n\nclass BasicMathTestCase(TestCase):\n def test_math(self):\n a = 1\n b = 1\n self.assertEqual(a+b, 2)\n\n def test_failing_case(self):\n a = 1\n b = 0\n self.assertEqual(a+b, 1)\n\n\n# class AccountTests(APITestCase):\n# def test_create_account(self):\n# \"\"\"\n# create a new account object\n# \"\"\"\n# url = reverse('user-list')\n# data = {'username': 'TestAccount'}\n# response = self.client.post(url, data, format='json')\n# self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n# # self.assertEqual(response.data, data)\n#\n# def test_create_account_username_error(self):\n# \"\"\"\n# error when using space in username\n# \"\"\"\n# url = reverse('user-list')\n# data = {'username': 'Test Account'}\n# response = self.client.post(url, data, format='json')\n# self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n#\n# def test_clean_username_exception(self):\n# # Create a user to test it is already taken\n# User.objects.create_user(username='test--user')\n#\n# url = reverse('user-list')\n# data = {'username': 'test--user'}\n# response = self.client.post(url, data, format='json')\n# self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n#\n# # setup form for testing\n# form = UserCreationForm()\n# form.cleaned_data = data\n#\n# # use a context manager to watch for validation error\n# with self.assertRaises(serializers.DjangoValidationError):\n# form.clean_username()\n#\n\nclass APIResponseTests(APITestCase):\n def setUp(self):\n User.objects.create_user(username='test--user')\n self.snippet = Snippet.objects.create(title='Snippet Title', code='print hello world', owner_id='1' )\n\n def test_users_response(self):\n response = self.client.get('/users/1/')\n data = {'username': 'test--user'}\n\n def extractDictAFromB(A, B):\n return dict([(k, B[k]) for k in A.keys() if k in B.keys()])\n self.assertEqual(data, extractDictAFromB(data, response.data))\n # self.assertDictContainsSubset({'username': 'test--user'}, response.data)\n\n\nclass ModelTestCase(TestCase):\n def setUp(self):\n self.snippet = 
Snippet.objects.create(title='Snippet Title', code='print hello world', owner_id='1' )\n\n def test_create_snippet(self):\n \"\"\"\n Test that a snippet is created\n \"\"\"\n self.assertEqual(self.snippet.title, 'Snippet Title')\n self.assertEqual(self.snippet.code, 'print hello world')\n self.assertEqual(self.snippet.owner_id, '1')\n\n def test_create_snippet_highlight_created(self):\n \"\"\"\n test the snippet highlight is created\n html line 82\n \"\"\"\n html_line_82 = '

{}

'.format(self.snippet.title)\n self.assertIn(html_line_82, self.snippet.highlighted)\n\n def test_snippet_detail(self):\n testsnippet = Snippet.objects.all()[0]\n url = '/snippets/{}/'.format(str(testsnippet.id))\n response = self.client.get(url)\n self.assertContains(response, testsnippet.title)\n\n\nclass SyntaxTest(TestCase):\n def test_syntax(self):\n \"\"\"\n Run pyflakes/pep8 across the code base to check for potential errors.\n \"\"\"\n packages = ['snippets']\n warnings = []\n # Eventually should use flake8 instead so we can ignore specific lines via a comment\n for package in packages:\n warnings.extend(run_pyflakes_for_package(package, extra_ignore=(\"_settings\",)))\n warnings.extend(run_pep8_for_package(package, extra_ignore=(\"_settings\",)))\n if warnings:\n self.fail(\"{0} Syntax warnings!\\n\\n{1}\".format(len(warnings), \"\\n\".join(warnings)))","repo_name":"kremerben/restframe","sub_path":"snippets/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"74653158399","text":"straight = [(-1,0), (0,1), (1,0), (0,-1)]\nback = [(1,0), (0,-1), (-1,0), (0,1)]\nleft = [(0,-1), (-1,0), (0,1), (1,0)]\n\nonly_rotate = 0\nnum_clean = 0\n\ndef clean(y, x) :\n global num_clean\n room[y][x] = 2\n num_clean += 1\n return room\n\ndef go_straight(y, x, direction) :\n y, x = y + straight[direction][0], x + straight[direction][1]\n return y,x\n\ndef go_back(y, x, direction) :\n y, x= y + back[direction][0], x + back[direction][1]\n return y, x\n\ndef turn_left(direction) :\n direction = (direction+3) % 4\n return direction\n\nn, m = map(int, input().split())\ny, x, direction = map(int, input().split())\nroom = [list(map(int, input().split())) for _ in range(n)]\n\nclean(y, x)\n\nwhile True :\n if only_rotate == 4 :\n if room[y+back[direction][0]][x+back[direction][1]] == 1 :\n break\n else :\n y, x = go_back(y, x, direction)\n only_rotate = 0\n\n elif room[y+left[direction][0]][x+left[direction][1]] == 0 :\n direction = turn_left(direction)\n y, x = go_straight(y, x, direction)\n room = clean(y, x)\n only_rotate = 0\n \n elif room[y+left[direction][0]][x+left[direction][1]] != 0 :\n direction = turn_left(direction)\n only_rotate += 1\n\nprint(num_clean)","repo_name":"jeonlego012/coding","sub_path":"robot_cleaner.py","file_name":"robot_cleaner.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"22797653104","text":"from datetime import datetime\n\n# a Rain contains a date, max temperature, min temperature, average temperature,\n# and precipitation\n# - Rain(date, max_temp, min_temp, avg_temp, precipitation)\n\nclass Rain:\n\n def __init__(self, date, max_temp, min_temp, avg_temp, precipitation):\n self.date = datetime.strptime(date, \"%m/%d/%Y\") # date\n self.max_temp = data_checker(max_temp) # number\n self.min_temp = data_checker(min_temp) # number\n self.avg_temp = data_checker(avg_temp) # number\n # self.precipitation = str(precipitation)\n self.precipitation = data_checker(precipitation) # number\n\n def __eq__(self, other):\n return type(other) == Rain and \\\n self.date == other.date and \\\n self.max_temp == other.max_temp and \\\n self.min_temp == other.min_temp and \\\n self.avg_temp == other.avg_temp and \\\n self.precipitation == other.precipitation \n\n def __repr__(self):\n return \"{!r} - {!r}\\n\".format(self.date, self.precipitation)\n\n\n\n# 
.csv -> List\n# Reads the rain .cvs file then turn all data into Rain objects.\ndef read_rain_from_file(filename):\n inFile = open(filename, \"r\")\n rains = []\n z = inFile.readlines()\n inFile.close()\n for i in range(1, len(z)):\n info = []\n info.append(z[i].split(\",\"))\n n = [info[0][0], info[0][1], info[0][2], info[0][3], info[0][4].strip('\"\\n\"')]\n new_info = Rain(*n)\n rains.append(new_info)\n return rains\n\n# str -> str or float\n# checks if data is float or str and returns the correct data type\ndef data_checker(x):\n try: \n return float(x)\n except ValueError:\n return x\n\ndef data_filter(rains, key, value):\n new_rains = []\n for rain in rains:\n if key == \"month\":\n if rain.date.month == value:\n new_rains.append(rain)\n elif key == \"year\":\n if rain.date.year == value:\n new_rains.append(rain)\n elif key == \"day\":\n if rain.date.day == value:\n new_rains.append(rain)\n elif key == \"date\":\n if rain.date == datetime.strptime(value, \"%m/%d/%Y\"):\n new_rains.append(rain)\n elif key == \"temp\":\n if rain.max_temp == float(value) or rain.min_temp == float(value)\\\n or rain.avg_temp == float(value):\n new_rains.append(rain)\n return new_rains\n\ndef precipitation_filter(rains, key, value):\n new_rains = []\n value = float(value)\n for rain in rains:\n if rain.precipitation != \"T\" and rain.precipitation != \"M\":\n if key == \">\":\n if rain.precipitation > value:\n new_rains.append(rain)\n elif key == \">=\":\n if rain.precipitation >= value:\n new_rains.append(rain)\n elif key == \"<\":\n if rain.precipitation < value:\n new_rains.append(rain)\n elif key == \"<=\":\n if rain.precipitation <= value:\n new_rains.append(rain)\n elif key == \"=\":\n if rain.precipitation == value:\n new_rains.append(rain)\n return new_rains\n\ndef counter(rains):\n return len(rains)\n\nrains = read_rain_from_file(\"testfile.csv\")\n# a = precipitation_filter(rains, \">=\", 0.25)\n# b = data_filter(a, \"month\", 5)\n# print(b)\n# print(counter(b))\nprint(rains)","repo_name":"Krazyman/MA431","sub_path":"funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"31062659277","text":"\nimport os\nimport csv\nimport pymongo\nfrom pymongo import MongoClient\nimport pandas as pd\nimport json\nimport sys\nimport numpy as np\nfrom datetime import datetime\n\n\nclass central_query_runtime():\n def __init__(self):\n self.client_mongo = MongoClient('mongodb://localhost:27017/')\n self.db = self.client_mongo.central_task_runtime_profiler\n\n def query_task(self,task_name,file_name):\n logging_droplet = self.db['droplet_runtime']\n #print(logging_droplet)\n print('************')\n print(task_name)\n print(file_name)\n input_timestamp = list(logging_droplet.find({\"Task Name\": task_name,'File Name':file_name,\"Type\":\"created_input\"}).sort([('Time', pymongo.ASCENDING)]))\n execution_timestamp = list(logging_droplet.find({\"Task Name\": task_name,'File Name':file_name,\"Type\":\"execution_time\"}).sort([('Time', pymongo.ASCENDING)]))\n finished_timestamp = list(logging_droplet.find({\"Task Name\": task_name,'File Name':file_name,\"Type\":\"finished_time\"}).sort([('Time', pymongo.ASCENDING)]))\n FMT = '%Y-%m-%d %H:%M:%S.%f'\n # print(input_timestamp)\n # print(execution_timestamp)\n # print(finished_timestamp)\n num_file = int(len(input_timestamp)/3)\n print(len(input_timestamp))\n print(num_file)\n try:\n if num_file == 1:#only_1_file\n input_info = 
datetime.strptime(input_timestamp[0].get(\"Time\"),FMT)\n execution_info = datetime.strptime(execution_timestamp[0].get(\"Time\"),FMT)\n finished_info = datetime.strptime(finished_timestamp[0].get(\"Time\"),FMT)\n # print('1')\n # print(input_info)\n # print(execution_info)\n # print(finished_info)\n else: #example: task 4 has 2 inputs from task 2, task 3\n input_info = datetime.strptime(input_timestamp[0].get(\"Time\"),FMT)\n execution_info = datetime.strptime(execution_timestamp[0].get(\"Time\"),FMT)\n finished_info = datetime.strptime(finished_timestamp[-1].get(\"Time\"),FMT)\n # print('2')\n # print(input_info)\n # print(execution_info)\n # print(finished_info)\n\n duration_time = finished_info - execution_info\n waiting_time = execution_info - input_info\n # output_size = [str(x.get(\"File Size\")) for x in finished_timestamp]\n # output_size = \",\".join(output_size)\n output_size = [x.get(\"File Size\") for x in finished_timestamp]\n output_size = sum(output_size)\n node_IP = input_timestamp[-1].get(\"Node IP\")\n return node_IP,str(duration_time.total_seconds()), str(waiting_time.total_seconds()), str(output_size)\n except StopIteration:\n print('No valid Task/File')\n return -1\n\n\n def query_check_runtime(self,task_name,file_name):\n node_IP,duration_time, waiting_time, output_size = self.query_task(task_name,file_name)\n print('The task is performed at node '+ node_IP)\n print('The duration time is '+str(duration_time)+ \" [sec] ; the waiting time is \"+ str(waiting_time)+ \" [sec]\")\n print('The output size is ' + str(output_size)+ ' [Kbits]')\n\n def query_write_profile(self,file_name):\n print('Export runtime information to runtime_profile.txt as input file for the scheduler')\n logging_droplet = self.db['droplet_runtime']\n task_list = logging_droplet.find().distinct(\"Task Name\")\n output_file='runtime_profile_%s.txt'%(file_name)\n f = open(output_file, 'w')\n f.write('task\\ttime (sec)\\toutput_data (Kbit)\\n')\n for task in task_list:\n print(task)\n node_IP,duration_time, waiting_time, output_size = self.query_task(task,file_name)\n line = task+ '\\t'+str(duration_time)+'\\t'+ str(output_size)+'\\n'\n f.write(line)\n f.close()\n\n def query_write_profile_total(self):\n print('Export total runtime information for evaluation')\n logging_droplet = self.db['scheduler_runtime']\n file_list = logging_droplet.find().distinct(\"File Name\")\n output_file='runtime_profile_total.txt'\n f = open(output_file, 'w')\n f.write('file name\\ttotal duration (sec)\\n')\n for file_name in file_list:\n print(file_name)\n input_timestamp = list(logging_droplet.find({'File Name':file_name,\"Type\":\"created_input\"}))\n finished_timestamp = list(logging_droplet.find({'File Name':file_name,\"Type\":\"finished_time\"}))\n FMT = '%Y-%m-%d %H:%M:%S.%f'\n input_info = datetime.strptime(input_timestamp[0].get(\"Time\"),FMT)\n finished_info = datetime.strptime(finished_timestamp[0].get(\"Time\"),FMT)\n duration_time = finished_info - input_info\n line = file_name+ '\\t'+str(duration_time)+'\\n'\n f.write(line)\n f.close()\n\n\nif __name__ == '__main__':\n d = central_query_runtime()\n if len(sys.argv)>3:\n print('Option 1 - Specific task & input file: python3 runSQuery [task_name] [input_task]')\n print('Option 2 - Write runtime profiler for a specific file: python3 runSQuery [input_task]')\n print('Option 3 - Write total runtime profiler: python3 runSQuery total')\n exit()\n if len(sys.argv)==1:\n d.query_write_profile_total()\n elif len(sys.argv)==2:\n file_name = sys.argv[1]\n 
d.query_write_profile(file_name)\n else:\n task_name = sys.argv[1]\n file_name = sys.argv[2]\n d.query_check_runtime(task_name,file_name)\n\n\n","repo_name":"ANRGUSC/CIRCE","sub_path":"centralized_scheduler_with_profiler/runSQuery.py","file_name":"runSQuery.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"97"} +{"seq_id":"71751864319","text":"from streamlit_embedcode import (\n codepen_snippet,\n github_gist,\n gitlab_snippet,\n ideone_snippet,\n pastebin_snippet,\n tagmycode_snippet,\n)\n\nfrom .. import extra\n\ncodepen_snippet = extra(codepen_snippet)\ngithub_gist = extra(github_gist)\ngitlab_snippet = extra(gitlab_snippet)\nideone_snippet = extra(ideone_snippet)\npastebin_snippet = extra(pastebin_snippet)\ntagmycode_snippet = extra(tagmycode_snippet)\n\n\ndef example_github():\n github_gist(\n \"https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087/\",\n width=700,\n height=400,\n )\n\n\ndef example_gitlab():\n gitlab_snippet(\n \"https://gitlab.com/snippets/1995463\",\n width=700,\n height=200,\n )\n\n\ndef example_codepen(codepen_snippet):\n codepen_snippet(\n \"https://codepen.io/randyzwitch/pen/GRrYrBw\",\n width=700,\n height=400,\n )\n\n\ndef example_ideone(ideone_snippet):\n ideone_snippet(\n \"https://ideone.com/5V7XZ6\",\n width=700,\n height=400,\n )\n\n\ndef example_pastebin(pastebin_snippet):\n pastebin_snippet(\n \"https://pastebin.com/8QZ7YjYD\",\n width=700,\n height=400,\n )\n\n\ndef example_tagmycode(tagmycode_snippet):\n tagmycode_snippet(\n \"https://tagmycode.com/snippet/1038\",\n width=700,\n height=400,\n )\n\n\n__title__ = \"Embed code\"\n__desc__ = \"Embed code from various platforms (Gists, snippets...)\"\n__icon__ = \"📋\"\n__examples__ = {\n example_github: [github_gist],\n example_gitlab: [gitlab_snippet],\n}\n__author__ = \"randyzwitch\"\n__github_repo__ = \"randyzwitch/streamlit-embedcode\"\n__pypi_name__ = \"streamlit-embedcode\"\n__package_name__ = \"streamlit_embedcode\"\n__experimental_playground__ = False\n","repo_name":"arnaudmiribel/streamlit-extras","sub_path":"src/streamlit_extras/embed_code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"97"} +{"seq_id":"29197434752","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n**QWidget_component.py**\n\n**Platform:**\n Windows, Linux, Mac Os X.\n\n**Description:**\n Defines the :class:`QWidgetComponent` class.\n\n**Others:**\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom PyQt4.QtCore import pyqtSignal\n\nimport foundations.exceptions\nimport foundations.verbose\nimport foundations.ui.common\n\n__author__ = \"Thomas Mansencal\"\n__copyright__ = \"Copyright (C) 2008 - 2014 - Thomas Mansencal\"\n__license__ = \"GPL V3.0 - http://www.gnu.org/licenses/\"\n__maintainer__ = \"Thomas Mansencal\"\n__email__ = \"thomas.mansencal@gmail.com\"\n__status__ = \"Production\"\n\n__all__ = [\"LOGGER\", \"QWidgetComponentFactory\"]\n\nLOGGER = foundations.verbose.install_logger()\n\n\ndef QWidgetComponentFactory(ui_file=None, *args, **kwargs):\n \"\"\"\n Defines a class factory creating :class:`QWidgetComponent` classes using given ui file.\n\n :param ui_file: Ui file.\n :type ui_file: unicode\n :param \\*args: Arguments.\n :type \\*args: \\*\n :param \\*\\*kwargs: Keywords arguments.\n :type \\*\\*kwargs: \\*\\*\n :return: QWidgetComponent class.\n :rtype: 
QWidgetComponent\n \"\"\"\n\n class QWidgetComponent(foundations.ui.common.QWidget_factory(ui_file=ui_file)):\n\n \"\"\"\n Defines the base class for **Manager** package QWidget Components.\n \"\"\"\n\n component_activated = pyqtSignal()\n \"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component is activated.\n \"\"\"\n\n component_deactivated = pyqtSignal()\n \"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component is deactivated.\n \"\"\"\n\n component_initialized_ui = pyqtSignal()\n \"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component ui is initialized.\n \"\"\"\n\n component_uninitialized_ui = pyqtSignal()\n \"\"\"\n This signal is emited by the :class:`QObjectComponent` class when the Component ui is uninitialized.\n \"\"\"\n\n def __init__(self, parent=None, name=None, *args, **kwargs):\n \"\"\"\n Initializes the class.\n\n :param parent: Object parent.\n :type parent: QObject\n :param name: Component name.\n :type name: unicode\n :param \\*args: Arguments.\n :type \\*args: \\*\n :param \\*\\*kwargs: Keywords arguments.\n :type \\*\\*kwargs: \\*\\*\n \"\"\"\n\n LOGGER.debug(\"> Initializing '{0}()' class.\".format(self.__class__.__name__))\n\n super(QWidgetComponent, self).__init__(parent, *args, **kwargs)\n\n # --- Setting class attributes. ---\n self.__name = None\n self.name = name\n\n self.__activated = False\n self.__initialized_ui = False\n self.__deactivatable = True\n\n @property\n def name(self):\n \"\"\"\n Property for **self.__name** attribute.\n\n :return: self.__name.\n :rtype: unicode\n \"\"\"\n\n return self.__name\n\n @name.setter\n @foundations.exceptions.handle_exceptions(AssertionError)\n def name(self, value):\n \"\"\"\n Setter for **self.__name** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"\n\n if value is not None:\n assert type(value) is unicode, \"'{0}' attribute: '{1}' type is not 'unicode'!\".format(\n \"name\", value)\n self.__name = value\n\n @name.deleter\n @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)\n def name(self):\n \"\"\"\n Deleter for **self.__name** attribute.\n \"\"\"\n\n raise foundations.exceptions.ProgrammingError(\"{0} | '{1}' attribute is not deletable!\".format(\n self.__class__.__name__, \"name\"))\n\n @property\n def activated(self):\n \"\"\"\n Property for **self.__activated** attribute.\n\n :return: self.__activated.\n :rtype: unicode\n \"\"\"\n\n return self.__activated\n\n @activated.setter\n @foundations.exceptions.handle_exceptions(AssertionError)\n def activated(self, value):\n \"\"\"\n Setter for **self.__activated** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"\n\n if value is not None:\n assert type(value) is bool, \"'{0}' attribute: '{1}' type is not 'bool'!\".format(\"activated\", value)\n self.component_activated.emit() if value else self.component_deactivated.emit()\n self.__activated = value\n\n @activated.deleter\n @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)\n def activated(self):\n \"\"\"\n Deleter for **self.__activated** attribute.\n \"\"\"\n\n raise foundations.exceptions.ProgrammingError(\"{0} | '{1}' attribute is not deletable!\".format(\n self.__class__.__name__, \"activated\"))\n\n @property\n def initialized_ui(self):\n \"\"\"\n Property for **self.__initialized_ui** attribute.\n\n :return: self.__initialized_ui.\n :rtype: bool\n \"\"\"\n\n return self.__initialized_ui\n\n 
@initialized_ui.setter\n @foundations.exceptions.handle_exceptions(AssertionError)\n def initialized_ui(self, value):\n \"\"\"\n Setter for **self.__initialized_ui** attribute.\n\n :param value: Attribute value.\n :type value: bool\n \"\"\"\n\n if value is not None:\n assert type(value) is bool, \"'{0}' attribute: '{1}' type is not 'bool'!\".format(\n \"initialized_ui\", value)\n self.component_initialized_ui.emit() if value else self.component_uninitialized_ui.emit()\n self.__initialized_ui = value\n\n @initialized_ui.deleter\n @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)\n def initialized_ui(self):\n \"\"\"\n Deleter for **self.__initialized_ui** attribute.\n \"\"\"\n\n raise foundations.exceptions.ProgrammingError(\n \"{0} | '{1}' attribute is not deletable!\".format(self.__class__.__name__, \"initialized_ui\"))\n\n @property\n def deactivatable(self):\n \"\"\"\n Property for **self.__deactivatable** attribute.\n\n :return: self.__deactivatable.\n :rtype: unicode\n \"\"\"\n\n return self.__deactivatable\n\n @deactivatable.setter\n @foundations.exceptions.handle_exceptions(AssertionError)\n def deactivatable(self, value):\n \"\"\"\n Setter for **self.__deactivatable** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"\n\n if value is not None:\n assert type(value) is bool, \"'{0}' attribute: '{1}' type is not 'bool'!\".format(\"deactivatable\", value)\n self.__deactivatable = value\n\n @deactivatable.deleter\n @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)\n def deactivatable(self):\n \"\"\"\n Deleter for **self.__deactivatable** attribute.\n \"\"\"\n\n raise foundations.exceptions.ProgrammingError(\"{0} | '{1}' attribute is not deletable!\".format(\n self.__class__.__name__, \"deactivatable\"))\n\n @foundations.exceptions.handle_exceptions(NotImplementedError)\n def activate(self):\n \"\"\"\n Sets Component activation state.\n\n :return: Method success.\n :rtype: bool\n \"\"\"\n\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.activate.__name__, self.__class__.__name__))\n\n @foundations.exceptions.handle_exceptions(NotImplementedError)\n def deactivate(self):\n \"\"\"\n Unsets Component activation state.\n\n :return: Method success.\n :rtype: bool\n \"\"\"\n\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))\n\n @foundations.exceptions.handle_exceptions(NotImplementedError)\n def initialize_ui(self):\n \"\"\"\n Initializes the Component ui.\n \"\"\"\n\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))\n\n @foundations.exceptions.handle_exceptions(NotImplementedError)\n def add_widget(self):\n \"\"\"\n Adds the Component Widget ui.\n \"\"\"\n\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))\n\n @foundations.exceptions.handle_exceptions(NotImplementedError)\n def remove_widget(self):\n \"\"\"\n Removes the Component Widget ui.\n \"\"\"\n\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))\n\n 
@foundations.exceptions.handle_exceptions(NotImplementedError)\n def uninitialize_ui(self):\n \"\"\"\n Uninitializes the Component ui.\n \"\"\"\n\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.deactivate.__name__, self.__class__.__name__))\n\n return QWidgetComponent\n","repo_name":"KelSolaar/Manager","sub_path":"manager/QWidget_component.py","file_name":"QWidget_component.py","file_ext":"py","file_size_in_byte":10055,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"23817951230","text":"from timer import timeit\n\n@timeit\ndef threeSum_1(nums):\n # all three indices must not be equal\n # nums[i] + nums[j] + nums[k] = 0 \n ans = set()\n nums_dict = {}\n for i, num in enumerate(nums):\n nums_dict[num] = i\n\n for i in range(len(nums) - 2):\n for j in range(i + 1, len(nums) - 1):\n nums_k = -(nums[i] + nums[j])\n if nums_k in nums_dict:\n if nums_dict[nums_k] not in (i, j): \n lis = [nums[i], nums[j], nums_k]\n lis.sort()\n ans.add(tuple(lis))\n return ans\n\n\n@timeit\ndef threeSum_2(nums):\n nums.sort()\n ans = set()\n i, j, k = 0, 1, 2\n while True:\n if nums[i] >= 0:\n break\n \n if nums[i] + nums[j] + nums[k] == 0:\n ans.add((nums[i], nums[j], nums[k]))\n k += 1\n if (k == len(nums)):\n j += 1\n k = j + 1\n if (j == len(nums) - 1):\n i += 1\n j = i + 1\n k = j + 1\n return ans\n\nsum = 0\nfor i in range(500):\n sum += i\n\nprint(threeSum_1([-1,0,1,2,-1,-4]))\nprint(threeSum_2([-1,0,1,2,-1,-4]))","repo_name":"sivansh11/leetcode-solution","sub_path":"q15.py","file_name":"q15.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"18543907685","text":"from flask import Flask,url_for\n\n#将当前的模块构建成flask应用\n#当前flask应用构建完成后就可以接收请求并给出响应\napp=Flask(__name__)\n\n# @app.route('/')#匹配当前服务器的根路径\n# @app.route('/index')#可以用多个路由装饰器\n# def index_view():\n# return 'this is the first index of flask'\n\n@app.route('/')\n@app.route('/index')\n@app.route('/')\ndef show_x(number=1): #视图函数名不能重复\n if number!=1 :\n return '这是flask的第%s页'%number\n return '这是flask的首页'\n\n@app.route('/mil')\ndef mil_vire():\n return 'this is web about mil'\n\n@app.route('/show//')\ndef show_view(name,age):\n # return 'welcome %s'%name\n print(type(name))\n print(type(age))\n myurl = url_for('show_view',name='pp',age=12)\n # return 'name:%s,age:%d'%(name,age)\n return '当前函数的访问路径是%s'%myurl\n\n#在页面上显示日期\n@app.route('/birthday///')\ndef show_birthday(year,month,day):\n return \"your birthday is %s-%s-%s\"%(year,month,day)\n\nif __name__=='__main__':\n # templates(host = None,port = None,debug = None)\n app.run(debug=True) #http://127.0.0.1:5000/当前参数的默认地址\n\n","repo_name":"SpringSnowB/All-file","sub_path":"m3/d11/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"10530386926","text":"from flask import Flask, request\nfrom flask_cors import CORS\nfrom flask_restful import Resource, Api\nfrom decouple import config\nimport openai\nimport os\n\nopenai.organization = \"org-9BDdApfdksfAJ2XsyNYvGTjI\"\nopenai.api_key = config('OPENAI_API_KEY')\nopenai.Model.list()\n\n#App object\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\nclass Lessons(Resource):\n\n def completion(self, params):\n return openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\n 'role': 
'user',\n 'content': f'Answer the following query with the string \"yes\" or \"no\" (no punctuation), if \"no\", also provide an explaination:\\n\\nQ: Is this an acceptable translation (Mandarin : English) An acceptable translation is one that conveys the same meaning as the original sentence, and may not be a word-for-word translation : \"{params[\"pinyin\"]}\" : \"{params[\"english\"]}\"'\n }\n ]\n )\n\n def get(self):\n return 'hello :D'\n \n def post(self):\n data = request.get_json()\n return self.completion(data)\n\napi.add_resource(Lessons, '/')\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"HectorC99/mandarin-learning-app","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"41578369647","text":"from django.conf.urls.defaults import *\nfrom django.views.generic import DetailView, ListView\nfrom talks.models import Talk\nfrom polls.models import Poll\nfrom polls.models import Comment\n\nurlpatterns = patterns('',\n (r'^$',\n ListView.as_view(\n queryset=Talk.objects.order_by('-pub_date'),\n context_object_name='latest_talk_list',\n template_name='talks/index.html')),\n (r'^(?P\\d+)/$',\n DetailView.as_view(\n model=Talk,\n template_name='talks/detail.html')),\n (r'^(?P\\d+)/comments/$',\n DetailView.as_view(\n model=Talk,\n template_name='talks/comments.html')),\n )\n","repo_name":"FND/Eventbewertungen","sub_path":"talks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"38553808889","text":"#save list of indices where epistatic strength is smaller\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nnum_samples = 1000\n\n# l = []\n# for i in range(50000):\n# with open(\"../../test_training/commands/command_stronger_\" + str(i), \"r\") as f:\n# s = f.readlines()[0].split()\n# if float(s[-1]) > float(s[-4]):\n# l.append(i)\n\n# with open(\"smaller_epistatic.txt\", \"w\") as f:\n# f.write(str(l))\n\n# exit()\nfor i in range(87500):\n with open(\"../../test_training/commands/command_stronger_\" + str(i), \"r\") as f:\n s = f.readlines()[0].split()\n\n if int(s[3]) > 550:\n print(i)\n exit()\n\n\nl = []\n\ny = []\nx = []\nidx = list(range(87500))\nrandom.shuffle(idx)\nfor p,i in enumerate(idx):\n\n sampling_file = \"../../test_training/sampled_genotypes/sample_stronger_\" + str(i)\n try:\n with open(sampling_file, \"r\") as f:\n lines = f.readlines()\n except FileNotFoundError:\n continue\n\n X = [[float(l) for l in line[:-1]] for line in lines]\n\n X = np.array(X) - 1\n with open(\"../../test_training/commands/command_stronger_\" + str(i), \"r\") as f:\n s = f.readlines()[0].split()\n\n X = X.sum(axis=0)\n\n points = [float(s[6]), float(s[7])]\n inds = [round(num_samples*point - 0.5) for point in points]\n regular_site = round(num_samples*float(s[10]) - 0.5)\n\n # x.append(float(s[8]) + float(s[11]))\n # y.append(min(X))\n # y.append(min(X[inds]) - X[regular_site])\n\n if min(X) > -85:\n l.append(i)\n\n if p % 1000 == 0:\n print(p)\n # print(s)\n # else: \n # print(s)\n\nwith open(\"smaller_strength.txt\", \"w\") as f:\n f.write(str(l))\n\n# plt.scatter(x,y)\n# 
plt.show()\n\n\nprint(len(l))\n\n\n\n","repo_name":"genicos/learned_epistasis","sub_path":"simulate_examples/training4/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"13930159521","text":"from flask import Flask\nfrom flask import g\nfrom flask import send_file\nfrom flask import request\nfrom flask import session\nfrom flask import abort\nfrom typing import Dict, Tuple, Union\nfrom os import urandom\nfrom flask_caching import Cache\nimport os.path\nimport lmdb\nimport struct\nimport hashlib\n\nSECRET_KEY = urandom(12)\nSESSION_TYPE = \"redis\"\napp = Flask(__name__)\napp.config.from_object(__name__)\ncache = Cache(config={\"CACHE_TYPE\": \"SimpleCache\"})\ncache.init_app(app)\n\ndef get_credentials() -> Union[None, Tuple[str, str]]:\n if os.path.exists(\"auth\"):\n with open(\"auth\") as fp:\n return tuple(fp.read().split()[:2])\n else:\n return None\n\n\ndef get_env():\n if getattr(g, \"lmdb_env\", None) is None:\n g.lmdb_env = lmdb.open(\"db\", max_dbs=2)\n return g.lmdb_env\n\n\ndef get_db(db_name):\n if getattr(g, f\"db_{db_name}\", None) is None:\n setattr(g, f\"db_{db_name}\", get_env().open_db(db_name.encode()))\n\n return getattr(g, f\"db_{db_name}\")\n\n\ndef get_login_method():\n if os.path.exists(\"auth\"):\n return \"auth\"\n else:\n return \"open\"\n\n@app.route(\"/dot.png\")\ndef dot():\n url: bytes = request.headers.get(\"referer\", \"dummy\").encode()\n useragent: bytes = request.headers.get(\"user-agent\", \"dummy\").encode()\n hit_db = get_db(\"hit\")\n useragent_db = get_db(\"ua\")\n with get_env().begin(write=True, db=hit_db) as txn:\n current_hit_packed: bytes = bytes(txn.get(url, default=b\"\\x00\"*8))\n current_hit_unpacked: int = struct.unpack(\"Q\", current_hit_packed)[0]\n current_hit_unpacked += 1\n current_hit_packed = struct.pack(\"Q\", current_hit_unpacked)\n if current_hit_packed == 1:\n txn.put(url, current_hit_packed)\n else:\n txn.replace(url, current_hit_packed)\n \n with get_env().begin(write=True, db=useragent_db) as txn:\n current_hit_packed: bytes = bytes(txn.get(useragent, default=b\"\\x00\"*8))\n current_hit_unpacked: int = struct.unpack(\"Q\", current_hit_packed)[0]\n current_hit_unpacked += 1\n current_hit_packed = struct.pack(\"Q\", current_hit_unpacked)\n if current_hit_unpacked == 1:\n txn.put(useragent, current_hit_packed)\n else:\n txn.replace(useragent, current_hit_packed)\n\n return send_file(\"dot.png\", mimetype=\"image/png\")\n\n\n@app.route(\"/stats\")\ndef stats():\n if session.get(\"who\", \"noone\") == \"noone\":\n abort(403)\n result: Dict[str, int] = dict()\n with get_env().begin(db=get_db(\"hit\")) as txn:\n for url, hit_packed in txn.cursor():\n result[url.decode()] = struct.unpack(\"Q\", hit_packed)[0]\n result[\"total\"] = sum(result.values())\n return result\n\n\n@app.route(\"/login\")\ndef login_method():\n return { \"method\": get_login_method() }\n\n\n@app.route(\"/login\", methods=(\"POST\", ))\ndef login():\n if not request.is_json:\n abort(404)\n credentials = get_credentials()\n if credentials is None:\n session[\"who\"] = \"admin\"\n print(session)\n return { \"logged_in\": \"success\"}\n else:\n username = request.json.get(\"username\")\n password = request.json.get(\"password\")\n \n if not username or not password:\n abort(404)\n \n if username != credentials[0]:\n return { \"logged_in\": \"fail\" }\n elif credentials[1] != hashlib.sha256(password.encode()).hexdigest():\n return { 
\"logged_in\": \"fail\" }\n else:\n session[\"who\"] = \"admin\"\n return { \"logged_in\": \"success\" }\n\n\n@app.route(\"/logout\")\ndef logout():\n session[\"who\"] = \"noone\"\n return {}\n\n@app.route(\"/useragent\")\n@app.route(\"/useragent/\", defaults=dict(text=\"\"))\n@app.route(\"/useragent/\")\ndef useragent(text: str = \"\"):\n if session.get(\"who\", \"noone\") == \"noone\":\n abort(403)\n result = dict(result=0, total=0)\n with get_env().begin(db=get_db(\"ua\")) as txn:\n for useragent, hit_packed in txn.cursor():\n hit_unpacked: int = struct.unpack(\"Q\", hit_packed)[0]\n if text in useragent.decode():\n result[\"result\"] += hit_unpacked\n result[\"total\"] += hit_unpacked\n\n return result\n\n\n@app.route(\"/whoami\")\ndef whoami():\n return { \"youre\": session.get(\"who\", \"noone\") }\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"farooqkz/tomato","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"26379969144","text":"# -*- coding: utf-8 -*-\n\n# Spanish language sounds configuration\n\nfrom tts_common import filename\n\n\nsystemSounds = []\nsounds = []\n\nfor i in range(101):\n systemSounds.append((str(i), filename(i)))\nsystemSounds.append((\"ciento\", filename(101)))\nsystemSounds.append((\"doscientos\", filename(102)))\nsystemSounds.append((\"trescientos\", filename(103)))\nsystemSounds.append((\"cuatrocientos\", filename(104)))\nsystemSounds.append((\"quinientos\", filename(105)))\nsystemSounds.append((\"sescientos\", filename(106)))\nsystemSounds.append((\"setecientos\", filename(107)))\nsystemSounds.append((\"ochocientos\", filename(108)))\nsystemSounds.append((\"nuevecientos\", filename(109)))\nsystemSounds.append((\"mil\", filename(110)))\nfor i, s in enumerate([\"coma\", \"un\", \"una\", \"i\", \"meno\", \"hora\", \"horas\", \"minuto\", \"minutos\", \"segundo\", \"segundos\"]):\n systemSounds.append((s, filename(111 + i)))\nfor i, (s, f) in enumerate([(\"Voltio\",\"volt0\"),\n (\"ampério\", \"amp0\"),\n (\"miliamperios\", \"mamp0\"),\n (\"knots\", \"knot0\"),\n (\"metros por segundo\", \"mps0\"),\n (\"pie por segundo\", \"fps0\"),\n (\"kilómetro por hora\", \"kph0\"),\n (\"millas por hora\", \"mph0\"),\n (\"metros\", \"meter0\"),\n (\"pie\", \"foot0\"),\n (\"grados\", \"celsius0\"),\n (\"fahrenheit\", \"fahr0\"),\n (\"por ciento\", \"percent0\"),\n (\"miliamperios por hora\", \"mamph0\"),\n (\"vatio\", \"watt0\"),\n (\"millivatio\", \"mwatt0\"),\n (\"D B\", \"db0\"),\n (\"R P M\", \"rpm0\"),\n (\"g\", \"g0\"),\n (\"grados\", \"degree0\"),\n (\"radianes \", \"rad0\"),\n (\"mililitro\", \"ml0\"),\n (\"onzas\", \"founce0\"),\n (\"hora\", \"hour0\"), (\"horas\", \"hour1\"),\n (\"milliliter per minute\", \"mlpm0\"), (\"milliliters per minute\", \"mlpm1\"),\n (\"minuto\", \"minute0\"), (\"minutos\", \"minute1\"),\n (\"segundo\", \"second0\"), (\"segundos\", \"second1\"),\n ]):\n systemSounds.append((s, filename(f)))\nfor s, f in [(\"me tienes abandonada\", \"inactiv\"),\n (\"batería del transmisor baja\", \"lowbatt\"),\n (\"El acelerador está activado, por favor, corrijalo\", \"thralert\"),\n (\"Los interruptores no están en la posición correcta, por favor corrijalo\", \"swalert\"),\n (\"eeprom corrompida\", \"eebad\"),\n (\"error\", \"error\"),\n (\"trim centrado\", \"midtrim\"),\n (\"trim al máximo\", \"maxtrim\"),\n (\"trim al mínimo\", \"mintrim\"),\n (\"consumo alto\", \"highmah\"),\n 
(\"temperatura alta\", \"hightemp\"),\n (\"Bienvenido a open t equis\", \"hello\"),\n (\"señal baja\", \"rssi_org\"),\n (\"señal crítica\", \"rssi_red\"),\n (\"Problemas con la antena del transmisor\", \"swr_red\"),\n (\"Sin telemetría\", \"telemko\"),\n (\"Telemetría disponible\", \"telemok\"),\n (\"sobrecarga de servo\", \"servoko\"),\n (\"sobrecarga de potencia\", \"rxko\"),\n (\"El receptor aún está encendido\", \"modelpwr\"),\n (\"cronómetro uno terminado\", \"timovr1\"),\n (\"cronómetro dos terminado\", \"timovr2\"),\n (\"cronómetro tres terminado\", \"timovr3\"),\n ]:\n systemSounds.append((s, filename(f)))\nfor i, s in enumerate([\"cronómetro\", \"cronómetro\", \"transmisión\", \"recepción\", \"A1\", \"A2\", \"altitud\", \"motor\",\n \"combustible\", \"temperatura\", \"temperatura\", \"velocidad\", \"distancia\", \"altitude\", \"célula lipo\",\n \"Total lipo\", \"voltaje\", \"corriente\", \"consumo\", \"potencia\", \"aceleración X\", \"aceleración Y\", \"aceleración Z\",\n \"dirección\", \"variómetro\", \"minimo\", \"máximo\"]):\n systemSounds.append((s, filename(141 + i)))\nfor i, (s, f) in enumerate([(\"tren arriba.\", \"gearup\"),\n (\"tren abajo.\", \"geardn\"),\n (\"flaps arriba\", \"flapup\"),\n (\"flaps abajo\", \"flapdn\"),\n (\"aterrizaje\", \"attero\"),\n (\"modo de entrenamiento activado\", \"trnon\"),\n (\"modo de entrenamiento deshabilitado\", \"trnoff\"),\n (\"motor apagado\", \"engoff\"),\n (\"muy alto\", \"tohigh\"),\n (\"muy bajo\", \"tolow\"),\n (\"bateria baja\", \"lowbat\"),\n (\"crou, activo\", \"crowon\"),\n (\"crou, desligado\", \"crowof\"),\n (\"modo de velocidad\", \"spdmod\"),\n (\"modo de térmica\", \"thmmod\"),\n (\"modo de vuelo normal\", \"nrmmod\"),\n (\"modo de vuelo 1\", \"fltmd1\"),\n (\"modo de vuelo 2\", \"fltmd2\"),\n (\"modo de vuelo 3\", \"fltmd3\"),\n (\"modo de vuelo 4\", \"fltmd4\"),\n (\"modo de vuelo 5\", \"fltmd5\"),\n (\"modo de vuelo 6\", \"fltmd6\"),\n (\"modo de vuelo 7\", \"fltmd7\"),\n (\"modo de vuelo 8\", \"fltmd8\"),\n (\"modo de vuelo 9\", \"fltmd9\"),\n ]):\n sounds.append((s, filename(f)))\n","repo_name":"opentx/opentx","sub_path":"radio/util/tts_es.py","file_name":"tts_es.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"es","doc_type":"code","stars":1978,"dataset":"github-code","pt":"97"} +{"seq_id":"34165348267","text":"PLACEHOLDER = \"[name]\"\n\nwith open(\".\\mail-merging\\invited_names.txt\") as names_file:\n # names = names_file.read()\n names = names_file.readlines()\n print(names)\n\nwith open(\".\\mail-meging\\starting_letter.txt\") as letter_file:\n letter_content = letter_file.read()\n for name in names:\n stripped_name = name.strip()\n new_letter = letter_content.replace(PLACEHOLDER, stripped_name)\n print(new_letter)\n with open(\".\\mail-merging\\ReadytoSend\\letter_for_{stripped_name}.txt\", mode=\"w\" ) as completed_letter:\n completed_letter = completed_letter.write(new_letter)\n print(completed_letter)\n","repo_name":"nabeela08/python-projects","sub_path":"mail-merging/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"19981247267","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport os \nfrom scipy.ndimage import gaussian_filter\n\n# The following 4 things should be passed FROM the second GUI\ntxt_directory = \"C:/Users/acer/Desktop/Test/exp2/labels/image1.txt\"\nimage_directory = 
\"C:/Users/acer/Desktop/sandbox-rgbi/yolov7/inference/images/image1.jpg\"\noutput_directory = \"C:/Users/acer/Desktop/Test\"\noutput_image_name = \"output_image2\"\n\n\n\ndef denormalize_box(x, y, w, h, image_width, image_height):\n x_new = float(x)\n y_new = float(y)\n w_new = float(w)\n h_new = float(h)\n # Denormalize the coordinates\n denorm_x = int(x_new * image_width)\n denorm_y = int(y_new * image_height)\n denorm_w = int(w_new * image_width)\n denorm_h = int(h_new * image_height)\n\n # Calculate the top-left corner coordinates\n x1 = int(denorm_x - (denorm_w / 2))\n y1 = int(denorm_y - (denorm_h / 2))\n\n # Calculate the bottom-right corner coordinates\n x2 = int(denorm_x + (denorm_w / 2))\n y2 = int(denorm_y + (denorm_h / 2))\n\n return x1, y1, x2, y2\n\ndef GT_Generate(txt_directory, image_directory, output_directory, output_image_name):\n txt_path = txt_directory\n with open(txt_path, \"r\") as f:\n annotations = f.readlines()\n\n img=cv2.imread(image_directory,0)\n img=img.astype(np.float32, copy=False)\n ht=img.shape[0]\n wd=img.shape[1]\n Coordinates=[]\n Main_Zeros=np.zeros((ht,wd),dtype=np.float32)\n\n # Create then canvas\n height, width = ht, wd\n image = np.ones((height, width, 3), dtype=np.uint8) * 255\n\n for ann in annotations:\n ann_parts = ann.strip().split(\" \")\n if len(ann_parts) != 5:\n print(f\"Invalid annotation format in {txt_path}: {ann}\")\n else:\n a, x, y, w, h = ann_parts\n x1, y1, x2, y2 = denormalize_box(x, y, w, h, wd, ht)\n cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)\n Coordinates.append([x1,y1,x2-x1,y2-y1,1/((x2-x1)*(y2-y1))])\n\n for bbox_left, bbox_top, bbox_w, bbox_h, bbox_weight in Coordinates:\n for i in range(int(bbox_left),int(bbox_left+bbox_w)):\n for j in range(int(bbox_top),int(bbox_top+bbox_h)):\n Main_Zeros[j, i] += bbox_weight\n den = gaussian_filter(Main_Zeros,sigma=6,truncate=6*6)\n\n normalized_array = (den - den.min()) * (255 / (den.max() - den.min()))\n\n plt.imshow(normalized_array, cmap='gray', vmin=0, vmax=255)\n plt.axis('off')\n # save the image in certain output path\n # A Ground Truth Image will be generated!!!!!!!\n plt.savefig(output_directory + \"/\" + output_image_name + \".jpg\", format = 'jpg', bbox_inches='tight',pad_inches=0)\n # plt.show()\n","repo_name":"deansmile/pedestrain_counting_SE","sub_path":"GT_Generate.py","file_name":"GT_Generate.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"44211344304","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tragopan', '0085_auto_20160128_1322'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fuelelementtypeposition',\n name='fuel_assembly_type',\n field=models.ForeignKey(to='tragopan.FuelAssemblyType', related_name='rod_positions'),\n ),\n ]\n","repo_name":"nustarnuclear/orient_linux","sub_path":"tragopan/migrations/0086_auto_20160128_1506.py","file_name":"0086_auto_20160128_1506.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"29580154540","text":"import ast\n\nfrom ANF_transformer.transformer import _new_name\nfrom ANF_transformer.transformer import _get_all_used_variable_names\n\n\ndef test_get_new_name_does_not_dupclicate_names():\n name = 'x'\n names_already_used = ['x', 'x0', 'x1']\n 
assert(_new_name(name, names_already_used) == 'x2')\n\ndef test_get_all_used_variable_names_gets_all_names_in_use():\n node = ast.parse(\n\"\"\"\nimport ast\ndef func():\n x = [1, 2, 3]\n len_x = len(x)\n for y in x:\n pass\n\"\"\")\n names_used = {'ast': None, 'func': None, 'x': None, 'len_x': None, 'y': None, 'len': None}\n assert(_get_all_used_variable_names(node) == names_used)\n","repo_name":"csci5535-s20/project-containerless-py","sub_path":"source code from other repo/ANF_transformer/tests/name_utils_test.py","file_name":"name_utils_test.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"16489226541","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\ndef sockMerchant(n, ar):\n checked = []\n sumOfPairs = 0\n totalCount = 0\n \n for x in ar:\n if n > totalCount and x not in checked:\n totalCount += ar.count(x)\n sumOfPairs += ar.count(x) // 2\n checked.append(x)\n return sumOfPairs\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input().strip())\n\n ar = list(map(int, input().rstrip().split()))\n\n result = sockMerchant(n, ar)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"d3cod3monk78/hackerrank","sub_path":"salesByMatch.py","file_name":"salesByMatch.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"5479228617","text":"##cake will have one candle for each year of their total age. \n##They will only be able to blow out the tallest of the candles. \n## Count how many candles are tallest.\n\ndef birthdayCakeCandles(candles):\n count = 0\n max_candles = max(candles)\n for i in candles:\n if i == max_candles:\n count +=1\n return count\n\n## Lessons learnt: find max first from the list and declare it outside the loop and then use counter\n","repo_name":"sherryjasal/hackerrank_DS_algos","sub_path":"birthdayCakeCandles.py","file_name":"birthdayCakeCandles.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"40241157199","text":"import torch\r\nimport os\r\nimport os.path as osp\r\nimport init_paths\r\nimport numpy as np\r\nfrom opts import opts\r\nimport glob\r\nfrom torch_geometric.data import Data\r\nimport networkx as nx\r\n# from torch_geometric.utils import to_networkx\r\nimport matplotlib.pyplot as plt\r\ndevice = torch.device(\"cuda:4\")\r\nroot = '/home/coin/datasets/MOT2017/MOT17/images/train/MOT17-04-SDP/img1/'\r\ndef to_networkx(data, node_attrs=None, edge_attrs=None, to_undirected=False,\r\n remove_self_loops=False):\r\n if to_undirected:\r\n G = nx.Graph()\r\n else:\r\n G = nx.DiGraph()\r\n\r\n G.add_nodes_from(range(data.num_nodes))\r\n\r\n values = {}\r\n for key, item in data:\r\n if torch.is_tensor(item):\r\n values[key] = item.squeeze().tolist()\r\n else:\r\n values[key] = item\r\n if isinstance(values[key], (list, tuple)) and len(values[key]) == 1:\r\n values[key] = item[0]\r\n\r\n for i, (u, v) in enumerate(data.edge_index.t().tolist()):\r\n # print(i)\r\n if to_undirected and v > u:\r\n continue\r\n\r\n if remove_self_loops and u == v:\r\n continue\r\n\r\n G.add_edge(u, v,weight=values['edge_weight'][i])\r\n\r\n return G\r\ndef visualize(h, color, epoch=None, loss=None):\r\n plt.figure(figsize=(10,10))\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n if 
torch.is_tensor(h):\r\n h = h.detach().cpu().numpy()\r\n plt.scatter(h[:, 0], h[:, 1], s=140, c=color, cmap=\"Set2\")\r\n if epoch is not None and loss is not None:\r\n plt.xlabel(f'Epoch: {epoch}, Loss: {loss.item():.4f}', fontsize=16)\r\n else:\r\n labels = nx.get_edge_attributes(G, 'weight')\r\n\r\n # pos = nx.get_node_attributes(G, 'x')\r\n # pos = nx.spring_layout(G, seed=42)\r\n nx.draw_networkx(G, pos=nx.spring_layout(G,seed=2), with_labels=True,\r\n node_color=color, cmap=\"Set2\")\r\n nx.draw_networkx_edge_labels(G, pos=nx.spring_layout(G,seed=2), edge_labels=labels)\r\n\r\n # nx.draw_networkx(G, pos=nx.spring_layout(G, seed=42), with_labels=True,\r\n # node_color=color, cmap=\"Set2\")\r\n\r\n plt.show()\r\n plt.savefig(\"/home/kevinwm99/MOT/graph879.png\")\r\n\r\ndef cosine_distance(a, b, data_is_normalized=False):\r\n if not data_is_normalized:\r\n a = np.asarray(a) / np.linalg.norm(a)\r\n b = np.asarray(b) / np.linalg.norm(b)\r\n return 1. - np.dot(a, b.T)\r\nif __name__ == '__main__':\r\n embed_list = (sorted(glob.glob(osp.join(root,'*.pt'))))\r\n print(\"initialize frame, edge index, node feature, edge weight\")\r\n frame,edge_idx_total,node_feature_total,edge_weight_total = {},{},{},{}\r\n embed_total =[]\r\n for emb in (embed_list):\r\n embed_total.append(torch.load(emb))\r\n\r\n for emb in (embed_total):\r\n for i, feat in emb.items():\r\n frame[int(i)] = []\r\n edge_idx_total[int(i)] = []\r\n node_feature_total[int(i)] = []\r\n edge_weight_total[int(i)] = []\r\n\r\n for n,emb in enumerate(embed_list):\r\n frame_num = int(emb[::-1][emb[::-1].find('.')+1:emb[::-1].find('/')][::-1]) # get frame id\r\n embed = embed_total[n]\r\n for i, feat in embed.items():\r\n frame[int(i)].append(frame_num)\r\n node_feature_total[int(i)].append(feat.numpy())\r\n\r\n for object, frame_id in frame.items():\r\n frame_id = torch.tensor(frame_id)\r\n step = torch.abs(frame_id.reshape(-1,1)-frame_id.reshape(1,-1))\r\n step = step==1\r\n r,c = torch.where(step)\r\n edge_idx_total[object].append((torch.stack((r,c))))\r\n # edge_weight_total[object]=[]\r\n for i,j in zip(r,c):\r\n weight=cosine_distance(node_feature_total[object][i],node_feature_total[object][j])\r\n # if weight not in edge_weight_total[object]:\r\n edge_weight_total[object].append(weight)\r\n # construct one graph for id 909\r\n ID = 902\r\n edge_ix = torch.from_numpy(np.asarray(edge_idx_total[ID][0])).long()\r\n edge_weight = np.round(torch.from_numpy(np.asarray(edge_weight_total[ID])).numpy(),4)\r\n x = torch.from_numpy(np.asarray(node_feature_total[ID]))\r\n y = torch.from_numpy(np.asarray([ID]*len(x)))\r\n # edge_weight_ = [x for pair in zip(edge_weight,edge_weight) for x in pair]\r\n print(sorted(frame))\r\n print(len(edge_weight))\r\n print(len(x))\r\n data = Data(x=x,edge_index = edge_ix, edge_weight=edge_weight,y=y)\r\n # print(f'Number of nodes: {data.num_nodes}')\r\n # print(f'Number of edges: {data.num_edges}')\r\n # print(f'Number of edge features: {data.num_edge_features}')\r\n\r\n G = to_networkx(data, to_undirected=True,edge_attrs=['edge_weight'])\r\n visualize(G, color=data.y)","repo_name":"phuvm72/graph_mot","sub_path":"base/get_edge.py","file_name":"get_edge.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"37088240443","text":"# from importlib import import_module\r\n\r\n# def _get(data, index=0, e=None):\r\n# \ttry:return data[index] if type(data) in (list, dict, set, tuple) else data\r\n# \texcept:return 
e\r\n\r\n# def require(module, package=None, args=[], kwargs={}, var=False):\r\n# \t\"\"\"Import a module for only a particular function\r\n# The 'module' argument vspecifies the module to import\r\n\r\n# The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import.\r\n\r\n# The function recieving the decorator must also recieve the module(s) as parameter(s)\r\n\r\n# \t@require([\"numpy\", \"pandas\"])\r\n# \tdef main(np, pd):\r\n# \t\t...\r\n\r\n# \t@require(\"numpy\")\r\n# \tdef main(np):\r\n# \t\t...\r\n\r\n# You could also assign the modules to variables\r\n# \tnp = require(\"numpy\")\r\n\r\n# \tnp, pd, plt = require([\"numpy\", \"pandas\", \"matplotlib.plot\"])\r\n\r\n# \"\"\"\r\n# \tif type(module) is str and type(package) in (str, None):\r\n# \t\tmodules = import_module(module, package)\r\n# \telif type(module) is list:\r\n# \t\tmodules = []\r\n# \t\tfor x in module:\r\n# \t\t\tpackag = _get(package, x)\r\n# \t\t\tmodules.append([import_module(x, packag)])\r\n# \telse:modules=[]\r\n\t\r\n# \tif var == False:\r\n# \t\tdef wrap(func):\r\n# \t\t\tfunc(*modules, *args, **kwargs)\r\n# \t\treturn wrap\r\n# \telse:\r\n# \t\treturn modules\r\n\r\nimport sys\r\n\r\ndef _get(data, index=0, e=None):\r\n try:return data[index] if type(data) in (list, dict, set, tuple) else data\r\n except:return e\r\n\r\ndef load(target, **namespace):\r\n\t# Source bottle.py:load\r\n\tmodule, target = target.split(\":\", 1) if ':' in target else (target, None)\r\n\tif module not in sys.modules: __import__(module)\r\n\tif not target: return sys.modules[module]\r\n\tif target.isalnum(): return getattr(sys.modules[module], target)\r\n\tpackage_name = module.split('.')[0]\r\n\tnamespace[package_name] = sys.modules[package_name]\r\n\treturn eval('%s.%s' % (module, target), namespace)\r\n\r\nclass require:\r\n def __init__(self, module):\r\n \"\"\"Import a module for only a particular function\r\nThe 'module' argument vspecifies the module to import\r\n\r\nThe 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import.\r\n\r\nThe function recieving the decorator must also recieve the module(s) as parameter(s)\r\n\r\nOther function related arguments are passed when calling the function\r\n```python\r\n @require([\"numpy\", \"pandas\"])\r\n def main(np, pd, x):\r\n ...\r\n print(x)\r\n \r\n main(5)\r\n # prints 5\r\n\r\n @require(\"numpy\")\r\n def main(np):\r\n ...\r\n```\r\nYou can also call specific functions from modules\r\n```python\r\n\t@require(\"numpy:array\")\r\n\tdef plt(func, array):\r\n\t\tarr = func(array)\r\n\t\treturn arr\r\n\tplt([2,4,5,6,8])\r\n\r\n # Is same as:\r\n\r\n\tdef plt(array):\r\n\t\tfrom numpy import array as func\r\n\t\tarr = func(array)\r\n\t\treturn arr\r\n\tplt([2,4,5,6,8])\r\n```\r\nYou could also assign the modules to variables.. 
just put an extra parentheses to simulate function call\r\n```python\r\n np = require(\"numpy\")()\r\n\r\n np, pd, plt = require([\"numpy\", \"pandas\", \"matplotlib.plot\"])()\r\n```\r\n\"\"\"\r\n self.module = module\r\n \r\n if type(self.module) is str:\r\n self.modules = load(self.module)\r\n elif type(self.module) is list:\r\n self.modules = []\r\n for x in self.module:\r\n self.modules.append(load(x))\r\n else:self.modules=[]\r\n \r\n def __call__(self, func=None):\r\n if not func:\r\n return self.modules\r\n if type(self.modules) is not list:\r\n self.modules = [self.modules]\r\n return wrap(func, self.modules)\r\n \r\nclass wrap:\r\n def __init__(self, f, m):\r\n self.f = f\r\n self.m = m\r\n \r\n def __call__(self, *args, **kwargs):\r\n return self.f(*self.m, *args, **kwargs)\r\n","repo_name":"7HR4IZ3/requirePy","sub_path":"require.py","file_name":"require.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"41483452961","text":"from django import forms\nfrom django.forms import ModelForm\n\nfrom .models import Clients\n\nclass ClientForm(forms.ModelForm):\n name = forms.CharField(label='', widget=forms.TextInput(attrs={\"placeholder\": \"Name of the client\"}))\n short_code = forms.CharField(label='Code from the ads campaigns', widget=forms.TextInput(attrs={\"placeholder\": \"CLI\"}))\n\n class Meta:\n model = Clients\n fields = [\n 'name',\n 'short_code',\n 'is_active',\n ]","repo_name":"ilkka-puumala/proto_app","sub_path":"clients/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"14556902662","text":"# -*- coding: utf-8 -*-\nfrom Qt.QtCore import QByteArray, Qt, Slot\nfrom Qt.QtGui import QGuiApplication\nfrom Qt.QtNetwork import QAbstractSocket, QHostAddress, QTcpServer, QTcpSocket\nfrom Qt.QtWidgets import (\n QApplication,\n QDialog,\n QDialogButtonBox,\n QLabel,\n QMessageBox,\n QProgressBar,\n QPushButton,\n QVBoxLayout,\n QWidget,\n)\n\nTOTAL_BYTES = 50 * 1024 * 1024\nPAYLOAD_SIZE = 64 * 1024 # 64 KB\n\n\nclass Dialog(QDialog):\n def __init__(self, parent: QWidget = None) -> None:\n super().__init__(parent)\n\n self.tcpServer = QTcpServer()\n self.tcpClient = QTcpSocket()\n self.tcpServerConnection: QTcpSocket = None\n\n self.bytesToWrite = 0\n self.bytesWritten = 0\n self.bytesReceived = 0\n\n self.clientProgressBar = QProgressBar()\n self.clientStatusLabel = QLabel(self.tr(\"Client ready\"))\n self.serverProgressBar = QProgressBar()\n self.serverStatusLabel = QLabel(self.tr(\"Server ready\"))\n\n self.startButton = QPushButton(self.tr(\"&Start\"))\n self.quitButton = QPushButton(self.tr(\"&Quit\"))\n\n self.buttonBox = QDialogButtonBox()\n self.buttonBox.addButton(self.startButton, QDialogButtonBox.ActionRole)\n self.buttonBox.addButton(self.quitButton, QDialogButtonBox.RejectRole)\n\n self.startButton.clicked.connect(self.start)\n self.quitButton.clicked.connect(self.close)\n self.tcpServer.newConnection.connect(self.acceptConnection)\n self.tcpClient.connected.connect(self.startTransfer)\n self.tcpClient.bytesWritten.connect(self.updateClientProgress)\n self.tcpClient.error.connect(self.displayError)\n\n mainLayout = QVBoxLayout(self)\n mainLayout.addWidget(self.clientProgressBar)\n mainLayout.addWidget(self.clientStatusLabel)\n mainLayout.addWidget(self.serverProgressBar)\n mainLayout.addWidget(self.serverStatusLabel)\n 
mainLayout.addStretch(1)\n mainLayout.addSpacing(10)\n mainLayout.addWidget(self.buttonBox)\n\n @Slot()\n def start(self):\n self.startButton.setEnabled(False)\n\n QGuiApplication.setOverrideCursor(Qt.WaitCursor)\n\n self.bytesWritten = 0\n self.bytesReceived = 0\n\n while not self.tcpServer.isListening() and not self.tcpServer.listen():\n ret = QMessageBox.critical(\n self,\n self.tr(\"Loopback\"),\n self.tr(\n \"Unable to start the test: %s\" % (self.tcpServer.errorString())\n ),\n QMessageBox.Retry | QMessageBox.Cancel,\n )\n if ret == QMessageBox.Cancel:\n return\n\n self.serverStatusLabel.setText(self.tr(\"Listening\"))\n self.clientStatusLabel.setText(self.tr(\"Connecting\"))\n self.tcpClient.connectToHost(\n QHostAddress.LocalHost, self.tcpServer.serverPort()\n )\n\n @Slot()\n def acceptConnection(self):\n self.tcpServerConnection = self.tcpServer.nextPendingConnection()\n if not self.tcpServerConnection:\n self.serverStatusLabel.setText(\n self.tr(\"Error: got invalid pending connection!\")\n )\n return\n\n self.tcpServerConnection.readyRead.connect(self.updateServerProgress)\n self.tcpServerConnection.error.connect(self.displayError)\n self.tcpServerConnection.disconnected.connect(\n self.tcpServerConnection.deleteLater\n )\n\n self.serverStatusLabel.setText(self.tr(\"Accepted connection\"))\n self.tcpServer.close()\n\n @Slot()\n def startTransfer(self):\n # called when the TCP client connected to the loopback server\n self.bytesToWrite = TOTAL_BYTES - int(\n self.tcpClient.write(QByteArray(PAYLOAD_SIZE, \"@\"))\n )\n self.clientStatusLabel.setText(self.tr(\"Connected\"))\n\n @Slot()\n def updateServerProgress(self):\n self.bytesReceived += int(self.tcpServerConnection.bytesAvailable())\n self.tcpServerConnection.readAll()\n\n self.serverProgressBar.setMaximum(TOTAL_BYTES)\n self.serverProgressBar.setValue(self.bytesReceived)\n self.serverStatusLabel.setText(\n self.tr(\"Received %dMB\" % (self.bytesReceived / (1024 * 1024),))\n )\n\n if self.bytesReceived == TOTAL_BYTES:\n self.tcpServerConnection.close()\n self.startButton.setEnabled(True)\n\n QGuiApplication.restoreOverrideCursor()\n\n @Slot(\"qint64\")\n def updateClientProgress(self, numBytes):\n self.bytesWritten += int(numBytes)\n\n if self.bytesToWrite > 0 and self.tcpClient.bytesToWrite() <= 4 * PAYLOAD_SIZE:\n self.bytesToWrite -= self.tcpClient.write(\n QByteArray(min(self.bytesToWrite, PAYLOAD_SIZE), \"@\")\n )\n\n self.clientProgressBar.setMaximum(TOTAL_BYTES)\n self.clientProgressBar.setValue(self.bytesWritten)\n self.clientStatusLabel.setText(\n self.tr(\"Sent %dMB\" % (self.bytesWritten / (1024 * 1024),))\n )\n\n @Slot(QAbstractSocket.SocketError)\n def displayError(self, socketError):\n if socketError == QTcpSocket.RemoteHostClosedError:\n return\n\n QMessageBox.information(\n self,\n self.tr(\"Network error\"),\n self.tr(\n \"The following error occurred: {}.\".format(self.tcpClient.errorString())\n ),\n )\n self.tcpClient.close()\n self.tcpServer.close()\n self.clientProgressBar.reset()\n self.serverProgressBar.reset()\n self.clientStatusLabel.setText(self.tr(\"Client ready\"))\n self.serverStatusLabel.setText(self.tr(\"Server ready\"))\n self.startButton.setEnabled(True)\n QGuiApplication.restoreOverrideCursor()\n\n\ndef main() -> None:\n import sys\n\n app = QApplication(sys.argv)\n\n dialog = Dialog()\n dialog.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"eyllanesc/QtExamples","sub_path":"official/network/loopback/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"97"} +{"seq_id":"9369116565","text":"import time\n\nimport numpy as np\nfrom scipy.sparse import isspmatrix\nfrom scipy.special import expit as sigmoid\n\nfrom base_logistic import BaseLogistic\nfrom constants import INIT_WEIGHT_STD, LOSS_PER_EPOCH\nfrom memory import GradientMemory\nfrom parameters import Parameters\n\n\nclass LogisticSGD(BaseLogistic):\n \"\"\"\n 2 classes logistic regression on dense dataset.\n X: (num_samples, num_features)\n y: (num_features, ) 0, 1 labels\n \"\"\"\n\n def __init__(self, params: Parameters):\n super().__init__(params)\n self.w = None\n self.w_estimate = None\n\n def fit(self, X, y):\n num_samples, num_features = X.shape\n p = self.params\n\n losses = np.zeros(p.num_epoch + 1)\n\n if self.w is None:\n self.w = np.random.normal(0, INIT_WEIGHT_STD, size=(num_features,))\n self.w_estimate = np.copy(self.w)\n\n memory = GradientMemory(take_k=p.take_k, take_top=p.take_top, with_memory=p.with_memory, qsgd_s=p.qsgd_s)\n\n shuffled_indices = np.arange(num_samples)\n\n # epoch 0 loss evaluation\n losses[0] = self.loss(X, y)\n\n train_start = time.time()\n\n compute_loss_every = int(X.shape[0] / LOSS_PER_EPOCH)\n all_losses = np.zeros(LOSS_PER_EPOCH * p.num_epoch + 1)\n\n started = time.time()\n\n for epoch in np.arange(p.num_epoch):\n np.random.shuffle(shuffled_indices)\n\n for iteration in range(num_samples):\n t = epoch * num_samples + iteration\n if t % compute_loss_every == 0:\n loss = self.loss(X, y)\n print('{} t {} epoch {} iter {} loss {} elapsed {}s'.format(p, t, epoch, iteration, loss, time.time() - started))\n all_losses[t // compute_loss_every] = loss\n\n sample_idx = shuffled_indices[iteration]\n\n lr = self.lr(epoch, iteration, num_samples, num_features)\n\n x = X[sample_idx]\n\n minus_grad = y[sample_idx] * x * sigmoid(-y[sample_idx] * x.dot(self.w).squeeze())\n if isspmatrix(x):\n minus_grad = minus_grad.toarray().squeeze(0)\n if p.regularizer:\n minus_grad -= p.regularizer * self.w\n\n lr_minus_grad = memory(lr * minus_grad)\n self.w += lr_minus_grad\n\n self.update_estimate(t)\n\n losses[epoch + 1] = self.loss(X, y)\n print(\"epoch {}: loss {} score {}\".format(epoch, losses[epoch + 1], self.score(X, y)))\n\n print(\"Training took: {}s\".format(time.time() - train_start))\n\n return losses, all_losses\n","repo_name":"epfml/sparsifiedSGD","sub_path":"logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"97"} +{"seq_id":"6128018961","text":"import gc\nimport os.path\nimport shutil\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import transforms\n\nfrom net.Res_net import Res_net\nfrom net.Fc_net import Fc_net\nfrom net.Conv_net import Conv_net\n\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.benchmark = True\n\n\nclass CatDogDataset(Dataset):\n def __init__(self, root_dir, transform=None):\n self.root_dir = root_dir\n self.transform = transform\n self.image_paths = self.get_image_paths()\n\n def get_image_paths(self):\n image_paths = []\n for subdir in os.listdir(self.root_dir):\n subdir_path = os.path.join(self.root_dir, subdir)\n if 
os.path.isdir(subdir_path):\n for filename in os.listdir(subdir_path):\n if filename.endswith('.jpg'):\n image_path = os.path.join(subdir_path, filename)\n image_paths.append(image_path)\n return image_paths\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n image_path = self.image_paths[index]\n image = Image.open(image_path)\n\n if self.transform:\n image = self.transform(image)\n\n label = 0 if 'cat' in image_path else 1 # 根据文件路径确定标签,猫为0,狗为1\n one_hot = F.one_hot(torch.tensor(label), num_classes=2).float() # 进行独热编码\n\n return np.float32(image), np.float32(one_hot)\n\n\n# 自定义训练器\nclass Trainer:\n def __init__(self):\n # 数据集路径\n data_dir = r'E:/catdog/train'\n\n transform = transforms.Compose([transforms.ToTensor()])\n # 创建自定义数据集实例\n dataset = CatDogDataset(data_dir, transform=transform)\n # 创建数据加载器\n self.train_loader = DataLoader(dataset, batch_size=2, shuffle=True, pin_memory=True)\n\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n self.net = Conv_net()\n self.net.to(self.device)\n\n self.optim = torch.optim.Adam(self.net.parameters(), lr=0.001)\n\n def train(self):\n train_epoch_size = 500\n for epoch in range(train_epoch_size):\n sum_loss = 0\n for i, (img, label) in enumerate(self.train_loader):\n self.net.train()\n img, label = img.to(self.device), label.to(self.device)\n h = self.net(img)\n\n loss = torch.mean((h - label) ** 2)\n self.optim.zero_grad()\n\n self.optim.step()\n\n sum_loss += loss\n\n avg_loss = sum_loss / len(self.train_loader)\n print(f'第{epoch}轮次的损失是{avg_loss.item()}')\n gc.collect()\n torch.cuda.empty_cache()\n\n def test(self):\n pass\n\n\nif __name__ == '__main__':\n # deal_data()\n trainer = Trainer()\n trainer.train()\n","repo_name":"Mazmots/pytorchProject","sub_path":"module1_fcnet/cat_dog.py","file_name":"cat_dog.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"14425627973","text":"from __future__ import print_function, division\nimport sys, math\n\n# Modified cubic formula solution.\n# Rounds for floating point error.\ndef ashish(candies):\n p = -0.5\n q = 1.5*candies\n r = 1/6\n\n g = math.pow((q**2) + math.pow(r-(p**2), 3), 1/2)\n x = math.pow(q + g, 1/3) + math.pow(q - g, 1/3) + p\n return int(round(x, 8))\n\n# a=1/3, b=1/2, c=1/6 d=-candies\ndef cubic_forumla(a, b, c, d):\n p = -b/(3*a)\n q = (p**3) + (b*c - 3*a*d)/(6*(a**2))\n r = c/(3*a)\n\n g = math.pow((q**2) + math.pow(r-(p**2), 3), 1/2)\n x = math.pow(q + g, 1/3) + math.pow(q - g, 1/3) + p\n return int(round(x, 12))\n\ndef main():\n T = int(input())\n lines = [int(x.strip()) for x in sys.stdin.readlines()[:T]]\n\n for line in lines:\n print(ashish(line))\n\nif __name__ == '__main__':\n main()\n","repo_name":"shanesatterfield/hacker-rank","sub_path":"Python/Mathematics/Number_Theory/Little_Ashish/python_little_ashish/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"97"} +{"seq_id":"8941915020","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom datetime import date\nfrom 
django.contrib import auth\nfrom django.urls import reverse\n\nfrom texas.forms import *\nfrom texas.models import *\nfrom haikunator import Haikunator\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n@login_required(login_url='login')\ndef new_game(request):\n context = {}\n context['searchForm'] = SearchUser()\n user = request.user\n # edit here\n\n if request.method == 'GET':\n return render(request, 'new_game.html', context)\n else:\n # new a game\n # validate input ???\n entry_funds = request.POST['entry_funds']\n no_players = request.POST['no_players']\n haikunator = Haikunator()\n game_no = haikunator.haikunate()\n new_game = Game(creator=request.user, player_num=no_players, entry_funds=entry_funds, game_no=game_no)\n new_game.save()\n # new socket here?\n return render(request, 'game_init_success.html',\n {\"game_no\": game_no, \"entry_funds\": entry_funds, \"players\": no_players,\n \"email_form\": EmailPassword(), \"searchForm\": SearchUser()})\n\n\n@login_required(login_url='login')\ndef game_join(request):\n context = {}\n context['searchForm'] = SearchUser()\n if request.method == 'GET':\n context['join_room_form'] = JoinRoomForm()\n return render(request, 'game_join.html', context)\n\n form = JoinRoomForm(request.POST, username=request.user.username)\n context['join_room_form'] = form\n if not form.is_valid():\n return render(request, 'game_join.html', context)\n game_no = form.cleaned_data['room_number']\n return redirect(\"/game_ongoing/\" + game_no)\n\n\n@login_required(login_url='login')\ndef game_ongoing(request, game_no):\n context = {}\n context['searchForm'] = SearchUser()\n # Update the user's balance for entry funds\n try:\n game = Game.objects.get(game_no=game_no)\n except:\n log.debug('ws room does not exist label=%s', game_no)\n return HttpResponse('ws room does not exist label=' + game_no)\n if request.user.userinfo.balance < game.entry_funds:\n # TODO [Handle] no notification while fund efficient,need to change html or form\n log.debug('user %s fund insufficient', request.user.id)\n # Make the user not able to enter\n return render(request, 'new_game.html', context)\n\n context['game_no'] = game_no\n context['login_user'] = request.user\n players = game.players.all()\n context['players'] = players\n return render(request, 'game_ongoing.html', context)\n\n\n@login_required(login_url='login')\ndef exit_room(request, game_no, id):\n player = get_object_or_404(User, id=id)\n game = get_object_or_404(Game, game_no=game_no)\n game.players.remove(player)\n\n\n@login_required(login_url='login')\ndef dashboard(request):\n context = {}\n context['searchForm'] = SearchUser()\n user = request.user\n # edit here\n return render(request, 'dashboard.html', context)\n\n\n@login_required(login_url='login')\ndef myfriends(request):\n # show all friends list\n context = {}\n context['searchForm'] = SearchUser()\n userInfo = UserInfo.objects.get(user=request.user)\n context['friends'] = userInfo.friends.all()\n return render(request, 'myfriends.html', context)\n\n\n@login_required(login_url='login')\ndef search_friend(request):\n context = {}\n if request.method == 'GET':\n context['searchForm'] = SearchUser()\n return render(request, 'search_friend.html', context)\n\n form = SearchUser(request.POST)\n context['searchForm'] = form\n if not form.is_valid():\n return render(request, \"search_friend.html\", context)\n keyword = form.cleaned_data['keyword']\n keyword = keyword.strip()\n if keyword == '':\n users = User.objects.all()\n context['users'] = users\n return 
render(request, 'search_friend.html', context)\n users = User.objects.filter(username__icontains=keyword)\n context['users'] = users\n # edit here\n return render(request, 'search_friend.html', context)\n\n\n@login_required(login_url='login')\ndef game_result(request):\n context = {}\n context['searchForm'] = SearchUser()\n user = request.user\n # edit here\n return render(request, 'game_result.html', context)\n","repo_name":"GraceChan03/Texas-holdem-poker","sub_path":"texas/views_game.py","file_name":"views_game.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"42561389800","text":"import random\nfrom itertools import product\nfrom datasets.data_loader import ABSADataset\n\n\ndef get_composed_data(positives, negatives, random_threshold=0.2):\n random.seed(10)\n all_punctuation = '!\"#$%&+,-.;=@~。,'\n composed_records = []\n for item in product(positives, negatives):\n if random.random() > random_threshold:\n continue\n if item[0][\"aspect\"] == item[1][\"aspect\"]:\n if random.random() < 0.5:\n text = f\"{item[0]['text']} {random.choice(all_punctuation)} {item[1]['text']}\"\n else:\n text = f\"{item[1]['text']} {random.choice(all_punctuation)} {item[0]['text']}\"\n composed_records.append(\n {\"text\": text, \n \"aspect\": item[1][\"aspect\"], \n \"has_positive\": (item[0][\"polarity\"] == \"positive\") or (item[1][\"polarity\"] == \"positive\"),\n \"has_negative\": (item[0][\"polarity\"] == \"negative\") or (item[1][\"polarity\"] == \"negative\")\n })\n else:\n if random.random() < 0.5:\n text = f\"{item[0]['text']} {random.choice(all_punctuation)} {item[1]['text']}\"\n else:\n text = f\"{item[1]['text']} {random.choice(all_punctuation)} {item[0]['text']}\"\n composed_records.append(\n {\"text\": text, \n \"aspect\": item[0][\"aspect\"], \n \"has_positive\": item[0][\"polarity\"] == \"positive\",\n \"has_negative\": item[0][\"polarity\"] == \"negative\"})\n composed_records.append(\n {\"text\": text, \n \"aspect\": item[1][\"aspect\"], \n \"has_positive\": item[1][\"polarity\"] == \"positive\",\n \"has_negative\": item[1][\"polarity\"] == \"negative\"})\n return composed_records\n\n\nclass ComposedABSADataset:\n\n def __init__(self, dataset):\n self.raw_data = ABSADataset(dataset)\n self.train_data = self.initialize(self.raw_data.train_data)\n self.test_data = self.initialize(self.raw_data.test_data)\n \n def get_labels(self):\n return [False, True]\n\n def initialize(self, records):\n text_label = {}\n for item in records:\n aspect = item['aspect']\n text = f\"{item['text_left']} {item['aspect']} {item['text_right']}\"\n polarity = item['polarity']\n if text not in text_label:\n text_label[text] = {}\n text_label[text][aspect] = polarity\n\n records_multi_aspect = []\n records_single_aspect = []\n for text, value in text_label.items():\n if len(value) > 1:\n for aspect, polarity in value.items():\n records_multi_aspect.append(\n {\"text\": text, \"aspect\": aspect, \"polarity\": polarity})\n else:\n for aspect, polarity in value.items():\n records_single_aspect.append(\n {\"text\": text, \"aspect\": aspect, \"polarity\": polarity})\n\n pos_examples = [example for example in records_single_aspect if example[\"polarity\"] == \"positive\" and len(example[\"text\"]) < 60]\n neg_examples = [example for example in records_single_aspect if example[\"polarity\"] == \"negative\" and len(example[\"text\"]) < 60]\n\n new_records = []\n for dataset in [pos_examples, neg_examples]:\n for record 
in dataset:\n new_records.append(\n {\"text\": record[\"text\"], \n \"aspect\": record[\"aspect\"], \n \"has_positive\": record[\"polarity\"] == \"positive\",\n \"has_negative\": record[\"polarity\"] == \"negative\"})\n new_records.extend(\n get_composed_data(pos_examples, neg_examples, random_threshold=0.003))\n \n return new_records\n\n\nif __name__ == \"__main__\":\n ds = ComposedABSADataset(\"all\")\n print(f\"train_data size: {len(ds.train_data)}. test_data size: {len(ds.test_data)}\")\n","repo_name":"TVect/ChinSA","sub_path":"models/bert_spc_multilabel/composed_dataset.py","file_name":"composed_dataset.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"10767706790","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nfrom os.path import exists\nimport subprocess\nimport sys\n\n\ndef get_env(**kwargs):\n xonai_home = os.environ['XONAI_HOME']\n # Find benchmark data generators.\n tpcds_dsdgen_dir = os.path.join(xonai_home, \"tpcds-kit\", \"tools\")\n tpch_dbgen_dir = os.path.join(xonai_home, \"tpch-dbgen\")\n if not os.path.isdir(tpcds_dsdgen_dir):\n print(\"error: {} is not a directory\".format(tpcds_dsdgen_dir))\n sys.exit(1)\n if not os.path.isdir(tpch_dbgen_dir):\n print(\"error: {} is not a directory\".format(tpch_dbgen_dir))\n sys.exit(1)\n\n env = os.environ.copy()\n env[\"TPCDS_DSDGEN_DIR\"] = tpcds_dsdgen_dir\n env[\"TPCH_DBGEN_DIR\"] = tpch_dbgen_dir\n for key, val in kwargs.items():\n if val != '':\n env[key] = val\n return env\n\n\ndef get_cmd(script, s3_location):\n xonai_home = os.environ['XONAI_HOME']\n spark_home = os.environ['SPARK_HOME']\n spark_shell = os.path.join(spark_home, \"bin\", \"spark-shell\")\n script = os.path.join(xonai_home, \"xonai-benchmarks\", \"scripts\", script)\n spark_sql_perf_jar = os.path.join(xonai_home, \"spark-sql-perf\", \"target\", \"scala-2.12\",\n \"spark-sql-perf_2.12-0.5.1-SNAPSHOT.jar\")\n conf_file = os.path.join(spark_home, \"conf\", \"spark-defaults.conf\")\n command = [spark_shell, \"-i\", script, \"--jars\", spark_sql_perf_jar, \"--master\"]\n if s3_location:\n command.append(\"local[*]\")\n command.append(\"--driver-memory\")\n command.append(\"6g\")\n command.append(\"--packages\")\n command.append(\"org.apache.hadoop:hadoop-aws:3.2.0\")\n command.append(\"--conf\")\n command.append(\"spark.local.dir=/data/spark-temp\") # external volume mount point\n else:\n command.append(\"local\")\n if exists(conf_file):\n command.append(\"--properties-file\")\n command.append(conf_file)\n print('\\n\\nCreated shell command: ' + str(command) + '\\n\\n')\n return command\n\n\ndef gen_tpc(suite, scale_factor, root):\n echo_ps = subprocess.run((\"echo\", \":exit\"))\n cmd = get_cmd(\"GenTPC.scala\", root.startswith('s3a://'))\n env = get_env(TPC_SUITE=suite, TPC_SF=str(scale_factor), ROOT_DIR=root)\n cmd_ps = subprocess.run(cmd, env=env, stdin=echo_ps.stdout)\n\n\ndef run_tpc(suite, num_iter, queries, root, output):\n echo_ps = subprocess.run((\"echo\", \":exit\"))\n cmd = get_cmd(\"RunTPC.scala\", root.startswith('s3a://'))\n env = get_env(TPC_SUITE=suite, TPC_ITER=str(num_iter), TPC_QUERIES=queries, ROOT_DIR=root, OUTPUT_DIR=output)\n cmd_ps = subprocess.run(cmd, env=env, stdin=echo_ps.stdout)\n\n\ndef main():\n if 'XONAI_HOME' not in os.environ:\n print(\"error: XONAI_HOME not in env (did you forget to activate?)\")\n sys.exit(1)\n if 'SPARK_HOME' not in os.environ:\n print(\"error: Set SPARK_HOME to the location of your local 
Spark distribution\")\n sys.exit(1)\n\n # Top-level argument parser.\n parser = argparse.ArgumentParser(\n description=\"Spark benchmarking with Xonai integration\")\n parser.add_argument(\n \"suite\", choices=[\"tpcds\", \"tpch\"], help=\"benchmark suite name\")\n\n action_parsers = parser.add_subparsers(dest=\"action\", help=\"benchmark actions\", required=True)\n\n # Argument parser for data generation.\n gen_parser = action_parsers.add_parser(\"gen\", help=\"generate dataset for a benchmark suite\")\n gen_parser.add_argument(\"-s\", \"--size\", type=int, default=1, choices=range(1, 100000), help=\"data set size in GB\")\n gen_parser.add_argument(\"-r\", \"--root\", type=str, default='', help=\"root directory for tpc tables\")\n\n # Argument parser for benchmark execution.\n run_parser = action_parsers.add_parser('run', help=\"run a benchmark suite\")\n run_parser.add_argument(\"-i\", \"--iter\", type=int, default=1, choices=range(1, 1000),\n help=\"number of times to run each query\")\n run_parser.add_argument(\"-q\", \"--queries\", default=\"\", help=\"comma-separated list of query names to execute\")\n run_parser.add_argument(\"-r\", \"--root\", type=str, default='', help=\"root directory for tpc tables\")\n run_parser.add_argument(\"-o\", \"--output\", default=\"\", help=\"directory where results are stored\")\n\n args = parser.parse_args()\n\n if args.action == \"gen\":\n gen_tpc(args.suite, args.size, args.root)\n elif args.action == \"run\":\n run_tpc(args.suite, args.iter, args.queries, args.root, args.output)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xonai-computing/xonai-benchmarks","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"35020434705","text":"class Solution:\n def coinChange(self, coins, amount: int) -> int:\n dp = [0] * (amount+1)\n \n for i in range(1,amount+1):\n minimum = 100000\n for c in coins:\n if( i-c >=0):\n minimum = min(minimum, 1+dp[i-c])\n \n dp[i] = minimum\n \n return dp[amount] if dp[amount] < 100000 else -1","repo_name":"vr1090/slidingWindow","sub_path":"12_coin_change_322.py","file_name":"12_coin_change_322.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"33862274036","text":"#!/user/bin/env python\n# coding=utf-8\n'''\n@author: yuxiaoqi\n@contact: rpyxqi@gmail.com\n@file: ml_cls_models.py\n@time: 19-11-15 下午4:03\n@desc:\n'''\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom sklearn import tree, svm, naive_bayes, neighbors\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import make_scorer\n\nfrom logger import Logger\n\nlogger = Logger().get_log()\n\n\ndef standadize(arr):\n arr = arr.replace(np.inf, 0.0)\n arr = arr.replace(-np.inf, 0.0)\n _max, _min = arr.max(), arr.min()\n return (arr - _min) / (_max - _min)\n\n\ndef _get_min(bartime_lst):\n ret = []\n for item in bartime_lst:\n _h, _m = item.split(':')\n ret.append((int(_h) - 9) * 60 + int(_m))\n _max, _min = max(ret), 
min(ret)\n return [(item - _min) / (_max - _min) for item in ret]\n\n\ndef load_features(all_features=False, security_id=None):\n ret = os.listdir('data/')\n lst = []\n for item in ret:\n if item.endswith('csv') and 'corr' not in item and (not security_id or (security_id in item)):\n _df = pd.read_csv('data/{0}'.format(item))\n # index, exchangeCD, ticker, dataDate\n # barTime: 改成第几分钟\n bar_time_lst = _df['barTime']\n label5 = _df['label5']\n _df.drop(\n ['index', 'exchangeCD', 'ticker', 'dataDate', 'barTime', 'barTime.1', 'index.1', 'label5'],\n axis=1,\n inplace=True)\n _df = _df.apply(standadize, axis=0)\n bar_time_lst = _get_min(bar_time_lst)\n _df['barTime'] = bar_time_lst\n _df['label5'] = label5\n if not all_features:\n return _df\n if _df['label'][0] == 1.0 or ('barTime' not in _df.columns) or (_df['barTime'][0] != _df['barTime'][0]):\n logger.debug('verify data')\n lst.append(_df)\n df = pd.concat(lst)\n df.to_csv('data/all_features.csv')\n return df\n\n\ndef train_models(model_name='svc'):\n df = load_features(all_features=False)\n\n # targets = [2 if item > 0 else 1 if item == 0 else 0 for item in df['label5']] # 3 classes\n targets = [1 if item >= 0 else 0 for item in df['label5']] # 2 classes\n # it seems the label here does not hv big influence to the results\n # df.drop(['label'], axis=1, inplace=True)\n data = df.values\n m = {\n 'svc': svm.SVC(kernel='rbf', C=1),\n 'adbc': AdaBoostClassifier(n_estimators=50)\n }.get(model_name)\n\n x_train, x_test, y_train, y_test = train_test_split(data, targets, test_size=0.3, random_state=0)\n m.fit(x_train, y_train)\n # print(clf.score(x_test, y_test))\n cv = ShuffleSplit(n_splits=3, test_size=0.3, random_state=0)\n # scorer = make_scorer(f1_score, average='weighted')\n scorer = make_scorer(f1_score, average='micro')\n scores = cross_val_score(m, data, targets, cv=cv, scoring=scorer)\n print(scores)\n print(m.predict(data[:2]), targets[:2])\n\n\nif __name__ == \"__main__\":\n train_models('svc')\n","repo_name":"dxcv/hf_micro_research","sub_path":"ml_cls_models.py","file_name":"ml_cls_models.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"19035233532","text":"import sys\nimport os\nimport subprocess\nfrom codegen import *\nfrom utilities import add_arguments, ArgSpec\n\n\ninfo_prog = sys.argv[1]\ninitial_conditions = subprocess.check_output([info_prog, \"incoming\"]).strip()\ninitial_conditions = initial_conditions.split(\"\\n\")\n\ntarget_path = os.path.join(\"branchedflowsim\", \"incoming\")\nif not os.path.exists(target_path):\n os.makedirs(target_path)\n\ndisclaimer = '''\"\"\"\nThis file was automatically generated by `incoming_codegen.py`. 
Do not edit manually!\n\"\"\"\n'''\n\nfor incoming in initial_conditions:\n target_file = open(os.path.join(target_path, incoming + \".py\"), \"w\") # type: file\n data = subprocess.check_output([info_prog, \"incoming\", \"args\", incoming])\n description = subprocess.check_output([info_prog, \"incoming\", \"doc\", incoming]).strip()\n result = eval(data) # type: list[ArgSpec]\n\n obs_class = Class(incoming.title().replace(\"_\", \"\"), \"Incoming\")\n obs_class.add_code(\"_ARGUMENTS_ = %s\" % data)\n obs_class.add_code(\"_NAME_ = '%s'\" % incoming)\n\n init = Function(\"__init__\")\n obs_class.add_method(init)\n obs_class.docstring = description\n\n init.add_argument(\"self\")\n add_arguments(result, obs_class, init)\n\n target_file.write(disclaimer)\n target_file.write(\"from ..cmdline_helpers import ArgSpec\\n\")\n target_file.write(\"from .incoming import Incoming\\n\\n\\n\")\n target_file.write(str(obs_class))\n target_file.close()\n\n\n__init__file = open(os.path.join(target_path, \"__init__.py\"), \"w\")\n\n\n\nfor incoming in initial_conditions:\n __init__file.write(\"from .{} import {}\\n\".format(incoming, incoming.title().replace(\"_\", \"\")))\n","repo_name":"Mesocopic/branchedflowsim","sub_path":"python/codegen/incoming_codegen.py","file_name":"incoming_codegen.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"12054080967","text":"import datetime\nfrom enum import Enum\nfrom uuid import UUID\n\nfrom pydantic import Field, BaseModel\n\n__all__ = (\n 'UnitDeliveryStatistics',\n 'LateDeliveryVoucher',\n 'LateDeliveryVoucherIssuer',\n)\n\n\nclass UnitDeliveryStatistics(BaseModel):\n unit_uuid: UUID = Field(alias='unitId')\n unit_name: str = Field(alias='unitName')\n average_cooking_time: int = Field(alias='avgCookingTime')\n average_delivery_order_fulfillment_time: int = Field(alias='avgDeliveryOrderFulfillmentTime')\n average_heated_shelf_time: int = Field(alias='avgHeatedShelfTime')\n average_order_trip_time: int = Field(alias='avgOrderTripTime')\n couriers_shifts_duration: int = Field(alias='couriersShiftsDuration')\n delivery_orders_count: int = Field(alias='deliveryOrdersCount')\n delivery_sales: int = Field(alias='deliverySales')\n late_orders_count: int = Field(alias='lateOrdersCount')\n orders_with_courier_app_count: int = Field(alias='ordersWithCourierAppCount')\n trips_count: int = Field(alias='tripsCount')\n trips_duration: int = Field(alias='tripsDuration')\n\n @property\n def orders_per_labor_hour(self) -> int | float:\n if self.couriers_shifts_duration == 0:\n return 0\n return round(self.delivery_orders_count / (self.couriers_shifts_duration / 3600), 1)\n\n\nclass LateDeliveryVoucherIssuer(Enum):\n SYSTEM = 'System'\n CONTACT_CENTER = 'ContactCenter'\n\n\nclass LateDeliveryVoucher(BaseModel):\n order_id: UUID = Field(alias='orderId')\n order_number: str = Field(alias='orderNumber')\n order_accepted_at_local: datetime.datetime = Field(alias='orderAcceptedAtLocal')\n unit_uuid: UUID = Field(alias='unitId')\n predicted_delivery_time_local: datetime.datetime = Field(alias='predictedDeliveryTimeLocal')\n order_fulfilment_flag_at_local: datetime.datetime | None = Field(alias='orderFulfilmentFlagAtLocal')\n delivery_deadline_local: datetime.datetime = Field(alias='deliveryDeadlineLocal')\n issuer_name: LateDeliveryVoucherIssuer | None = Field(alias='issuerName')\n courier_staff_id: UUID | None = 
Field(alias='courierStaffId')\n","repo_name":"goretsky-integration/api","sub_path":"src/models/external_api_responses/dodo_is_api/delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"72946176958","text":"from __future__ import annotations\n\nimport asyncio\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Final, Literal, cast, overload\n\nfrom typing_extensions import Self # noqa: TCH002\n\nfrom ..._const import DOCS_BUILDING, VDF_LOADS, timeout\nfrom ..._gc import Client as Client_\nfrom ..._gc.client import ClientUser as ClientUser_\nfrom ...app import TF2, App\nfrom ...ext import commands\nfrom ...utils import cached_property # noqa: TCH001\nfrom .protobufs import struct_messages\nfrom .state import GCState # noqa: TCH001\n\nif TYPE_CHECKING:\n import os\n from collections.abc import Collection\n\n from ...enums import Language as Language_\n from ...ext import tf2\n from ...trade import Inventory, Item\n from .backpack import Backpack, BackpackItem, Schema\n\n__all__ = (\n \"Client\",\n \"Bot\",\n)\n\n\nclass ClientUser(ClientUser_):\n if TYPE_CHECKING:\n\n @overload\n async def inventory(self, app: Literal[TF2], *, language: object = ...) -> Backpack: # type: ignore\n ...\n\n @overload\n async def inventory(self, app: App, *, language: Language_ | None = None) -> Inventory[Item[Self], Self]: # type: ignore\n ...\n\n\nclass Client(Client_):\n _APP: Final = TF2\n _ClientUserCls = ClientUser\n _state: GCState # type: ignore # PEP 705\n if TYPE_CHECKING:\n\n @cached_property\n def user(self) -> ClientUser:\n ...\n\n @property\n def schema(self) -> Schema:\n \"\"\"TF2's item schema. ``None`` if the user isn't ready.\"\"\"\n return self._state.schema\n\n @property\n def backpack_slots(self) -> int:\n \"\"\"The client's number of backpack slots.\"\"\"\n if self._state.backpack_slots is None:\n raise RuntimeError(\"GC isn't ready yet\")\n return self._state.backpack_slots\n\n def is_premium(self) -> bool:\n \"\"\"Whether or not the client's account has TF2 premium. ``None`` if the user isn't ready.\"\"\"\n return self._state._is_premium # type: ignore\n\n def set_language(self, file: os.PathLike[str]) -> None: # TODO this doesn't work\n \"\"\"Set the localization files for your bot.\n\n This isn't necessary in most situations.\n \"\"\"\n file = Path(file).resolve()\n self._state.localisation = VDF_LOADS(file.read_text())\n\n async def craft(\n self, items: Collection[BackpackItem[ClientUser]], recipe: int = -2\n ) -> list[BackpackItem[ClientUser]]:\n \"\"\"Craft a set of items together with an optional recipe.\n\n Parameters\n ----------\n items\n The items to craft.\n recipe\n The recipe to craft them with default is -2 (wildcard). Setting for metal crafts isn't required. 
See\n https://raw.githubusercontent.com/Gobot1234/TF2-Crafting-Recipe/master/craftRecipe.json for other recipe\n details.\n\n Returns\n -------\n The crafted items, ``None`` if crafting failed.\n \"\"\"\n\n def check_craft(msg: struct_messages.CraftResponse) -> bool:\n if not msg.being_used: # craft queue is FIFO, so this works fine\n msg.being_used = True\n return True\n\n return False\n\n future = self._state.ws.gc_wait_for(struct_messages.CraftResponse, check=check_craft)\n await self._state.ws.send_gc_message(\n struct_messages.CraftRequest(recipe=recipe, items=[item.id for item in items])\n )\n\n try:\n async with timeout(60):\n resp = await future\n except asyncio.TimeoutError:\n raise ValueError(\"crafting failed\")\n else:\n if resp.recipe_id == -1: # error occurred\n raise ValueError(\"crafting failed\")\n\n return cast(\"list[BackpackItem[ClientUser]]\", await asyncio.gather(*map(self._state.wait_for_item, resp.ids)))\n\n if TYPE_CHECKING or DOCS_BUILDING:\n\n async def on_account_update(self) -> None:\n \"\"\"Called when the client user's account is updated. This can happen from any one of the below changing:\n\n - :meth:`is_premium`\n - :attr:`backpack_slots`\n \"\"\"\n\n async def on_item_receive(self, item: tf2.BackpackItem) -> None:\n \"\"\"Called when the client receives an item.\n\n Parameters\n ----------\n item\n The received item.\n \"\"\"\n\n async def on_item_remove(self, item: tf2.BackpackItem) -> None:\n \"\"\"Called when the client has an item removed from its backpack.\n\n Parameters\n ----------\n item\n The removed item.\n \"\"\"\n\n async def on_item_update(self, before: tf2.BackpackItem, after: tf2.BackpackItem) -> None:\n \"\"\"Called when the client has an item in its backpack updated.\n\n Parameters\n ----------\n before\n The item before being updated.\n after\n The item now.\n \"\"\"\n\n\nclass Bot(commands.Bot, Client):\n pass\n","repo_name":"Gobot1234/steam.py","sub_path":"steam/ext/tf2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"97"} +{"seq_id":"71039599038","text":"import xmlrpc.client\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\na=np.array([4,5,6])\r\ntype(a)#输出a的类型\r\na.shape#维数大小numpy.ndarray\r\na[0]\r\nb=np.array([[4,5,6],[1,2,3]])\r\nprint(b)\r\nb.shape#输出维数大小(2,3)\r\nc=np.zeros((3,3),dtype=int)#全为0的矩阵大小3*3\r\nd=np.ones((4,5),dtype=int)#全为1的矩阵大小为4*5\r\nm=np.identity(4)#单位矩阵大小4*4\r\nn=np.random.randn(3,2)#随机数矩阵\r\nq=np.arange(1,13).reshape(3,4)#输出3*4的矩阵元素为1到12\r\np=q[0:2,1:3]#将q中1到2行和2到3列放入p中\r\nw=q[1:3,:]#将q中2到3行放入w中\r\ne=np.array([[1,2],[3,4],[5,6]])\r\ne[[0,1,2],[0,1,0]]#[1,4,5]输出(0,0),(1,1),(2,0)三个元素\r\nr=np.arange(1,13).reshape(4,3)\r\nt=np.array([0,2,0,1])\r\nr[np.arange(4),t]#输出r中(0,0)(1,2)(2,0)(3,1)四个元素\r\nr[np.arange(4),t]+=10#每个元素加10\r\ny=np.array([1,2])\r\ny.dtype#输出int32\r\nu=np.array([1.0,2.0])\r\nu.dtype#输出float64\r\ni=np.array([[1,2],[3,4]],dtype=np.float64)\r\no=np.array([[5,6],[7,8]],dtype=np.float64)\r\ni+o\r\nnp.add(i,o)#输出[[6.,8.],[10.,12.]]\r\ni-o\r\nnp.subtract(i,o)#输出[[-4.,-4.],[-4.,-4.]]\r\ni*o\r\nnp.multiply(i,o)#输出[[5.,12.],[21.,32.]]\r\nnp.dot(i,o)#满足矩阵相乘\r\n#f=np.array([[2,3,4],[5,6,7]],dtype=int)\r\n#g=np.array([[1,2],[7,8],[1,3]],dtype=int)\r\n#np.dot(f,g)\r\n#np.multiply(f,g)\r\ni/o\r\nnp.divide(i,o)#俩矩阵下标相同的相除\r\nnp.sqrt(i)#矩阵元素开根号\r\ni.dot(o)\r\nnp.dot(i,o)#俩运行结果相同,都是满足矩阵相乘定理\r\nnp.sum(i)#矩阵中所有元素相加\r\nnp.sum(i,axis=0)#列和 [4. 6.]\r\nnp.sum(i,axis=1)#行和 [3. 
7.]\r\nnp.mean(i)#结果2.5平均值的求解\r\nnp.mean(i,axis=0)#列和的平均值,结果为[2. 3.]\r\nnp.mean(i,axis=1)#行和的平均值,结果为[1.5 3.5]\\\r\ni.T#矩阵的转置\r\nnp.exp(i)#e的指数[[2.71,7.38],[20.08,54.598]]\r\ns=np.array([[7,8],[4,2]])\r\nnp.argmax(s)#输出1\r\nnp.argmax(s,axis=0)#输出[0 0]\r\nnp.argmax(s,axis=1)#输出[1 0]\r\nx=np.arange(0,100,0.1)#建立一个0到100每个相邻元素相差0.1的矩阵\r\nh=x*x\r\nplt.figure(figsize=(6,6))#建立画布,并指定画布大小\r\nplt.plot(x,h)#在画布上画图\r\nplt.show()#展示画图结果\r\nb=np.arange(0,3*np.pi,0.1)\r\ny1=np.sin(b)#建立sin矩阵\r\ny2=np.cos(b)\r\nplt.figure(figsize=(10,6))\r\nplt.plot(b,y1,color='Red')#在画布上画图,颜色为红\r\nplt.plot(b,y2,color='Blue')\r\nplt.legend(['Sin','Cos'])#给俩条线做标记\r\nplt.show()","repo_name":"rvhyb/numpy","sub_path":"888/使用numpy模块,矩阵建立的运用.py","file_name":"使用numpy模���,矩阵建立的运用.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"8993659865","text":"from rest_framework import serializers\n\nfrom utilities.python_utils import get_schema_from_url\nfrom .models import *\n\n\nclass ClientSerializer(serializers.ModelSerializer):\n domains = serializers.SerializerMethodField('get_domains', read_only=True)\n\n def validate(self, initial_data):\n schema_name = get_schema_from_url(self.context)\n domain_exist = Client.objects.filter(schema_name=schema_name).exists()\n name_exist = Client.objects.filter(name=initial_data['name']).exists()\n if name_exist:\n raise serializers.ValidationError('A client with this name already exists')\n if domain_exist:\n raise serializers.ValidationError('A client with this domain already exists')\n initial_data['schema_name'] = schema_name\n initial_data['domain_url'] = self.context\n return initial_data\n\n def create(self, validated_data):\n client = Client.objects.create(name=validated_data['name'], schema_name=validated_data['schema_name'])\n domain = Domain()\n domain.domain = validated_data['domain_url']\n domain.tenant = client\n domain.is_primary = True\n domain.save()\n return client\n\n def get_domains(self, obj):\n domains = Domain.objects.filter(tenant=obj)\n serializer = DomainSerializer(domains, many=True)\n return serializer.data\n\n class Meta:\n model = Client\n fields = ('name', 'domains', 'schema_name')\n\n\nclass DomainSerializer(serializers.ModelSerializer):\n class Meta:\n model = Domain\n fields = ('domain', 'is_primary')\n","repo_name":"louieee/Sudden","sub_path":"customer/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"41529260932","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 9 21:58:57 2023\n\n@author: Shubhi Tiwari\n\"\"\"\n\nintervals = [[1,3],[2,6],[8,10],[15,18]]\ndef merge(intervals):\n \"\"\"\n :type intervals: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n intervals.sort()\n #print(intervals)\n ans=[]\n ans.append(intervals[0])\n k=0\n for j in range(1,len(intervals)):\n \n #print(ans[k][1],\"ans\")\n #print(intervals[j][0])\n if intervals[j][0] <= ans[k][1]:\n \n #print(intervals[j][1])\n #print(ans[k][1])\n if intervals[j][1] < ans[k][1]:\n ans[k][1]=ans[k][1]\n else:\n ans[k][1]=intervals[j][1]\n \n else:\n ans.append(intervals[j])\n k=k+1 \n \n return(ans)\n \n \n\nprint(merge(intervals)) ","repo_name":"Shubhi2807/DSA","sub_path":"merge sub-intervals.py","file_name":"merge 
sub-intervals.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"20101877631","text":"import time\n\nfrom timer import Timer\nfrom tkinter import Tk, Button, Entry, Text, INSERT, Label, StringVar\nimport random\n\nroot = Tk()\nroot.title(\"Type faster!\")\nroot.geometry(\"650x170\")\n\nentry_text = StringVar()\nentry_text.set(\"\")\n\nTEXTS = [\n \"No horizonte distante, o sol se despede, pintando o ceu com tons de saudade.\",\n \"Entre suspiros de vento, as folhas dancam, celebrando a poesia silenciosa da natureza.\",\n \"No silencio da madrugada, as estrelas sussurram segredos cosmicos aos sonhadores.\"\n]\n\nsorted_text = TEXTS[random.randint(0, 2)]\n\ntimer = Timer(root)\n\n\ndef verify_text(e):\n timer.start_timer()\n new_text = entry.get()\n if new_text == sorted_text and timer.status:\n finish_text = Label(root, text=f'Você levou {timer.stop_timer()} segundos!')\n finish_text.pack()\n\n\ntext = Label(root, text=sorted_text)\ntext.pack()\n\nentry = Entry(root)\nentry.pack(pady=5)\n\nentry.bind(\"\", verify_text)\n\nroot.mainloop()\n","repo_name":"Guilherme-Castello-Clear/typing-speed-py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"18910694597","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/2/22 20:41\n# @Author : SunriseCai\n# @Software: PyCharm\n\n\nimport json\nimport re\nimport requests\nimport threading\n\n\n\"\"\"Bilibili视频下载小程序\"\"\"\n\nsession = requests.session()\n\n\nclass BilibiliSpider:\n def __init__(self, url):\n self.url = url\n self.pageHeaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',\n }\n self.dataHeaders = {\n 'Host': 'bilivideo.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',\n 'Accept': '*/*',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Range': 'bytes=0-1000000000000',\n 'Origin': 'https://www.bilibili.com',\n 'Connection': 'keep-alive',\n 'Referer': 'https://www.bilibili.com/video/'\n }\n\n def get_url(self):\n \"\"\"\n 请求视频播放页面,在源码中获取视音频链接和视频名称\n :return: 视频链接、音频链接、视频名称\n \"\"\"\n htmlData = requests.get(self.url, headers=self.pageHeaders, verify=False).text\n urlData = json.loads(re.findall('', htmlData, re.M)[0])\n videoUrl = urlData['data']['dash']['video'][0]['baseUrl']\n audioUrl = urlData['data']['dash']['audio'][0]['baseUrl']\n name = re.findall('
', htmlData, re.M)[0]\n return videoUrl, audioUrl, name\n\n def download_video(self, videoUrl, name):\n \"\"\"\n 传入url和名称,开始下载\n :param videoUrl: 视频链接\n :param name: 视频名称\n :return:\n \"\"\"\n videoContent = session.get(videoUrl, headers=self.dataHeaders).content\n with open('%s.mp4' % name, 'wb') as f:\n f.write(videoContent)\n f.close()\n print('video download Success')\n\n def download_audio(self, audioUrl, name):\n \"\"\"\n 传入url和名称,开始下载\n :param audioUrl: 音频链接\n :param name: 音频名称\n :return:\n \"\"\"\n audioContent = session.get(audioUrl, headers=self.dataHeaders).content\n with open('%s.mp3' % name, 'wb') as f:\n f.write(audioContent)\n f.close()\n print('audio download Success')\n\n def main(self):\n \"\"\"\n 主程序,利用多线程下载视音频会比较快\n :return:\n \"\"\"\n videoUrl, audioUrl, name = self.get_url()\n videoThread = threading.Thread(target=self.download_video, args=(videoUrl, name))\n audioThread = threading.Thread(target=self.download_audio, args=(audioUrl, name))\n videoThread.start()\n audioThread.start()\n videoThread.join()\n audioThread.join()\n # 退出保持会话\n session.close()\n\n\nif __name__ == '__main__':\n url = 'https://www.bilibili.com/video/av25621315/'\n spider = BilibiliSpider(url)\n spider.main()\n","repo_name":"13433354333/spiderCode","sub_path":"BilibiliSpider/BilibiliSpider.py","file_name":"BilibiliSpider.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"13319599454","text":"import django, pandas as pd, pdb, numpy as np\n\nfrom eBmap.models import Building\n\nkc_d = pd.read_excel('../eBuildings.xlsx')\nkc_d.replace(np.nan,'',inplace=True)\n\nbuildings = []\nfor index, bdg in kc_d.iterrows():\n print(bdg['Building Name'],bdg['Floor Area (sq ft)'])\n if (bdg['Floor Area (sq ft)']==''):\n bdg['Floor Area (sq ft)']=None\n building = Building(\n year = int(2021),\n Major = 0, #int(bdg[1]['Major']),\n Minor = 0, #int(bdg[1]['Minor']),\n BldgNbr = 0, #int(bdg[1]['BldgNbr']),\n Address = bdg['Address'],\n main_use = bdg['Building Type'],\n Number_of_Stories = 2, #int(bdg[1]['NbrStories']),\n Construction_Class = 0, #int(bdg[1]['ConstrClass']),\n Year_built = 2018, #int(bdg[1]['YrBuilt']),\n Year_remodeled = 2020, #int(bdg[1]['EffYr']),\n Building_Name = bdg['Building Name'],\n Space_heat_type = bdg['Space_heat_type'],\n Cook_type = bdg['Cook_type'],\n DHW_type = bdg['DHW_type'],\n City = bdg['City'],\n County = bdg['County'],\n Leg_District = bdg['Leg District'],\n Floor_Area = bdg['Floor Area (sq ft)'],\n Description = bdg['Description'],\n link = bdg['link'],\n img_link = bdg['img_link'],\n Architect = bdg['Architect'],\n Engineer = bdg['Engineer'],\n Builder = bdg['Builder'],\n lat = bdg['lat'],\n lon = bdg['lon'], )\n buildings.append(building)\n\nBuilding.objects.bulk_create(buildings)\n\n","repo_name":"stinsong4100/WA-Electric-Buildings","sub_path":"eBuildingsMap/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"27461658490","text":"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\n\ndef gen_delta(input_list):\n l1 = input_list[:-1]\n l2 = input_list[1:]\n return np.subtract(l2,l1)\n\ndef load_trace_file(input_file,thres):\n ret = []\n with open(input_file) as f:\n for line in f:\n timeval, rmw_evicts = [int(x) for x in line.split(';')]\n if rmw_evicts >= thres:\n ret.append(timeval)\n return ret\n\ndef 
save_diff_file(diff_list,outfile_name):\n with open(outfile_name,'w') as o:\n for val in diff_list:\n o.write(str(val/1000)+'\\n')\n\n\ndef main():\n if len(sys.argv) != 4:\n print(f'Usage: {sys.argv[0]} ')\n return\n\n input_file = sys.argv[1]\n output_file = sys.argv[2]\n rmw_thres = int(sys.argv[3])\n\n print(f\"Generating for {input_file}\")\n trace = load_trace_file(input_file,rmw_thres)\n print(f\"Trace length: {len(trace)}\")\n delta = gen_delta(trace)\n save_diff_file(delta,output_file)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Systems-ShiftLab/optane_sec23_ae","sub_path":"keystroke/gen_delta.py","file_name":"gen_delta.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"16873635577","text":"\"\"\"\nImplements several functions for working with continued fractions. Functionality consists almost\nentirely of approximting irrationals by continued fractions.\n\"\"\"\nimport math\n\n\ndef fraction_coeffs(decimal):\n \"\"\"\n Yields an infinite sequence of a_k's approximating the given decimal\n\n Example:\n >>> from math import sqrt\n >>> from itertools import islice\n >>> coeffs = fraction_coeffs(sqrt(2))\n >>> list(islice(coeffs, 4)) # Grab the first four values of the infinite sequence\n [1, 2, 2, 2]\n \"\"\"\n x_k = decimal\n\n while True:\n a_k = math.floor(x_k)\n r_k = x_k - a_k\n x_k = 1 / r_k\n yield a_k\n\n\ndef fractions(decimal):\n \"\"\"\n Yields an infinite sequence of fractions (pk, qk) approximating the given decimal. Each\n successive fraction will be a better and better approximation.\n\n Example:\n >>> from math import sqrt\n >>> from itertools import islice\n >>> pq = fractions(sqrt(2))\n >>> list(islice(pq, 4))\n [(1, 1), (3, 2), (7, 5), (17, 12)]\n \"\"\"\n p_curr, p_prev = 1, 0\n q_curr, q_prev = 0, 1\n\n for a_k in fraction_coeffs(decimal):\n p_next = a_k * p_curr + p_prev\n q_next = a_k * q_curr + q_prev\n p_prev, p_curr = p_curr, p_next\n q_prev, q_curr = q_curr, q_next\n yield p_next, q_next\n\n\ndef approximate_decimal(decimal, tolerance):\n \"\"\"\n Approximates the given decimal with a rational number to some specified tolerance.\n Returns a (num, denom) tuple.\n\n Example:\n >>> from math import pi\n >>> approximate_decimal(pi, 1e-5)\n (355, 113)\n \"\"\"\n for num, denom in fractions(decimal):\n tol = abs(decimal - num / denom)\n if tol <= tolerance:\n return num, denom\n","repo_name":"Notgnoshi/cryptography","sub_path":"crypto/math/continued_fractions.py","file_name":"continued_fractions.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"1409481893","text":"# Partimos del codigo de programa1\nimport http.client\nimport json\n\nheaders = {'User-Agent': 'http-client'}\n\nconn = http.client.HTTPSConnection(\"api.fda.gov\") # Establecemos conexion con el servidor\nconn.request(\"GET\", \"/drug/label.json?limit=10\", None, headers) # Como buscamos informacion de\n # 10 medicamentos ponemos limit=10\nr1 = conn.getresponse() # Obtenemos la informacion requerida\nprint(r1.status, r1.reason) # Comprobamos status y razon (200, OK)\n\nmedicina_raw = r1.read().decode(\"utf-8\") # Lectura del contenido en json y transformacion en cadena\nconn.close()\n\nmedicina = json.loads(medicina_raw)\n\n# Itero con un bucle for para acceder a la informacion de los 10 medicamentos\nfor medicamento in range(len(medicina['results'])):\n datos = 
medicina['results'][medicamento]\n print('El medicamento', medicamento+1, ' tiene como id: ', datos['id'])\n","repo_name":"martacastellano/openfda","sub_path":"openfda-1/programa2.py","file_name":"programa2.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"40597388487","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nfrom typing import Optional\n\n\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n dummy = ListNode(0, head)\n left, right = dummy, head\n \n while n > 0 and right:\n right = right.next\n n -=1\n while right:\n left = left.next\n right = right.next\n \n left.next = left.next.next\n \n return dummy.next ","repo_name":"vicodevv/DataStructures-Algorithm","sub_path":"Linked List/19. Remove Nth Node From End of List/19. Remove Nth Node From End of List.py","file_name":"19. Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"97"} +{"seq_id":"30293916456","text":"# a simple parser for python. use get_number() and get_word() to read\ndef parser():\n while 1:\n data = list(input().split(' '))\n for number in data:\n if len(number) > 0:\n yield(number) \n\ninput_parser = parser()\n\ndef get_word():\n global input_parser\n return next(input_parser)\n\ndef get_number():\n data = get_word()\n try:\n return int(data)\n except ValueError:\n return float(data)\n\n# numpy and scipy are available for use\nimport numpy\n# import scipy\n\nclass Student:\n def __init__(self):\n self.bffs = []\n self.bff_pos = 1\n def set_pos(self, x: int):\n self.pos = x\n def add_bff(self, stu: Student):\n self.bffs.append(stu)\n self.opp_pos = stu.pos\n \n\nclass Classroom:\n def __init__(self):\n stu = Student()\n stu.set_pos(0)\n self.students = [stu]\n self.count = 1\n\n def addStudent(self, bff: int):\n self.count += 1\n stu = Student()\n self.students.append(stu)\n self.students[bff-1].add_bff(stu)\n \n\nN = get_number()\ncr = Classroom()\nfor _ in range(N-1):\n bff = get_number()\n cr.addStudent(bff)\n\nfor s in cr.students:\n print(s, s.bffs)\n\n# dists = [[] for _ in range(N)]\n\n# for key in classroom:\n# bffs = classroom[key]\n# if len(bffs) < 3:\n# dists[key][bffs[0]] = 1\n# dists[key][bffs[1]] = 1\n# dists[bffs[0]][bffs[1]] = 2\n# else:\n# for i in range(len(bffs)//2):\n\n\n\n# total = 0\n# for n in dists:\n# for m in n:\n# total += m\n# print(total)","repo_name":"GambuzX/IEEEXtreme2020","sub_path":"social-distancing-in-class/social-distancing-in-class.py","file_name":"social-distancing-in-class.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"19502728592","text":"\n#変数名が汚いので注意\nn=int(input())\na=list(map(int,input().split()))\nta=-50000#高橋の答え\ntans=0#高橋の丸つけるans\nfor i in range(n):#高橋が丸つけたところ\n aa=-50000\n tc=0#青木の一番スコアが高いときの高橋の答えの記録\n for j in range(n):#青木が〇つけたところ\n t=[]\n tcount=0#tの要素の合計\n acount=0#\n if i!=j:\n for z in range(min(i,j),max(i,j)+1):#配列を作る\n t.append(a[z])\n for z in range(len(t)):#足す\n if z%2==0:\n tcount+=t[z]\n else:\n acount+=t[z]\n if aa