\n >>> explainer = NonLinearitiesTunnel(Saliency(mlp))\n >>> attr = explainer.attribute(inputs, target=0)\n \"\"\"\n\n def __init__(\n self,\n attribution_method: Attribution,\n ) -> None:\n self.attribution_method = attribution_method\n self.is_delta_supported = (\n self.attribution_method.has_convergence_delta()\n )\n self._multiply_by_inputs = self.attribution_method.multiplies_by_inputs\n self.is_gradient_method = isinstance(\n self.attribution_method, GradientAttribution\n )\n Attribution.__init__(self, self.attribution_method.forward_func)\n\n @property\n def multiplies_by_inputs(self):\n return self._multiply_by_inputs\n\n def has_convergence_delta(self) -> bool:\n return self.is_delta_supported\n\n @log_usage()\n def attribute(\n self,\n inputs: Union[Tensor, Tuple[Tensor, ...]],\n to_replace: Union[nn.Module, Tuple[nn.Module, ...]] = nn.ReLU(),\n replace_with: Union[nn.Module, Tuple[nn.Module, ...]] = nn.Softplus(),\n **kwargs: Any,\n ) -> Union[\n Union[\n Tensor,\n Tuple[Tensor, Tensor],\n Tuple[Tensor, ...],\n Tuple[Tuple[Tensor, ...], Tensor],\n ]\n ]:\n r\"\"\"\n Args:\n inputs (tensor or tuple of tensors): Input for which integrated\n gradients are computed. If forward_func takes a single\n tensor as input, a single input tensor should be provided.\n If forward_func takes multiple tensors as input, a tuple\n of the input tensors should be provided. It is assumed\n that for all given input tensors, dimension 0 corresponds\n to the number of examples, and if multiple input tensors\n are provided, the examples must be aligned appropriately.\n It is also assumed that for all given input tensors,\n dimension 1 corresponds to the time dimension, and if\n multiple input tensors are provided, the examples must\n be aligned appropriately.\n to_replace (nn.Module, tuple, optional): Non linearities\n to be replaced. The linearities of type listed here will be\n replaced by ``replaced_by`` non linearities before running\n the attribution method. This can be an instance or a class.\n If a class is passed, default attributes are used.\n Default: nn.ReLU()\n replace_with (nn.Module, tuple, optional): Non linearities\n to replace the ones listed in ``to_replace``.\n Default: nn.Softplus()\n **kwargs: (Any, optional): Contains a list of arguments that are\n passed to `attribution_method` attribution algorithm.\n Any additional arguments that should be used for the\n chosen attribution method should be included here.\n For instance, such arguments include\n `additional_forward_args` and `baselines`.\n\n Returns:\n **attributions** or 2-element tuple of **attributions**, **delta**:\n - **attributions** (*tensor* or tuple of *tensors*):\n Attribution with\n respect to each input feature. attributions will always be\n the same size as the provided inputs, with each value\n providing the attribution of the corresponding input index.\n If a single tensor is provided as inputs, a single tensor\n is returned. If a tuple is provided for inputs, a tuple of\n corresponding sized tensors is returned.\n - **delta** (*float*, returned if return_convergence_delta=True):\n Approximation error computed by the\n attribution algorithm. Not all attribution algorithms\n return delta value. It is computed only for some\n algorithms, e.g. integrated gradients.\n \"\"\"\n # Keeps track whether original input is a tuple or not before\n # converting it into a tuple.\n is_inputs_tuple = isinstance(inputs, tuple)\n\n inputs = _format_inputs(inputs)\n\n # Check if needs to return convergence delta\n return_convergence_delta = (\n \"return_convergence_delta\" in kwargs\n and kwargs[\"return_convergence_delta\"]\n )\n\n _replaced_layers_tpl = None\n _replaced_functions = dict()\n try:\n # Replace layers using to_replace and replace_with\n if not isinstance(to_replace, tuple):\n to_replace = (to_replace,)\n if not isinstance(replace_with, tuple):\n replace_with = (replace_with,)\n\n if isinstance(self.attribution_method.forward_func, nn.Module):\n _replaced_layers_tpl = tuple(\n replace_layers(\n self.attribution_method.forward_func, old, new\n )\n for old, new in zip(to_replace, replace_with)\n )\n\n # Replace functional using to_replace and replace_with\n for old, new in zip(to_replace, replace_with):\n name, _ = get_functional(old)\n _, func = get_functional(new)\n _replaced_functions[name] = getattr(F, name)\n setattr(F, name, func)\n\n # Get attributions\n attributions = self.attribution_method.attribute.__wrapped__(\n self.attribution_method, # self\n inputs if is_inputs_tuple else inputs[0],\n **kwargs,\n )\n\n # Get delta if required\n delta = None\n if self.is_delta_supported and return_convergence_delta:\n attributions, delta = attributions\n\n # Format attributions\n is_attrib_tuple = _is_tuple(attributions)\n attributions = _format_tensor_into_tuples(attributions)\n\n # Even if any error is raised, restore layers and functions\n # before raising\n finally:\n # Restore layers\n if _replaced_layers_tpl is not None:\n for layer in _replaced_layers_tpl:\n reverse_replace_layers(\n self.attribution_method.forward_func,\n layer,\n )\n\n # Restore functions\n for name, func in _replaced_functions.items():\n setattr(F, name, func)\n\n return self._apply_checks_and_return_attributions(\n attributions,\n is_attrib_tuple,\n return_convergence_delta,\n delta,\n )\n\n def _apply_checks_and_return_attributions(\n self,\n attributions: Tuple[Tensor, ...],\n is_attrib_tuple: bool,\n return_convergence_delta: bool,\n delta: Union[None, Tensor],\n ) -> Union[\n TensorOrTupleOfTensorsGeneric,\n Tuple[TensorOrTupleOfTensorsGeneric, Tensor],\n ]:\n attributions = _format_output(is_attrib_tuple, attributions)\n\n ret = (\n (attributions, cast(Tensor, delta))\n if self.is_delta_supported and return_convergence_delta\n else attributions\n )\n ret = cast(\n Union[\n TensorOrTupleOfTensorsGeneric,\n Tuple[TensorOrTupleOfTensorsGeneric, Tensor],\n ],\n ret,\n )\n return ret\n\n\ndef replace_layers(model, old, new):\n \"\"\"\n Replace all the layers of type old into new.\n\n Returns:\n dict: Dictionary of replaced layers, saved to be restored after\n running the attribution method.\n\n References:\n https://discuss.pytorch.org/t/how-to-modify-a-pretrained-model/60509/12\n \"\"\"\n replaced_layers = dict()\n for n, module in model.named_children():\n if len(list(module.children())) > 0:\n replaced_layers[n] = replace_layers(module, old, new)\n\n if isinstance(module, old if type(old) == type else type(old)):\n replaced_layers[n] = getattr(model, n)\n setattr(model, n, new)\n return replaced_layers\n\n\ndef reverse_replace_layers(model, replaced_layers):\n \"\"\"\n Reverse the layer replacement using the ``replaced_layers``\n dictionary created when running ``replace_layers``.\n \"\"\"\n for k, v in replaced_layers.items():\n if isinstance(v, dict):\n reverse_replace_layers(getattr(model, k), v)\n else:\n setattr(model, k, v)\n\n\ndef get_functional(module):\n \"\"\"\n Map a nn Non linearity to a corresponding function.\n It returns the name of the function to be replaced and the function\n to replace it with.\n \"\"\"\n # Get default instance if ony type provided\n if type(module) == type:\n module = module()\n\n assert isinstance(module, nn.Module), \"You must provide a PyTorch Module.\"\n\n if isinstance(module, nn.Threshold):\n threshold = module.threshold\n value = module.value\n inplace = module.inplace\n if inplace:\n return \"threshold_\", lambda x: F.threshold_(x, threshold, value)\n return \"threshold\", lambda x: F.threshold(x, threshold, value)\n\n if isinstance(module, nn.ReLU):\n inplace = module.inplace\n if inplace:\n return \"relu_\", F.relu_\n return \"relu\", F.relu\n\n if isinstance(module, nn.ReLU6):\n inplace = module.inplace\n return \"relu6\", lambda x: F.relu6(x, inplace)\n\n if isinstance(module, nn.ELU):\n alpha = module.alpha\n inplace = module.inplace\n if inplace:\n return \"elu_\", lambda x: F.elu_(x, alpha)\n return \"elu\", lambda x: F.elu(x, alpha)\n\n if isinstance(module, nn.CELU):\n alpha = module.alpha\n inplace = module.inplace\n if inplace:\n return \"celu_\", lambda x: F.celu_(x, alpha)\n return \"celu\", lambda x: F.celu(x, alpha)\n\n if isinstance(module, nn.LeakyReLU):\n negative_slope = module.negative_slope\n inplace = module.inplace\n if inplace:\n return \"leaky_relu_\", lambda x: F.leaky_relu_(x, negative_slope)\n return \"leaky_relu\", lambda x: F.leaky_relu(x, negative_slope)\n\n if isinstance(module, nn.Softplus):\n beta = module.beta\n threshold = module.threshold\n return \"softplus\", lambda x: F.softplus(x, beta, threshold)\n\n if isinstance(module, nn.Softmax):\n dim = module.dim\n return \"softmax\", lambda x: F.softmax(x, dim=dim)\n\n if isinstance(module, nn.LogSoftmax):\n dim = module.dim\n return \"log_softmax\", lambda x: F.log_softmax(x, dim=dim)\n\n if isinstance(module, nn.Sigmoid):\n return \"sigmoid\", F.sigmoid\n\n if isinstance(module, nn.Tanh):\n return \"tanh\", F.tanh\n\n if isinstance(module, nn.Tanhshrink):\n return \"tanhshrink\", F.tanhshrink\n\n return None\n","repo_name":"josephenguehard/time_interpret","sub_path":"tint/attr/non_linearities_tunnel.py","file_name":"non_linearities_tunnel.py","file_ext":"py","file_size_in_byte":12913,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
+{"seq_id":"31566102002","text":"import os\nimport sys\nfrom remote_runner import SSHWorker, SyncSSHWorker\n\n\ndef ssh_worker_factory():\n worker = SSHWorker(host='localhost')\n worker.remote_user_rc = f\"\"\"\nsource {os.path.abspath(os.path.join(os.path.dirname(sys.executable), \"activate\"))}\n\"\"\"\n return worker\n\n\ndef sync_ssh_worker_factory():\n worker = SyncSSHWorker(sync_period=0.2, host='localhost')\n worker.remote_user_rc = f\"\"\"\nsource {os.path.abspath(os.path.join(os.path.dirname(sys.executable), \"activate\"))}\n\"\"\"\n return worker\n","repo_name":"sizmailov/remote-runner","sub_path":"tests/ssh_common.py","file_name":"ssh_common.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33167155031","text":"import pysnooper \n\ndef get_input():\n data = {\n 'account_no': {\n 'prompt': \"Enter the account number: \",\n 'item_value': None},\n 'balance': {\n 'prompt': \"Enter account balance: \",\n 'item_value': None},\n 'customer_credit': {\n 'prompt': \"Enter the customer credit: \",\n 'item_value': None},\n 'customer_name': {\n 'prompt': \"Enter the customer name: \",\n 'item_value': None},\n }\n #field is the name of the key (account #, balance), field_data are entire dictionaries and prompt and item value are keys \n #data.items() converts into a list of tuples that contains the key value pair \n for field, field_data in data.items():\n field_data['item_value'] = input(field_data['prompt'])\n\n return data\n\n\ndef validate_account_no(account_number):\n if len(account_number) < 5:\n return False\n return True\n\n\ndef validate_balance(balance):\n if len(balance) < 5:\n return False\n return True\n\n\ndef validate_customer_credit(customer_credit):\n if len(customer_credit) < 5:\n return False\n return True\n\n\ndef validate_customer_name(customer_name):\n if len(customer_name) < 5:\n return False\n return True\n\n#unvalidates account no, returns the account_no dictionary \n#adding a new key to the embedded dictionary called is_valid \n#ammends to the data dictionary is_valid key \n\n#this is called a decorator with the @ sign. Modifies function \n@pysnooper.snoop()\ndef validate(unvalidated):\n unvalidated['account_no']['is_valid'] = False\n unvalidated['account_no']['validate'] = validate_account_no\n #creating new key called validate and assigning a value of type function (validate_account_no variable) \n unvalidated['balance']['is_valid'] = False\n unvalidated['balance']['validate'] = validate_balance\n unvalidated['customer_credit']['is_valid'] = False\n unvalidated['customer_credit']['validate'] = validate_customer_credit\n unvalidated['customer_name']['is_valid'] = False\n unvalidated['customer_name']['validate'] = validate_customer_name\n for field, field_data in unvalidated.items():\n field_data['is_valid'] = \\\n field_data['validate'](field_data['item_value'])\n \n #validate key is the variable is the variable with the function in it. Paranthese is the paraneter you are passing to it \n \n #you cannot have a tuple with 1 value in it, unless you have \n\n return unvalidated\n\n\ndef main():\n mydata = get_input()\n my_valid_data = validate(mydata)\n print(my_valid_data) # just to show\n\n\n#calling main \n#run main \nif __name__ == \"__main__\":\n main()","repo_name":"cryptosnowmanETH/exercises","sub_path":"lesson_part2.py","file_name":"lesson_part2.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16752974237","text":"# Authors: Gabriel Bayer, Eugênio Pozzobon\n# e-mails: gbayer.formula@gmail.com, eugeniopp00@gmail.com\n# Github: https://github.com/Eugenio-Pozzobon\n# Linkedin: https://www.linkedin.com/in/eugeniopozzobon/\n# Licensed under the GNU General Public License v3.0\n\nimport socket\nimport src.settings as settings\nfrom tkinter import messagebox\nimport src.wcuScreen as wcuScreen\n\ncli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ndef conectClient():\n '''\n conect to the server based on settings file\n :return: none\n '''\n cli.connect((settings.socketIP, settings.socketPort))\n\ndef clientRecieve():\n '''\n read server until get an aceptable message\n :return: none\n '''\n while True:\n try:\n msg = cli.recv(1024)\n\n # check message integrity\n splitmsg = msg.decode('utf8').split(',')\n do = True\n for value in splitmsg:\n if value == '':\n value = '0'\n\n # if all bytes get recieved\n if(len(msg.decode('utf8').split(','))==(10+26)) & do:\n print((','.join(splitmsg)).encode())\n return (','.join(splitmsg)).encode()\n except:\n # if server disconect, end program\n messagebox.showwarning(title='Connection Warning', message = 'Server disconect. Close the navegator tab to end the aplication')\n wcuScreen.endWCU()\n sys.exit('Exit')\n\n\n\n\n\n","repo_name":"Eugenio-Pozzobon/Formula-UFSM-Data-Analysis","sub_path":"V4.0 - Python/src/wcuClientSocket.py","file_name":"wcuClientSocket.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1407933714","text":"import os\nimport shutil\n\n\ndef run():\n current_dir =input('Enter the directory where the files can be found \\n')\n current_dir = current_dir.replace(\"\\\\\",'/')\n location = input('Enter the directory where the files should be copied to \\n')\n location = location.replace(\"\\\\\",'/')\n checking = input('What is the file name category to be used to copy your files \\n')\n copy_or_cut = input('Do You want to cut the files, Please Y for yes \\n')\n \n os.chdir(current_dir)\n if os.path.isdir(os.getcwd()):\n for f in os.listdir():\n if len(f) >= len(checking):\n if (f[0:len(checking)]).strip() ==checking:\n if copy_or_cut =='Y':\n shutil.move(os.path.join(os.getcwd(),f),os.path.join(location, f)) \n else:\n shutil.copyfile(os.path.join(os.getcwd(),f),os.path.join(location, f)) \n \nrun()\n \nprint('Files moved, successfully') \nexit() \n \n \n","repo_name":"chukwudiMorganHezekiah/Python-Scripts","sub_path":"copy_cut.py","file_name":"copy_cut.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10203641849","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nimport tf2_ros\nimport geometry_msgs.msg\n\nfrom gazebo_msgs.msg import ModelState\nfrom geometry_msgs.msg import Pose\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Quaternion\nfrom geometry_msgs.msg import Vector3\nfrom geometry_msgs.msg import Twist\nfrom scipy.spatial.transform import Rotation as R\nimport numpy as np\n\nif __name__ == '__main__':\n\n\trospy.init_node('markermove_gazebo', anonymous=True)\n\tpub = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size=10)\n\tcounter = 0\n\ttransy = 0\n\ttransz = 0\n\n\t# define start point\n\ty0 = 0\t#-.02496\n\tz0 = 0.527\n\n\twhile not rospy.is_shutdown():\n\n\t\tif counter < 301:\n\t\t\ttransz=transz+0.001\n\t\t\tz = z0 - transz\n\t\t\ty = y0\n\t\t\tprint('move Z')\n\t\t\tprint(z)\t \n\t\telif counter > 300 and counter < 501:\t\n\t\t\ttransy=transy+0.001\n\t\t\ty = y0 - transy\n\t\t\tprint('move Y')\t\n\t\t\tprint(y)\n\t\telse: \n\t\t\tprint('finished')\n\t\t\tbreak \n\t\t\n\t\tpoint = Point(0.83, y, z)\n\t\tquaternion = Quaternion(1,1,1,1)\n\t\tpose = Pose(point,quaternion)\n\t\tvector = Vector3(0,0,0)\n\t\ttwist = Twist(vector,vector)\n\t\t\n\t\tmsg = ModelState('aruco_visual_marker_5', pose ,twist, 'world')\n\t\trospy.loginfo(msg)\n\t\tpub.publish(msg)\n\n\t\tcounter+=1\n\t\t# speed\t\n\t\trospy.sleep(0.05)\n\n\n","repo_name":"paulrupprecht/VisualServoing","sub_path":"src/custom_pkg/src/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"}
+{"seq_id":"22980977826","text":"# noqa: D100\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\n\ndef parse_reqs(file):\n \"\"\"Parse dependencies from requirements file with regex.\"\"\"\n egg_regex = re.compile(r\"#egg=(\\w+)\")\n reqs = list()\n for req in open(file):\n req = req.strip()\n git_url_match = egg_regex.search(req)\n if git_url_match:\n req = git_url_match.group(1)\n reqs.append(req)\n return reqs\n\n\nwith open(Path(__file__).parent / \"birdy\" / \"__init__.py\") as f:\n version = re.search(r'__version__ = [\\'\"](.+?)[\\'\"]', f.read()).group(1)\n\ndescription = \"Birdy provides a command-line tool to work with Web Processing Services.\"\nlong_description = (\n open(\"README.rst\").read()\n + \"\\n\"\n + open(\"AUTHORS.rst\").read()\n + \"\\n\"\n + open(\"CHANGES.rst\").read()\n)\n\nrequirements = parse_reqs(\"requirements.txt\")\ndev_requirements = parse_reqs(\"requirements_dev.txt\")\n\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\",\n]\n\nsetup(\n name=\"birdhouse-birdy\",\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n classifiers=classifiers,\n keywords=\"wps pywps owslib geopython birdy birdhouse\",\n author=\"Carsten Ehbrecht\",\n author_email=\"ehbrecht@dkrz.de\",\n url=\"https://github.com/bird-house/birdy\",\n license=\"Apache License v2.0\",\n # This qualifier can be used to selectively exclude Python versions -\n # in this case early Python 2 and 3 releases\n python_requires=\">=3.6.0\",\n packages=find_packages(),\n include_package_data=True,\n install_requires=requirements,\n extras_require={\n \"dev\": dev_requirements, # pip install \".[dev]\"\n },\n entry_points={\"console_scripts\": [\"birdy=birdy.cli.run:cli\"]},\n)\n","repo_name":"bird-house/birdy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"16017022436","text":"#!/usr/local/bin/python3.9\nimport random\n\norigin_board = [[0 for j in range(0,9)] for i in range(0,9)]\nboard = [[0 for j in range(0,9)] for i in range(0,9)]\n\nrow = [[0 for j in range(0,10)] for i in range(0,10)]\ncol = [[0 for j in range(0,10)] for i in range(0,10)]\ndiag = [[0 for j in range(0,10)] for i in range(0,10)]\n\nterminate_flag = False\n\ndef board_init():\n seq_diag = [0,4,8]\n for offset in range(0,9,3):\n seq = [i for i in range(1,10)]\n random.shuffle(seq)\n for idx in range(0,9):\n i, j = idx//3, idx%3\n row[offset+i][seq[idx]] = 1\n col[offset+j][seq[idx]] = 1\n k = seq_diag[offset//3]\n diag[k][seq[idx]] = 1\n origin_board[offset+i][offset+j] = seq[idx]\n\ndef make_sudoku(k):\n global terminate_flag, board\n\n if terminate_flag == True:\n return True\n\n if k > 80:\n for i in range(0,9):\n for j in range(0,9):\n board[i][j] = origin_board[i][j]\n\n terminate_flag = True\n return True\n\n i, j = k//9, k%9\n start_num = random.randint(1,9)\n\n if origin_board[i][j] != 0:\n make_sudoku(k+1)\n\n for m in range(1,10):\n #m = 1 + (m + start_num)%9\n d = (i//3)*3 + (j//3)\n \n if row[i][m] == 0 and col[j][m] == 0 and diag[d][m] == 0:\n row[i][m], col[j][m], diag[d][m] = 1, 1, 1\n origin_board[i][j] = m\n make_sudoku(k+1)\n row[i][m], col[j][m], diag[d][m] = 0, 0, 0\n origin_board[i][j] = 0\n\n\nboard_init()\nmake_sudoku(0)\nready_board = [board[i] for i in range(0,9)]\n\nprint(ready_board)\n\n'''\ndef dfs(depth):\n if depth == blank_num:\n for v in board:\n print(' '.join(map(str, v)))\n exit(0)\n\n y, x = pos[depth]\n for n in range(1, 10):\n if not row_arr[y][n] and not col_arr[x][n] and not box_arr[y//3*3+x//3][n]:\n row_arr[y][n] = col_arr[x][n] = box_arr[y//3*3+x//3][n] = True\n board[y][x] = n\n dfs(depth+1)\n row_arr[y][n] = col_arr[x][n] = box_arr[y//3*3+x//3][n] = False\n board[y][x] = 0\n\nboard = [list(map(int, input().split())) for _ in range(9)] \nrow_arr = [[False]*10 for _ in range(10)]\ncol_arr = [[False]*10 for _ in range(10)]\nbox_arr = [[False]*10 for _ in range(10)]\n\npos = []\nfor r in range(9):\n for c in range(9):\n if not board[r][c]:\n pos.append([r, c])\n else:\n row_arr[r][board[r][c]] = True\n col_arr[c][board[r][c]] = True\n box_arr[r//3*3+c//3][board[r][c]] = True\n\nblank_num = len(pos)\ndfs(0) \n'''","repo_name":"insertbaek/dev02","sub_path":"app/python/sudoku/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35895460775","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.animation as animation\nimport keyboard\n\np = 8\ni = 2\nn_arrows = 33\ndistance = 3\nring_distance = 2.8\n\n\np *= 2 if p%2 == 0 else 1\ni /= p*2\nx,y = np.meshgrid(np.linspace(-5,5,n_arrows),np.linspace(-5,5,n_arrows))\nangle, m = np.meshgrid(np.linspace(0,2*np.pi,3*n_arrows),np.linspace(0,0,1))\n\n\ndef magnetic_field(cx, cy, i):\n Bx = -i*(y-cy)/((x-cx)**2 + (y-cy)**2)\n By = i*(x-cx)/((x-cx)**2 + (y-cy)**2)\n M = np.sqrt(Bx**2 + By**2)\n return Bx, By, M\n\n\ndef magnetic_field_modules_at_distance(cx, cy, i, distance):\n x_coord = np.cos(angle)*distance\n y_coord = np.sin(angle)*distance\n BxD = -i*(y_coord-cy)/((x_coord-cx)**2 + (y_coord-cy)**2)\n ByD = i*(x_coord-cx)/((x_coord-cx)**2 + (y_coord-cy)**2)\n MD = np.sqrt(BxD**2 + ByD**2)\n return BxD, ByD, MD\n\n\ndef rotate_vector(x, y, theta):\n x_rot = x*np.cos(theta) - y*np.sin(theta)\n y_rot = x*np.sin(theta) + y*np.cos(theta)\n return x_rot, y_rot\n\n\ndef magnetic_field_on_couples(p, i, distance, time):\n startx, starty = 0, distance\n sum_x, sum_y = 0, 0\n for k in range(p):\n vec_x, vec_y, M = magnetic_field(startx,starty,i*np.sin(2*np.pi/p*k+time))\n sum_x += vec_x\n sum_y += vec_y\n vec_x, vec_y, M = magnetic_field(-startx,-starty,-i*np.sin(2*np.pi/p*k+time))\n sum_x += vec_x\n sum_y += vec_y\n startx, starty = rotate_vector(startx, starty, 2*np.pi/p)\n tot_M = np.sqrt(sum_x**2 + sum_y**2)\n return sum_x, sum_y, tot_M\n\n\ndef magnetic_field_modules_on_couples(p, i, distance, ring_distance, time):\n startx, starty = 0, distance\n ang_x, ang_y = 0, 0\n for k in range(p):\n vecd_x, vecd_y, dM = magnetic_field_modules_at_distance(startx,starty,i*np.sin(2*np.pi/p*k+time), ring_distance)\n ang_x += vecd_x\n ang_y += vecd_y\n vecd_x, vecd_y, dM = magnetic_field_modules_at_distance(-startx,-starty,-i*np.sin(2*np.pi/p*k+time), ring_distance)\n ang_x += vecd_x\n ang_y += vecd_y\n startx, starty = rotate_vector(startx, starty, 2*np.pi/p)\n tot_MD = np.sqrt(ang_x**2 + ang_y**2)\n return ang_x, ang_y, tot_MD\n\n\nfig, (ax, ax2) = plt.subplots(1, 2)\ndivnorm = colors.TwoSlopeNorm(vmin=-1, vcenter=5, vmax=40)\ncolor_map_name = 'plasma'\n\n\nqr = ax.quiver(x,y, 0, 0, 0, cmap=color_map_name, norm=divnorm)\nqr2 = ax2.quiver(angle,m,0,0, cmap=color_map_name, norm=divnorm)\nqr2circle = ax2.quiver(np.cos(angle),np.sin(angle),0,0, cmap=color_map_name, norm=divnorm)\n\n\ntime = 1.01\ndelta_time = 0.1\nshow_circle = False\nnormalized = False\n\n\ndef iterate(first=False):\n global time\n global delta_time\n global ring_distance\n global show_circle\n global normalized\n if keyboard.is_pressed('d'):\n delta_time = 0.1\n elif keyboard.is_pressed('a'):\n delta_time = -0.1\n elif keyboard.is_pressed('space'):\n if delta_time == 0:\n delta_time = 0.1\n else:\n delta_time = 0\n elif keyboard.is_pressed('up'):\n ring_distance += 0.1\n elif keyboard.is_pressed('down'):\n ring_distance -= 0.1\n elif keyboard.is_pressed('c'):\n show_circle = not show_circle\n elif keyboard.is_pressed('n'):\n normalized = not normalized\n \n sum_x, sum_y, tot_M = magnetic_field_on_couples(p, i, distance, time)\n ang_x, ang_y, MD = magnetic_field_modules_on_couples(p, i, distance, ring_distance, time)\n mod = ang_x*np.cos(angle) + ang_y*np.sin(angle)\n \n if normalized:\n sum_x, sum_y = sum_x/tot_M, sum_y/tot_M\n ang_x, ang_y = ang_x/MD, ang_y/MD\n mod = mod/MD\n\n C1, C2, C3 = 1/tot_M, 1/MD, 1/MD\n if first:\n C1, C2, C3 = divnorm.vmax, divnorm.vmax, divnorm.vmax\n \n qr.set_UVC(sum_x, sum_y, C1)\n qr2circle.set_UVC(ang_x, ang_y, C2)\n qr2.set_UVC(0, mod, C3)\n\n if not first:\n if show_circle:\n ax2.set_xlim(-1.2, 1.2)\n qr2.set_UVC(0, 0, divnorm.vmax)\n else:\n ax2.set_xlim(0, 2*np.pi)\n qr2circle.set_UVC(0, 0, divnorm.vmax)\n time += delta_time\n\n\niterate(first=True)\n\n\ndef animate(num):\n iterate()\n\n\nani = animation.FuncAnimation(fig, animate, interval=100, blit=False)\nplt.show()","repo_name":"Asventalis/Magnetic-Field","sub_path":"vectors copy.py","file_name":"vectors copy.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72337285905","text":"from Face_Recog.divide_image import FeatureExtraction\nimport glob\nimport cv2\nimport numpy as np\nfrom Face_Recog.ultils import tranpose,covariance\n\ndata_path = \"C:/Users/maiho/PycharmProjects/DPT/database/Face_Detected\"\nfeatures_path = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recog/features_color_histogram.csv\"\n# initialize the color descriptor\ncd = FeatureExtraction((16, 24, 8))\n# open the output index file for writing\noutput = open(features_path, \"w\")\n# use glob to grab the image paths and loop over them\nface_vector = []\nfor imagePath in glob.glob(data_path + \"/*.jpg\"):\n\n\timageID = imagePath[imagePath.rfind(\"/\") + 1:]\n\timage = cv2.imread(imagePath)\n\t# print(image.shape)\n\t# describe the image\n\tfeatures = cd.extract(image)\n\tface_vector.append(features)\n\tfeatures = [str(f) for f in features]\n\n\toutput.write(\"%s,%s\\n\" % (imageID, \",\".join(features)))\n# close the index file\noutput.close()\n\nface_vector = np.asarray(face_vector)\nface_vector = face_vector.transpose()\n\n#STEP2: Normalize the face vectors by calculating the average face vector and subtracting it from each vector\navg_face_vector = face_vector.mean(axis=1)\navg_face_vector = avg_face_vector.reshape(face_vector.shape[0], 1)\nnormalized_face_vector = face_vector - avg_face_vector\n\n#STEP3: Calculate the Covariance Matrix or the Sigma\ncovariance_matrix = np.cov(np.transpose(normalized_face_vector))\neigen_values, eigen_vectors = np.linalg.eig(covariance_matrix)\n\n# chuyen vi ma tran\n\n# tranpose_matrix = tranpose(normalized_face_vector)\n# covariance_matrix = covariance(tranpose_matrix)\n# covariance_matrix = np.transpose(normalized_face_vector).dot(normalized_face_vector)\n# print(covariance_matrix)\n\n# STEP4: Calculate Eigen Vectors\n\n\n# STEP5: Select the K best Eigen Faces, K < M\n\nk = 30\nk_eigen_vectors = eigen_vectors[0:k, :]\neigen_faces = k_eigen_vectors.dot(np.transpose(normalized_face_vector))\n\n\n# STEP7: Represent Each eigen face as combination of the K Eigen Vectors\nweights = np.transpose(normalized_face_vector).dot(np.transpose(eigen_faces))\npca_features_path = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recog/features_pca.csv\"\noutput_1 = open(pca_features_path, \"w\")\n\n# use glob to grab the image paths and loop over them\nfor i,imagePath in enumerate(glob.glob(data_path + \"/*.jpg\")):\n\t# extract the image ID (i.e. the unique filename) from the image\n\t# path and load the image itself\n\timageID = imagePath[imagePath.rfind(\"/\") + 1:]\n\timage = cv2.imread(imagePath)\n\n\tfeatures = weights[i]\n\t# write the features to file\n\tfeatures = [str(f) for f in features]\n\n\toutput_1.write(\"%s,%s\\n\" % (imageID, \",\".join(features)))\noutput_1.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# # STEP8: Testing Phase\n# test_add = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recog/s50_15.jpg\"\n#\n# cd = ColorDescriptor((8, 12, 3))\n# # load the query image and describe it\n# query = cv2.imread(test_add)\n# query = cv2.resize(query,(240,360))\n# print(query.shape)\n# features_r = cd.describe(query)\n#\n# features_r = np.asarray(features_r)\n# print(features_r.shape)\n# features_r = features_r.reshape(1440,1)\n# test_normalized_face_vector = features_r - avg_face_vector\n# test_weight = np.transpose(test_normalized_face_vector).dot(np.transpose(eigen_faces))\n# index = np.argmin(np.linalg.norm(test_weight - weights, axis=1))\n# # print(\"------------------\")\n# # print(weights[345])\n# print(index)\n# for i,imagePath in enumerate(glob.glob(data_path + \"/*.jpg\")):\n# if(i==index):\n# result = cv2.imread(imagePath)\n# result = cv2.resize(result, (256, 256))\n# cv2.imshow(\"Result\", result)\n# cv2.waitKey(0)\n\n","repo_name":"thangnvkcn/MultimediaDatabaseAssignment","sub_path":"Face_Recog/features_extraction.py","file_name":"features_extraction.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9711073406","text":"# By submitting this assignment, I agree to the following:\r\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do.\"\r\n# \"I have not given or received any unauthorized aid on this assignment.\"\r\n#\r\n# Name: Daniel Mireles\r\n# Section: 102-540\r\n# Assignment: Lab3b_Act4_Prog1d\r\n# Date: 09/12/2019\r\n#\r\n\r\n#Calculate the production of a well (barrels/day) after a given number of days, for a given initial\r\n#production rate (barrels/day), initial decline rate (barrels/day), and hyperbolic constant (no\r\n#dimensions).\r\nprint(\"This program calculates the production of a well (barrels/day) after a given number of days for a given initial production rate (barrels/day), initial decline rate (barrels/day), and hyperbolic constant.\")\r\nInitial_Production_Rate = float(input(\"Please input the initial production rate in barrels/day: \")) #barrels/day\r\nInitial_Decline_Rate = float(input(\"Please inpute the initial decline rate in barrels/day: \")) #barrels/day\r\nHyperbolic_Constant = float(input(\"Please insert the hyperbolic constant: \")) #NoConstants\r\nArps_Equation = Initial_Production_Rate/((1+(Hyperbolic_Constant*Initial_Decline_Rate*Time))**(1/Hyperbolic_Constant))\r\nDecimal = \"%6.4f\" % Arps_Equation\r\nprint (\"The prodcution of the well is\", Decimal, \"barrels/day.\")\r\n","repo_name":"DannyMireles/Python","sub_path":"Lab3b_Act4_Prog1d.py","file_name":"Lab3b_Act4_Prog1d.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6613761673","text":"\"\"\"\n Use CNN for instances encoder\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\n\nclass CnnEncoder(nn.Module):\n def __init__(self, opt):\n super(CnnEncoder, self).__init__()\n\n self.opt = opt\n\n self.cnn = nn.Conv2d(\n in_channels=1,\n out_channels=opt.hidden_size,\n kernel_size=(opt.cnn_window_size, opt.word_vec_size + 2*opt.position_size),\n stride=(1, 1),\n padding=(1, 0)\n )\n\n self.activation = nn.ReLU()\n\n def forward(self, embeddings):\n \"\"\"\n Encode embeddings, including convolution and max-pooling operations\n\n Args:\n embeddings: [batch_size, num_step, embedding_size]\n Return:\n hidden state of each sentence: [batch_size, hidden_size]\n \"\"\"\n\n embeddings = torch.unsqueeze(embeddings, dim=1)\n embeddings = self.cnn(embeddings)\n\n # Max-pooling\n x, _ = torch.max(embeddings, dim=2)\n x = x.view(-1, self.opt.hidden_size)\n\n return self.activation(x) \n","repo_name":"WHUNLPLab/FAST-NER","sub_path":"nre/layers/encoder/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
+{"seq_id":"72732788626","text":"from random import random\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nclass data:\n def __init__(self, application, credit):\n self.df = application\n self.df2 = credit\n\n def caseOne(self):\n df_pivot = pd.pivot(\n self.df2,\n index='ID',\n values='STATUS',\n columns='MONTHS_BALANCE'\n )\n df_pivot.reset_index(inplace=True)\n df_pivot['window'] = df_pivot.isna().sum(axis=1)\n df_pivot = df_pivot[df_pivot.window > 20]\n df_pivot['due_count'] = np.where((df_pivot.iloc[:, 1:-1]=='0') | (df_pivot.iloc[:, 1:-1]=='1') | (df_pivot.iloc[:, 1:-1]=='2') | (df_pivot.iloc[:, 1:-1]=='3') | (df_pivot.iloc[:, 1:-1]=='4') | (df_pivot.iloc[:, 1:-1]=='5'), 1, 0).sum(axis=1)\n df_clean = pd.merge(self.df, df_pivot[['ID', 'window', 'due_count']], on='ID', how='inner')\n df_clean['isBadCustomer'] = np.where(df_clean.due_count > df_clean.due_count.median(), 1, 0)\n df_validation, df_train = train_test_split(df_clean, test_size=0.3, random_state=0)\n df_train, df_test = train_test_split(df_train, test_size=0.3, random_state=0)\n\n return [df_validation, df_test, df_train]","repo_name":"isaacebi/Data-Science-Project","sub_path":"1. Credit Card Approval/caseData.py","file_name":"caseData.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1175130434","text":"from selenium.common.exceptions import NoAlertPresentException\nfrom .base_page import BasePage\nfrom selenium.webdriver.common.by import By\nfrom .locators import ProductPageLocators\nimport math \n\nclass ProductPage(BasePage, ProductPageLocators):\n def click_add_to_basket_button(self):\n \"\"\"\n Метод для добавления товара в корзину\n \"\"\"\n basket_button = self.browser.find_element(*ProductPageLocators.ADD_TO_BASKET_BTN)\n basket_button.click()\n \n def solve_quiz_and_get_code(self):\n \"\"\"\n Метод для решения задачи в алерте со страницы с промо акцией\n \"\"\"\n alert = self.browser.switch_to.alert\n x = alert.text.split(\" \")[2]\n answer = str(math.log(abs((12 * math.sin(float(x))))))\n alert.send_keys(answer)\n alert.accept()\n try:\n alert = self.browser.switch_to.alert\n alert_text = alert.text\n print(f\"Your code: {alert_text}\")\n alert.accept()\n except NoAlertPresentException:\n print(\"No second alert presented\")\n \n def should_be_product_in_basket(self):\n \"\"\"\n Метод для проверки на соответствие названия товара на странице названию только что добавленного\n \"\"\"\n name = self.browser.find_element(*ProductPageLocators.NAME_OF_PRODUCT)\n second_name = self.browser.find_element(*ProductPageLocators.SECOND_NAME_OF_PRODUCT)\n assert name.text == second_name.text, 'Naming is the same'\n \n def price_in_basket_should_be_match(self):\n \"\"\"\n Метод для проверки на соответствие цены товара на странице цене только что добавленного\n \"\"\"\n price = self.browser.find_element(*ProductPageLocators.PRICE_OF_PRODUCT)\n second_price = self.browser.find_element(*ProductPageLocators.TOTAL_PRICE)\n assert price.text == second_price.text, 'Price is the same'\n \n def should_not_be_success_message(self):\n \"\"\"\n Метод для проверки наличия/отсутствия сообщения об успешном добавлении товара в корзину\n \"\"\"\n assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \\\n \"Success message is presented, but should not be\"\n \n def should_dissapear_of_success_message(self):\n \"\"\"\n Метод для проверки наличия/отсутствия сообщения об успешном добавлении товара в корзину\n \"\"\"\n assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \\\n \"Success message is not disappeared\"","repo_name":"aidarette/Selenium-Python","sub_path":"pages/product_page.py","file_name":"product_page.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1730359734","text":"import numpy as np\r\nimport cv2\r\n\r\ndef getImage(path):\r\n return cv2.imread(path)\r\n\r\nx = np.array([])\r\n\r\n#Getting all images then reshaping them to 128x128x3 (3 means RGB)\r\nfor i in range(60):\r\n path = 'datas/{}.jpg'.format(i+1)\r\n \r\n image = getImage(path)\r\n \r\n image = cv2.resize(image, (128, 128))\r\n \r\n x = np.append(x, image)\r\n \r\nx = x.reshape((-1, 128, 128, 3))\r\n\r\n#Seperating datas to train and test datas\r\nx_train = np.append(x[:20,...], x[30:50,...]).reshape((-1,128,128,3))\r\nx_test = np.append(x[20:30,...], x[50:,...]).reshape((-1,128,128,3))\r\n\r\ny_train = np.array([])\r\ny_test = np.array([])\r\n\r\n#Labelling datas \r\nfor i in range(x_train.shape[0]):\r\n if i < 20:\r\n y_train = np.append(y_train, 0)\r\n else:\r\n y_train = np.append(y_train, 1)\r\n \r\nfor i in range(x_test.shape[0]):\r\n if i < 10:\r\n y_test = np.append(y_test, 0)\r\n else:\r\n y_test = np.append(y_test, 1)\r\n \r\n \r\ny_train = y_train.reshape((y_train.shape[0],1))\r\ny_test = y_test.reshape((y_test.shape[0],1))\r\n \r\ndata = (x_train, y_train), (x_test, y_test)\r\n\r\n#Save the labeled datas as npy file\r\nnp.save('cat_and_dog', data)\r\n","repo_name":"GHasanKaraman/Cat_Dog_Classifier_CNN","sub_path":"preparingImages.py","file_name":"preparingImages.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7310815995","text":"from turtle import title\n\nimport requests\nfrom html2text import HTML2Text\nfrom lxml import etree\nfrom html import unescape\nimport os\n\n\"\"\"\nrequirements\n打了箭头的才需要手动安装,其余是自动安装的依赖库\ncertifi==2021.10.8\ncharset-normalizer==2.0.7\ncssselect==1.1.0\nhtml2text==2020.1.16 -- <--\nidna==3.3\nlxml==4.6.3 ----------- <--\nrequests==2.26.0 ------- <--\nurllib3==1.26.7\n\"\"\"\n\n\ndef crawl(url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/95.0.4638.54 Safari/537.36\",\n }\n print(\"在爬了...\")\n # 配置header破反爬\n response = requests.get(url, headers=headers)\n # 200就继续\n if response.status_code == 200:\n html = response.content.decode(\"utf8\")\n # print(html)\n tree = etree.HTML(html)\n # 找到需要的html块\n title = tree.xpath('//*[@id=\"articleContentId\"]/text()')[0]\n block = tree.xpath('//*[@id=\"content_views\"]')\n # html\n ohtml = unescape(etree.tostring(block[0]).decode(\"utf8\"))\n\n print(\"title:\", title)\n save(ohtml, title)\n # 完成!\n print(\"爬完噜!\")\n else:\n print(\"错了错了!\")\n\n\ndef save(html, title):\n if \"output\" not in os.listdir():\n # 不存在输出文件夹就创建\n os.mkdir(\"output\")\n with open(f\"output/{title}.html\", 'w', encoding='utf8') as html_file:\n # 保存html\n html_file.write(html)\n\n with open(f\"output/{title}.md\", 'w', encoding='utf8') as md_file:\n # 保存markdown\n text_maker = HTML2Text()\n # md转换\n md_text = text_maker.handle(html)\n md_file.write(md_text)\n\n\nif __name__ == '__main__':\n # 你想要爬取的文章url\n url = \"https://blog.csdn.net/LW_20180806/article/details/123718853?spm=1001.2014.3001.5502\"\n crawl(url)\n","repo_name":"kixuan/Crawling-around","sub_path":"CSDN文章转md/CSDN笔记.py","file_name":"CSDN笔记.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71833850065","text":"#\n# @lc app=leetcode.cn id=338 lang=python3\n#\n# [338] 比特位计数\n#\n\n# @lc code=start\nclass Solution:\n def countBits(self, n: int) -> List[int]:\n output = []\n for i in range(n+1):\n output.append(bin(i).count(\"1\"))\n return output\n# @lc code=end\n\n","repo_name":"zch0423/leetcode","sub_path":"338.比特位计数.py","file_name":"338.比特位计数.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41168430970","text":"from django.utils import timezone\nfrom .models import Offre\nfrom background_task import background\n\ndef update_expired_offers():\n now = timezone.now()\n expired_offers = Offre.objects.filter(date_of_expiry__lt=now, valable=1)\n\n for offer in expired_offers:\n offer.valable = 0\n offer.stagiaire_set.filter(status__in=[1, 2]).update(status=0)\n offer.save()\n\n@background(schedule=60) # Schedule to run every 60 seconds (adjust as needed)\ndef schedule_update_expired_offers():\n update_expired_offers()","repo_name":"ElbachaliMouad/internshipintel","sub_path":"intellcapstg/stagiaire/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41225655279","text":"\r\ndef Soluiton():\r\n N=int(input())\r\n ans=0\r\n for pw in range(1,N+1):\r\n for d in range(1,10):\r\n if(pw*d <=N):\r\n ans+=1\r\n pw= pw*10\r\n return ans\r\n\r\n\r\n\r\ndef main():\r\n \r\n test=int(input())\r\n for _ in range(test):\r\n print(Soluiton())\r\n \r\n \r\n \r\n \r\n \r\nif __name__==\"__main__\":\r\n main()","repo_name":"TheReinforce43/Competitive-Progamming","sub_path":"B. Ordinary Numbers.py","file_name":"B. Ordinary Numbers.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"11226923375","text":"# tic tac toe by FE\r\n\r\n# playing board\r\ndef board():\r\n print (\"\\n\")\r\n print (\"\\033[47m\"+ ttt1[0] +\"|\"+ ttt1[1] +\"|\"+ ttt1[2] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt[0] +\"|\"+ ttt[1] +\"|\"+ ttt[2] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt2[0] +\"|\"+ ttt2[1] +\"|\"+ ttt2[2] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt1[3] +\"|\"+ ttt1[4] +\"|\"+ ttt1[5] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt[3] +\"|\"+ ttt[4] +\"|\"+ ttt[5] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt2[3] +\"|\"+ ttt2[4] +\"|\"+ ttt2[5] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt1[6] +\"|\"+ ttt1[7] +\"|\"+ ttt1[8] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt[6] +\"|\"+ ttt[7] +\"|\"+ ttt[8] +\"\\033[0m\")\r\n print (\"\\033[47m\"+ ttt1[6] +\"|\"+ ttt1[7] +\"|\"+ ttt1[8] +\"\\033[0m\")\r\n print (\"\\n\")\r\n return\r\n\r\n# player 1 and payer 2 plays\r\ndef play1():\r\n try:\r\n var = int(input (\"Your turn \\033[44m\"+ p1 +\"\\033[0m!\\n\"))\r\n var = var - 1\r\n if (var in t): # used to check if play is valid and check box on board\r\n ttt[var] = (\"\\033[44m \\033[47m\")\r\n ttt1[var] = (\"\\033[44m \\033[47m\")\r\n ttt2[var] = (\"\\033[44m_______\\033[47m\")\r\n t[var] = (\"null\") # removes position from play\r\n xxx.append (var) # adds position to X(blue) played\r\n else: # screams invalid position of play\r\n board()\r\n print (\"\\n\\n\\nChoose a valid position \\033[34m\"+ p1 +\"\\033[0m!\")\r\n play1()\r\n return\r\n except ValueError: # if not int input, ask for int input\r\n board()\r\n print(\"Incorrect input, Try again.\")\r\n print(\"Choose an available position on the board.\\n\")\r\n play1()\r\n\r\ndef play2():\r\n try:\r\n var = int(input (\"Your time to play \\033[33m\"+ p2 +\"\\033[0m!\\n\"))\r\n var = var - 1\r\n if (var in t):\r\n ttt[var] = (\"\\033[43m \\033[47m\")\r\n ttt1[var] = (\"\\033[43m \\033[47m\")\r\n ttt2[var] = (\"\\033[43m_______\\033[47m\")\r\n t[var] = (\"null\")\r\n ooo.append (var)\r\n else:\r\n board()\r\n print (\"Choose a valid position \\033[33m\"+ p2 +\"\\033[0m!\")\r\n play2()\r\n except ValueError:\r\n board()\r\n print(\"Incorrect input, Try again\\n\")\r\n play2()\r\n\r\n# check win \r\ndef win(xoro):\r\n array2 = [[0, 1, 2], [0, 4, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [3, 4, 5], [6, 7, 8], [2, 4, 6]] # winning plays\r\n x = y = qwerty = 0\r\n for x in range (8):\r\n count = 0\r\n for y in range (3):\r\n for qwerty in range (len(xoro)):\r\n if xoro[qwerty] == array2[x][y] :\r\n count = count + 1\r\n if count == 3: # a winning combination was inserted by one player\r\n if xoro == xxx: # player 1 wins \r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[44mCongrats \" + p1 + \"!! You've won the game.\\033[0m\\n\")\r\n input(\"\\033[44mPress any key to exit\\033[0m\")\r\n print(\"\\n\\n\\n\\n\\n\\033[44mBye!\\033[0m\\n\\n\\n\\n\\n\\n\")\r\n exit()\r\n else: # player 2 wins\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n print (\"\\n\\033[43mCongrats \" + p2 + \"!! You've won the game.\\033[0m\\n\")\r\n input(\"\\033[43mPress any key to exit\\033[0m\")\r\n print(\"\\n\\n\\n\\n\\n\\033[43mBye!\\033[0m\\n\\n\\n\\n\\n\\n\")\r\n exit()\r\n qwerty = qwerty + 1\r\n y = y + 1 \r\n x = x + 1\r\n\r\n# game\r\ndef game():\r\n board()\r\n play1()\r\n board()\r\n play2()\r\n board()\r\n play1()\r\n board()\r\n play2()\r\n board()\r\n play1()\r\n board()\r\n win(xxx)\r\n play2()\r\n board()\r\n win(ooo)\r\n play1()\r\n board()\r\n win(xxx)\r\n play2()\r\n board()\r\n win(ooo)\r\n play1()\r\n board()\r\n win(xxx)\r\n\r\n# reset and start again\r\ndef reset():\r\n ttt = [' 1 ', ' 2 ', ' 3 ', ' 4 ', ' 5 ', ' 6 ', ' 7 ', ' 8 ', ' 9 ']\r\n ttt1 = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\n ttt2 = ['_______', '_______', '_______', '_______', '_______', '_______', '_______', '_______', '_______']\r\n t = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n xxx = []\r\n ooo = []\r\n \r\n\r\n\r\n# defining tables\r\nttt = [' 1 ', ' 2 ', ' 3 ', ' 4 ', ' 5 ', ' 6 ', ' 7 ', ' 8 ', ' 9 ']\r\nttt1 = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\nttt2 = ['_______', '_______', '_______', '_______', '_______', '_______', '_______', '_______', '_______']\r\nt = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nxxx = []\r\nooo = []\r\n\r\n# defining players name, colours and welcome screen\r\np1 = input (\"\\n\\n\\n\\033[34mPlayer One, Insert your Name: \\033[0m\\n \") \r\np2 = input (\"\\n\\n\\n\\033[33mPlayer Two, Insert your Name: \\033[0m\\n \") \r\nprint (\"\\n\\n\\nWelcome \\033[34m\"+ p1 +\"\\033[0m!\\n You'll play with the \\033[34mBlue\\033[0m.\\n\") \r\nprint (\"\\nHi there, \\033[33m\"+ p2 + \"\\033[0m.\\n You'll play with the \\033[33mYellow\\033[0m.\\n\\n\")\r\n\r\n# printing game rules\r\ninput(\"Press ENTER to see Game Rules\\n\")\r\nboard()\r\nprint (\"\\033[45mIt's a simple game. You have to place tic tac and toe in a line.\\033[0m\")\r\nprint (\"\\n\\033[45mChoose the number of the square you want to play.\\033[0m\")\r\n\r\n# continue and confirming fisrt play\r\nprint(\"\\n\\033[34m\"+ p1 +\"\\033[0m will play first.\\n\")\r\nprint(\"\\033[33m\"+ p2 +\"\\033[0m will play last.\\n\\n\")\r\ninput(\"\\n\\nPress ENTER to continue \\n\\n\")\r\n\r\n# start game\r\ngame() ","repo_name":"Jammer23rd/TTT","sub_path":"ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70892896465","text":"#!/usr/bin/env python3\n\n'''\n Operating on artifacts in Byte-alignment\n ----------------------------------------\n'''\n\nimport html\n\nfrom itertools import zip_longest\n\nfrom ..base import artifact\nfrom ..base import tree\n\nOV_LIMIT = 1<<20\n\nclass Octets(tree.TreeLeaf):\n ''' ... '''\n\n def __init__(self, up, lo, width=None, hi=None, name=None):\n if hi is None:\n assert width is not None\n hi = lo + width\n assert hi > lo\n if hi - lo > OV_LIMIT:\n print(\n up.this,\n \"Big ov::Octets 0x%x\" % (hi - lo),\n hex(lo),\n hex(hi),\n )\n assert False\n self.up = up\n self.this = up.this\n if name is None:\n name = self.__class__.__name__\n self.ov_name = name\n super().__init__(lo, hi)\n\n def __len__(self):\n return self.hi - self.lo\n\n def __getitem__(self, idx):\n return self.this[self.lo + idx]\n\n def __iter__(self):\n yield from self.this[self.lo:self.hi]\n\n def __str__(self):\n try:\n return \" \".join(self.render())\n except:\n return str(super())\n\n def octets(self):\n return self.this[self.lo:self.hi]\n\n def iter_bytes(self):\n if self.this.byte_order is None:\n yield from self.octets()\n return\n\n def group(data, chunk):\n i = [iter(data)] * chunk\n return zip_longest(*i, fillvalue=0)\n\n for i in group(self.octets(), len(self.this.byte_order)):\n for j in self.this.byte_order:\n yield i[j]\n\n def insert(self):\n self.up.insert(self)\n return self\n\n def render(self):\n octets = self.this[self.lo:self.hi]\n fmt = \"%02x\"\n tcase = self.this.type_case.decode(octets)\n yield \" \".join(fmt % x for x in octets) + \" |\" + tcase + \"|\"\n\nclass This(Octets):\n ''' A new artifact '''\n\n def __init__(self, up, *args, **kwargs):\n super().__init__(up, *args, **kwargs)\n self.this = up.this.create(start=self.lo, stop=self.hi)\n\n def render(self):\n yield self.this\n\nclass Opaque(Octets):\n\n def __init__(self, *args, rendered=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.rendered = rendered\n self.that = None\n\n def artifact(self):\n self.that = self.up.this.create(start=self.lo, stop=self.hi)\n return self.that\n\n def render(self):\n if self.that:\n yield self.that\n elif self.rendered is None:\n yield \"Opaque[0x%x]\" % (self.hi - self.lo)\n else:\n yield self.rendered\n\ndef Text(width):\n class Text_Class(Octets):\n ''' Text String '''\n WIDTH = width\n def __init__(self, *args, **kwargs):\n kwargs[\"width\"] = self.WIDTH\n super().__init__(*args, **kwargs)\n self.txt = self.this.type_case.decode(self.this[self.lo:self.hi])\n self.txt = self.this.type_case.decode(self.iter_bytes())\n\n def render(self):\n yield \"»\" + self.txt + \"«\"\n\n return Text_Class\n\nclass HexOctets(Octets):\n ''' Octets rendered without text column '''\n\n def render(self):\n yield \"\".join(\"%02x\" % i for i in self)\n\nclass Octet(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=1, **kwargs)\n self.val = self.this[lo]\n\n def render(self):\n yield \"0x%02x\" % self.val\n\nclass Le16(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=2, **kwargs)\n self.val = self.this[lo + 1] << 8\n self.val |= self.this[lo]\n\n def render(self):\n yield \"0x%04x\" % self.val\n\nclass Le32(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=4, **kwargs)\n self.val = self.this[lo + 3] << 24\n self.val |= self.this[lo + 2] << 16\n self.val |= self.this[lo + 1] << 8\n self.val |= self.this[lo]\n\n def render(self):\n yield \"0x%08x\" % self.val\n\nclass Be16(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=2, **kwargs)\n self.val = self.this[lo] << 8\n self.val |= self.this[lo + 1]\n\n def render(self):\n yield \"0x%04x\" % self.val\n\nclass Be32(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=4, **kwargs)\n self.val = self.this[lo + 0] << 24\n self.val |= self.this[lo + 1] << 16\n self.val |= self.this[lo + 2] << 8\n self.val |= self.this[lo + 3]\n\n def render(self):\n yield \"0x%08x\" % self.val\n\nclass Re32(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=4, **kwargs)\n self.val = self.this[lo + 2] << 24\n self.val |= self.this[lo + 3] << 16\n self.val |= self.this[lo + 0] << 8\n self.val |= self.this[lo + 1]\n\n def render(self):\n yield \"0x%08x\" % self.val\n\nclass Me32(Octets):\n def __init__(self, up, lo, **kwargs):\n super().__init__(up, lo, width=4, **kwargs)\n self.val = self.this[lo + 1] << 24\n self.val |= self.this[lo + 0] << 16\n self.val |= self.this[lo + 3] << 8\n self.val |= self.this[lo + 2]\n\n def render(self):\n yield \"0x%08x\" % self.val\n\nclass Struct(Octets):\n ''' ... '''\n\n def __init__(self, up, lo, vertical=False, more=False, pad=0, **kwargs):\n self.fields = []\n self.vertical = vertical\n self.lo = lo\n self.hi = lo\n self.up = up\n self.args = {}\n for name, width in kwargs.items():\n if name[-1] == \"_\":\n self.addfield(name[:-1], width)\n else:\n self.args[name] = width\n if not more:\n self.done(pad=pad)\n\n def done(self, pad=0):\n if pad:\n self.hide_the_rest(pad)\n super().__init__(self.up, self.lo, hi = self.hi, **self.args)\n del self.args\n\n def addfield(self, name, what):\n ''' add a field to the structure '''\n assert hasattr(self, \"args\")\n if name is None:\n name = \"at%04x\" % (self.hi - self.lo)\n if isinstance(what, int):\n y = HexOctets(self.up, self.hi, width=what)\n z = y\n else:\n y = what(self.up, self.hi)\n z = y\n self.hi = y.hi\n setattr(self, name, z)\n self.fields.append((name, y))\n return y\n\n def hide_the_rest(self, size):\n ''' hide the rest of the space occupied by the structure '''\n assert hasattr(self, \"args\")\n assert self.lo + size >= self.hi\n if self.lo + size != self.hi:\n self.addfield(\"at%x_\" % self.hi, self.lo + size - self.hi)\n\n def suffix(self, adr):\n return \"\\t// @0x%x\" % (adr - self.lo)\n\n def render(self):\n assert not hasattr(self, \"args\")\n if not self.vertical:\n i = []\n for name, obj in self.fields:\n if name[-1] != \"_\":\n i.append(name + \"=\" + \"|\".join(obj.render()))\n yield self.ov_name + \" {\" + \", \".join(i) + \"}\"\n else:\n yield self.ov_name + \" {\"\n for name, obj in self.fields:\n if name[-1] != \"_\":\n j = list(obj.render())\n j[0] += self.suffix(obj.lo)\n yield \" \" + name + \" = \" + j[0]\n if len(j) > 1:\n for i in j[1:-1]:\n yield \" \" + i\n yield \" \" + j[-1]\n yield \"}\"\n\n\n\nclass OctetView(tree.Tree):\n ''' ... '''\n\n def __init__(self, this, max_render=None, default_width=16):\n self.this = this\n hi = len(this)\n super().__init__(\n lo = 0,\n hi = hi,\n limit = 1<<16,\n )\n if max_render is None:\n max_render = hi\n self.max_render = max_render\n self.default_width = default_width\n self.adrfmt = \"%%0%dx\" % len(\"%x\" % self.hi)\n\n def pad(self, lo, hi):\n assert hi > lo\n width = self.default_width\n if hi - lo <= width:\n yield Octets(self, lo, hi=hi)\n return\n i = lo % width\n if i:\n yield Octets(self, lo, width - i)\n lo += width - i\n while lo + width <= hi:\n yield Octets(self, lo, width)\n lo += width\n if lo < hi:\n yield Octets(self, lo, hi = hi)\n\n def prefix(self, lo, hi):\n return \"0x\" + self.adrfmt % lo + \"…\" + self.adrfmt % hi\n\n def pad_out(self):\n lo = 0\n prev = None\n for i in sorted(self):\n if i.lo < lo:\n print(\"Overlap\", self.this)\n print(\" this: \", hex(i.lo), hex(i.hi), i)\n for n, j in enumerate(i.render()):\n print(\"\\t\" + str(j))\n if n > 5:\n break\n if prev is None:\n print(\" prev: None\")\n else:\n print(\" prev: \", hex(prev.lo), hex(prev.hi), prev)\n for n, j in enumerate(prev.render()):\n print(\"\\t\" + str(j))\n if n > 5:\n break\n if i.lo > lo:\n yield from self.pad(lo, i.lo)\n yield i\n lo = i.hi\n prev = i\n if lo < self.hi:\n yield from self.pad(lo, self.hi)\n\n def render(self, title=\"OctetView\"):\n ''' Render via utf8 file '''\n # print(self.this, \"Rendering\", self.gauge, \"octetview-leaves\")\n tfn = self.this.add_html_interpretation(title)\n with open(tfn.filename, \"w\", encoding=\"utf-8\") as file:\n file.write(\"\\n\")\n last = None\n lasti = None\n trunc = \"\"\n rpt = 0\n for i in self.pad_out():\n if i.lo > self.max_render:\n trunc = \"[Truncated]\\n\"\n break\n for j in i.render():\n if isinstance(j, artifact.Artifact):\n j = j.summary(types=False)\n else:\n j = html.escape(j)\n if j == last:\n rpt += 1\n lasti = i\n continue\n if rpt == 1:\n file.write(self.prefix(lasti.lo, lasti.hi) + \" \" + last + \"\\n\")\n rpt = 0\n elif rpt:\n file.write(self.prefix(lasti.lo, lasti.hi) + \" …[0x%x]\\n\" % rpt)\n rpt = 0\n last = j\n file.write(self.prefix(i.lo, i.hi) + \" \" + j + \"\\n\")\n if rpt:\n file.write(self.prefix(lasti.lo, lasti.hi) + \" …[0x%x]\\n\" % rpt)\n file.write(trunc)\n file.write(\"
\\n\")\n","repo_name":"Datamuseum-DK/AutoArchaeologist","sub_path":"autoarchaeologist/generic/octetview.py","file_name":"octetview.py","file_ext":"py","file_size_in_byte":11020,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
+{"seq_id":"3092134645","text":"from numba import njit\n\n'''\n Apply social distancing in each region based on region's policy - `social_distancing`.\n Arguments:\n people:\n (x, y, alive, region_id)\n regions:\n (region_id, xmin, xmax, ymin, ymax, social_distancing_factor)\n Returns:\n (x, y) : new positions\n'''\n@njit\ndef social_distancing(people, regions):\n\n # get values from arguments\n x, y, alive, region_id = people[:, 0], people[:, 1], people[:, 2], people[:, 3]\n r_region_id, r_xmin, r_xmax, r_ymin, r_ymax, r_social_dist = regions[:, 0], regions[:, 1], regions[:, 2], regions[:, 3], regions[:, 4], regions[:, 5]\n\n # repel force weight\n K = 10.0\n\n # loop through all regions and apply social distancing\n for r in range(regions.shape[0]):\n\n n_iter = int(r_social_dist[r] * 100) # number of iterations of social distancing force\n xm, xM, ym, yM = r_xmin[r], r_xmax[r], r_ymin[r], r_ymax[r] # region's bounding box\n\n # keep looping multiple times based on social distancing factor to maximize distance between 2 people\n for _ in range(n_iter):\n\n # calculate the force exerted on person i by localites\n for i in range(people.shape[0]):\n \n # person should be alive and belong to the region `r` under consideration\n if alive[i] == 0.0 or region_id[i] != r_region_id[r]:\n continue\n \n # change in (x, y)\n delta_x, delta_y = 0.0, 0.0\n \n # loop through all other people in same region\n for j in range(people.shape[0]):\n\n # person `j` should be alive, in same region as that of person `i` and must not be equal to `i`\n if region_id[i] != region_id[j] or alive[j] == 0.0 or i == j:\n continue\n\n # calculate distance between i and j\n dist_ij = (x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2\n\n # calculate direction of force (for super position)\n delta_x = delta_x + ( K * ( x[i] - x[j] ) ) / ( dist_ij**2 + 0.0000001 )\n delta_y = delta_y + ( K * ( y[i] - y[j] ) ) / ( dist_ij**2 + 0.0000001 )\n \n # get new position of persion i\n x[i] = x[i] + delta_x\n y[i] = y[i] + delta_y\n\n # clip values within the region's bounding box\n x[i] = xm if x[i] < xm else (xM if x[i] > xM else x[i])\n y[i] = ym if y[i] < ym else (yM if y[i] > yM else y[i])\n \n # return new positions\n return x, y","repo_name":"ankit1997/CovidSim","sub_path":"core/simulation/operation/social_distancing.py","file_name":"social_distancing.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"37472914283","text":"#%%\n\nimport pandas as pd\n\n#%%\n\nprint('Reading NGSIM dataset...')\nngsim = pd.read_csv('/home/alan/work/data/NGSIM.csv')\nnames = ngsim.Location.dropna().unique()\nfor name in names:\n print(f'Saving {name} dataset...')\n data = ngsim[ngsim.Location == name]\n data.to_csv(f'/home/alan/work/data/{name}.csv', index=False)\nprint('Finish')\n\n#%% fail\n\n#location = pd.read_csv('~/data/NGSIM.csv', usecols=['Location'], squeeze=True)\n#names = location.drop_duplicates().dropna().tolist()\n#for name in names:\n# print('Seperating dataset %s...' % name)\n# skip = location.index[location != name]\n# data = pd.read_csv('~/data/NGSIM.csv', skiprows=skip)\n# print('Saving...')\n# data.to_csv('~/data/%s.csv' % name)\n# print('Finish')\n# print()\n\n#%%\n\n#count = 0\n#for name in names:\n# tmp = location == name\n# count += tmp.sum()\n#print(count)\n#print(len(location))\n\n#%% index of nan\n\n#for i in location.index:\n# if location[i] not in names:\n# print(i)","repo_name":"Chauency/Predict","sub_path":"data-process/split_ngsim.py","file_name":"split_ngsim.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"24147523301","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom myLib.ImageProcess import ImageProcess\nclass FriendInfoWidget(QWidget):\n\tdef __init__(self,parent=None,data=None):\n\t\tsuper().__init__()\n\t\tself.data=data\n\t\tself.parent=parent\n\t\tself.initUI()\n\t\tself.update()\n\tdef initUI(self):\n\t\t#创建控件\n\t\tself.label_photo=QLabel()\n\t\tself.label_name=QLabel()\n\t\tself.label_certicication=QLabel()#官方认证标识\n\t\t#创建布局\n\t\tlayout_main=QHBoxLayout()\n\t\t#为控件设置属性\n\t\t# self.label_photo.setStyleSheet('border:1px solid red')\n\t\tself.label_name.setStyleSheet('font-size:12px;')\n\t\t# self.label_certicication.setStyleSheet('border:1px solid red')\n\n\t\tself.label_photo.setScaledContents(True)\n\t\tself.label_certicication.setScaledContents(True)\n\t\tself.parent.layoutAddWidget(layout_main,self.label_photo,size=(40,40),alignment=Qt.AlignCenter- Qt.AlignVCenter|Qt.AlignHCenter)\n\t\tself.parent.layoutAddWidget(layout_main,self.label_name,size=(85,20),alignment=Qt.AlignHCenter)\n\t\tself.parent.layoutAddWidget(layout_main,self.label_certicication,size=(35,15),alignment=Qt.AlignHCenter)\n\t\tself.setLayout(layout_main)\n\tdef update(self):\n\t\tImage=ImageProcess()\n\t\tImage.readBytes(self.data['photo'])\n\t\tImage.toCicle()\n\t\tself.label_photo.setPixmap(QPixmap.fromImage(Image.toQImage()))\n\t\tself.label_name.setText(self.data['name'])\n\t\tif self.data['certification']==1:\n\t\t\tself.label_certicication.setText('已认证')\n\t\t\tself.label_certicication.setStyleSheet('color:green;font-size:10px;font-weight:800')\n\t\telse:\n\t\t\tself.label_certicication.setText('未认证')\n\t\t\tself.label_certicication.setStyleSheet('color:red;font-size:10px;font-weight:800')\n","repo_name":"Minicking/cloudbird","sub_path":"Client/QTWidget/FriendInfoWidget.py","file_name":"FriendInfoWidget.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29207695605","text":"#Isaac Fernandez Hernandez\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# Load data time series\r\ndef load_tseries(n):\r\n dinp = []\r\n param = load_cnf_prep()\r\n print(\"Cargando la data de las clases y generacion de caracteristicas\")\r\n param[2] = 4\r\n for i in range(1,n+1):\r\n st = np.genfromtxt('Data/Clase%d.csv'%i,delimiter=',')\r\n dinp.append(get_features(st,param[0],np.shape(st)[0],param[1],param[2]))\r\n print(\"Creando labels binarias\")\r\n dinp, dout = binary_label(dinp)\r\n return dinp, dout\r\n\r\n# Create Features\r\n# data: Series de tiempo de la clase c\r\n# ns: Numero de series a utilizar por clase/data\r\n\r\ndef get_features(data,lss,ns,fs,l):\r\n m = ns if ns < len(data) else len(data)\r\n for i in range(0,m):\r\n features = hankel_features(data[i,:],lss,fs,l) if i == 0 else np.vstack((features,hankel_features(data[i,:],lss,fs,l)))\r\n return features\r\n\r\n# Hankel's features\r\ndef hankel_features(serie,lss,fs,l):\r\n l_serie = np.shape(serie)[0]\r\n nss = int(l_serie/lss)\r\n sub_series = np.reshape(serie,(nss,lss))\r\n for i in range(0,np.shape(sub_series)[0]):\r\n c,ce = cfourier(sub_series[i],fs,l)\r\n u, c_sv, v = np.linalg.svd(c,False)\r\n ce = np.hstack((ce,c_sv)).ravel()\r\n ce = np.hstack((ce,spectral_entropy(sub_series[i]))).ravel()\r\n f = ce if i == 0 else np.vstack((f,ce))\r\n return f\r\n\r\ndef cfourier(serie,fs,l):\r\n n = len(serie)\r\n idx = np.round((np.arange(1,l+1) * (fs/(l*2))) * n/fs)\r\n j = 0\r\n for i in idx:\r\n serie,c,ce = fpbi(serie,n,i)\r\n if j == 0:\r\n cp = c\r\n cet = ce\r\n j = 1\r\n else:\r\n cp = np.vstack((cp,c))\r\n cet = np.hstack((cet,ce))\r\n return cp,cet\r\n\r\ndef fpbi(serie,n,idx):\r\n F = np.fft.fft(serie)\r\n F[int(idx+1):int(n/2)]=0\r\n F[int(n/2+1):int(n-idx)]=0\r\n c = np.fft.ifft(F).real\r\n x = serie - c\r\n return x,c,spectral_entropy(c)\r\n\r\ndef hanma (serie,k,m):\r\n h = np.zeros((k,m))\r\n for i in range(0,m):\r\n h[:,i] = serie[i:k+i]\r\n return h\r\n\r\n# spectral entropy\r\ndef spectral_entropy(x):\r\n n = len(x) \r\n fhat = np.fft.fft(x)\r\n fhat = fhat[0:int(n/2)]\r\n\r\n A = (np.sqrt(fhat * np.conj(fhat)).real)**2\r\n \r\n p = A/sum(A)\r\n p=p[p>0]\r\n return -1/np.log2(n)*sum(p*np.log2(p))\r\n\r\n# Binary Label\r\ndef binary_label(dinp):\r\n n_class = np.shape(dinp)[0]\r\n n_features = np.shape(dinp)[2]\r\n for i in range(0,n_class):\r\n n_class_data = np.shape(dinp[i])[0]\r\n A = np.zeros((n_class_data,n_class),int)\r\n A[:,i] = 1\r\n A = np.hstack((dinp[i],A))\r\n B = A if i == 0 else np.vstack((B,A))\r\n np.random.shuffle(B)\r\n dinp,dout = np.hsplit(B,[n_features])\r\n return dinp, dout\r\n\r\n# Data norm \r\ndef data_norm(data): \r\n a = 0.01 \r\n b = 0.99 \r\n data_norm = ((data-data.min(axis=0))/(data.max(axis=0)-data.min(axis=0)))*(b-a)+a\r\n return data_norm\r\n\r\n# Save Data based on Hankel's features\r\ndef save_data(Dinp,Dout):\r\n print(\"Nuevas caracteristicas generadas en el archivo dinp.csv\")\r\n np.savetxt(\"dinp.csv\", Dinp, delimiter=\",\", fmt=\"%.6f\")\r\n print(\"Label binarias generadas en el archivo dout.csv\")\r\n np.savetxt(\"dout.csv\", Dout, delimiter=\",\", fmt=\"%i\")\r\n return\r\n\r\ndef load_cnf_prep():\r\n par_sof=[]\r\n par = np.genfromtxt('cnf_prep.csv',delimiter=',')\r\n par_sof.append(np.int16(par[0])) # Largo de las sub-series de tiempo\r\n par_sof.append(np.int16(par[1])) # Frecuencia de Muestreo\r\n par_sof.append(np.int16(par[2])) # Sub-banda Fourier.\r\n return par_sof\r\n\r\ndef main():\r\n\tprint(\"Iniciando el pre-procesamiento\")\r\n\t#n = int(input(\"Ingrese el numero de clases:\\n\"))\r\n\tn = 8\r\n\tDinp,Dout = load_tseries(n)\t\r\n\tDinp = data_norm(Dinp)\r\n\tsave_data(Dinp,Dout)\r\n\tprint(\"Pre-procesamiento terminado\")\r\n\treturn pd.DataFrame(Dinp),pd.DataFrame(Dout)\r\n\r\nif __name__ == '__main__': \r\n\t main()\r\n\r\n\r\n","repo_name":"IsaacFernandezH/Series-de-tiempo-Hankel","sub_path":"prep.py","file_name":"prep.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2398948625","text":"# _*_ encoding:utf-8 _*_\n__author__ = 'xyx'\n__date__ = '2017-7-13 21:56'\n\nfrom django import forms\n\nfrom operation.models import UserAsk\n\nclass UserAskForm(forms.ModelForm):\n class Meta:\n model = UserAsk\n fields = ['name', 'mobile', 'course_name']","repo_name":"124608760/muxueonline","sub_path":"apps/organization/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"9860879421","text":"from math import *\r\nfrom re import L # імпортуємо всі функції з модуля math\r\n\r\n# Зайві коментарі видаліть перед захистом проекту!!!!!\r\n\r\n# Отримуємо а, b, h від користувача\r\na = float(input(\"Введите а: \")) \r\nb = float(input(\"Введите b: \")) \r\nh = float(input(\"Введите h: \"))\r\n\r\nx = a # Присвоюємо х значення нижньої границі нашого діапазону\r\nlist_of_values = [] # Створюємо список в який будемо складувати значення х та у\r\nmax_values = [0, -10000] # Створюємо список для збереження максимального значення х та y беремо -10000 щоб будь яке значення у було більшим за наше\r\n\r\n# Создаємо цикл з лічильником\r\nwhile x <= b:\r\n\r\n # Визначаємо у для значення х та округлюємо одразу до 4 знаків після коми\r\n y = round((1 / (x * x + 1)) + x * x, 4) # x*x те саме, що й х в другій степені \r\n x = round(x, 4) # округлюємо х, щоб мати більш гарний вигляд\r\n\r\n list_of_values.append([x, y]) # Записуємо в список list_of_values список, що має всередині значення x та y\r\n \r\n # Створюємо умову коли значення у більше за наше збережене тоді змінюємо значення максимального х та у в змінній\r\n if y > max_values[1]:\r\n max_values[1] = y\r\n max_values[0] = x\r\n\r\n # додаємо до поточного значення х шаг h\r\n x = x + h # можна написати x += h\r\n\r\nprint(list_of_values) # Виводимо список значень\r\nprint(\"Найбільше значення у - \" + str(max_values)) # Виводимо найбільше значення у","repo_name":"ise999joy/WKN","sub_path":"6(3).py","file_name":"6(3).py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34680153595","text":"import tkinter.ttk\n\nimport sounddevice as sd\nimport soundfile as sf\nfrom tkinter import *\nimport json\nimport random\nimport time\nfrom datetime import datetime\nimport os\nfrom tkinter import PhotoImage\nimport speech_recognition as sr\nimport pyttsx3\n\n\ndef getJson():\n with open(\"questions.json\") as fp:\n qus = json.load(fp)\n return qus\n\n\ndef prepCounter(preptime):\n preptime -= 1\n while preptime > -1:\n sec1.set(preptime)\n preptime -= 1\n master.update()\n time.sleep(1)\n\n\ndef durationCounter(duration):\n duration -= 1\n temp = 0\n while duration > -1:\n sec2.set(duration)\n duration -= 1\n temp += 1\n master.update()\n total = duration + temp + 1\n print(\"%\", ((total - duration) / total) * 100, \"total\", total, \"duration\", duration)\n progBarUpdate(((total - duration) / total) * 100)\n time.sleep(1)\n\n\ndef durationCounterW(duration):\n temp = 0\n duration = duration * 60\n while duration > -0.5:\n sec2.set(int(duration / 60))\n duration -= 0.5\n temp += 0.5\n master.update()\n total = duration + temp\n print(\"%\", ((total - duration) / total) * 100, \"total\", total, \"mins\", duration / 60)\n progBarUpdate(((total - duration) / total) * 100)\n WW.set('Word-count: %s' % len(TT.get('1.0', END).split(' ')))\n time.sleep(0.5)\n\n\ndef Voice_rec(duration, name):\n engine = pyttsx3.init()\n text = \"Please start speaking now\"\n engine.say(text)\n engine.runAndWait()\n\n fs = 48000\n r = sr.Recognizer()\n # seconds\n myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=2)\n durationCounter(duration)\n progBarStop()\n # sd.wait() removed as putting wait time\n Q.set(\"Please wait writing your audio file and audio to text file\")\n master.update()\n # Save as FLAC file at correct sampling rate\n sf.write(name, myrecording, fs)\n try:\n with sr.AudioFile(os.path.join(os.getcwd(), name)) as audio:\n data = r.record(audio)\n text = r.recognize_google(data)\n filename = name.replace('flac', 'txt')\n with open(filename, 'w') as file:\n file.write(text)\n except:\n Q.set('Error in writing audio to text file')\n master.update()\n time.sleep(2)\n\n\ndef setImage(path):\n image = PhotoImage(file=path)\n can.create_image(5, 400, anchor=SW, image=image)\n can.image = image\n\n\ndef showRadiobutton():\n rad1.place(x=150, y=200)\n rad2.place(x=500, y=200)\n\n\ndef destroyRadiobutton():\n rad1.destroy()\n rad2.destroy()\n\n\ndef progBarUpdate(percVar):\n myProgBar['value'] = percVar\n master.update()\n\n\ndef progBarStop():\n myProgBar.stop()\n\n\ndef finish():\n master.destroy()\n\n\ndef designForSpeakingTask():\n master.title(\"Speaking-Task\")\n master.geometry(\"900x600\")\n ST.destroy()\n can.place(x=5, y=550)\n var.set(1)\n\n PT.place(x=650, y=30)\n PTE.place(x=850, y=30)\n sec1.set('00')\n\n RT.place(x=650, y=60)\n RTE.place(x=850, y=60)\n sec2.set('00')\n\n myProgBar.place(x=100, y=5)\n\n ff.set(\"Close Test\")\n FT.place(x=700, y=550)\n\n timer.set(\"Record-time remaining: \")\n\n master.update()\n\n for ind in range(startfrom, 9):\n data = jData['SpeakingTask'][str(ind)]\n noOfQ = (len(data) - 3) if ind != 5 else (len(data) - 3) / 2\n\n T.set(\"Task:-%s : %s\" % (ind, data['0']))\n master.update()\n\n randV = random.randrange(1, noOfQ + 1)\n duration = data['duration']\n preptime = data['preptime']\n\n qus = data[str(randV)]\n Q.set(\"Qus:- %s\" % qus)\n master.update()\n\n if ind in (3, 4, 5, 8):\n currPath = os.path.join(os.getcwd(), \"images/Task%s/%s.png\" % (ind, randV))\n if ind == 5:\n setImage(currPath)\n showRadiobutton()\n prepCounter(60)\n currPath = os.path.join(os.getcwd(), \"images/Task%s/%s_%s.png\" % (ind, randV, var.get()))\n destroyRadiobutton()\n qus = data[\"%s_2\" % randV]\n Q.set(\"Q:- %s\" % qus)\n setImage(currPath)\n\n prepCounter(preptime)\n name = \"%s/S/%s_%s.flac\" % (testtime, ind, testhourmin)\n Voice_rec(duration, name)\n setImage(path)\n Q.set(\"Qus:- \")\n\n\ndef designForWritingTask():\n master.title(\"Writing-Task\")\n master.geometry(\"900x900\")\n master.resizable(False, False)\n ST.destroy()\n can.place(x=5, y=850)\n\n PT.place(x=650, y=30)\n PTE.place(x=850, y=30)\n sec1.set('00')\n\n RT.place(x=650, y=60)\n RTE.place(x=850, y=60)\n sec2.set('00')\n\n myProgBar.place(x=100, y=5)\n\n ff.set(\"Close Test\")\n FT.place(x=700, y=860)\n\n timer.set(\"Writing-time remaining: \")\n\n WC.place(x=500, y=860)\n\n TT.place(x=60, y=220)\n\n master.update()\n\n for ind in range(1, 3):\n data = jData['WritingTask'][str(ind)]\n noOfQ = len(data) - 3\n\n T.set(\"Task:-%s : %s\" % (ind, data['0']))\n master.update()\n\n randV = random.randrange(1, noOfQ + 1)\n\n duration = data['duration']\n\n qus = data[str(randV)]\n Q.set(\"Qus:- %s\" % qus)\n master.update()\n\n durationCounterW(duration)\n text = TT.get('1.0', END)\n name = \"%s/W/%s_%s.txt\" % (testtime, ind, testhourmin)\n writeTexttoFile(name, text)\n TT.delete('1.0', END)\n master.update()\n TT.destroy()\n WC.destroy()\n\n\ndef writeTexttoFile(name, text):\n with open(name, 'w') as fp:\n fp.write(text)\n\n\ndef createFolders():\n try:\n print(os.path.join(wd, f'{testtime}'))\n os.makedirs(os.path.join(wd, f'{testtime}\\W'), exist_ok=True)\n os.makedirs(os.path.join(wd, f'{testtime}\\S'), exist_ok=True)\n os.makedirs(os.path.join(wd, f'{testtime}\\R'), exist_ok=True)\n os.makedirs(os.path.join(wd, f'{testtime}\\L'), exist_ok=True)\n except:\n print('error')\n pass\n\n\ndef on_click():\n createFolders()\n options = [var1.get(), var2.get(), var3.get(), var4.get()]\n print(options)\n c1.destroy()\n c2.destroy()\n c3.destroy()\n c4.destroy()\n\n for title in jData.keys():\n if title in options:\n eval('designFor%s()' % title)\n\n\nif __name__ == \"__main__\":\n master = Tk()\n master.title(\"CELPIP Test\")\n master.geometry(\"300x300\")\n master.resizable(False, False)\n jData = getJson()\n sec1 = StringVar()\n sec2 = StringVar()\n\n Q = StringVar()\n WW = StringVar()\n T = StringVar()\n ss = StringVar()\n ff = StringVar()\n var = IntVar()\n timer = StringVar()\n\n var1 = StringVar()\n var2 = StringVar()\n var3 = StringVar()\n var4 = StringVar()\n\n imagePath = StringVar()\n\n testtime = datetime.now().strftime(\"%d_%m\")\n testhourmin = datetime.now().strftime('%H_%M')\n\n startfrom = 0\n\n TT = Text(master, height=35, width=85, font='Helvetica14')\n\n PT = Label(master, text=\"Prep-time remaining: \", font='Helvetica14', justify=LEFT)\n PTE = Entry(master, textvariable=sec1, width=2, font='Helvetica14')\n\n RT = Label(master, textvariable=timer, font='Helvetica14')\n RTE = Entry(master, textvariable=sec2, width=2, font='Helvetica14')\n\n Label(master, textvariable=Q, font='Helvetica14', justify=LEFT).place(x=50, y=90)\n Label(master, textvariable=T, font='Helvetica14', justify=LEFT).place(x=50, y=30)\n WC = Label(master, textvariable=WW, font='Helvetica14', justify=LEFT)\n WW.set('Word-count: 0')\n\n ST = Button(master, textvariable=ss, command=on_click, width=20)\n ss.set(\"Start Test\")\n FT = Button(master, textvariable=ff, command=finish, width=20)\n ff.set(\"Exit Test\")\n ST.place(x=80, y=150)\n FT.place(x=80, y=200)\n\n can = Canvas(master, width=300, height=50)\n can.place(x=1, y=240)\n wd = os.getcwd()\n path = os.path.join(wd, \"images/developerBy.png\")\n image = PhotoImage(file=path)\n can.create_image(1, 50, anchor=SW, image=image)\n\n myProgBar = tkinter.ttk.Progressbar(master, orient=HORIZONTAL, length=700, mode='determinate')\n\n rad1 = Radiobutton(master, text=\"Option 1\", variable=var, value=1)\n rad2 = Radiobutton(master, text=\"Option 2\", variable=var, value=2)\n\n c1 = Checkbutton(master, text='WritingTask', variable=var1, onvalue='WritingTask', offvalue='')\n c1.place(x=80, y=10)\n c2 = Checkbutton(master, text='SpeakingTask', variable=var2, onvalue='SpeakingTask', offvalue='')\n c2.place(x=80, y=40)\n c3 = Checkbutton(master, text='ReadingTask', variable=var3, onvalue='ReadingTask', offvalue='')\n c3.place(x=80, y=70)\n c4 = Checkbutton(master, text='ListeningTask', variable=var4, onvalue='ListeningTask', offvalue='')\n c4.place(x=80, y=100)\n\n mainloop()\n","repo_name":"vivek-gour/Python-Design-Patterns","sub_path":"CELPIP/speaking.py","file_name":"speaking.py","file_ext":"py","file_size_in_byte":8728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"6543797321","text":"n = int(input())\r\nn_list = []\r\nfor _ in range(n):\r\n\tn_list.append(list(map(int, input().split())))\r\nn_list.sort()\r\nn_list.sort(key=lambda x: x[1])\r\nend_t = 0\r\ncnt = 0\r\nfor i, j in n_list:\r\n\tif i >= end_t:\r\n\t\tend_t = j\r\n\t\tcnt += 1\r\n \r\nprint(cnt)","repo_name":"wooneojun/1day1solution","sub_path":"백준/Silver/1931. 회의실 배정/회의실 배정.py","file_name":"회의실 배정.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19134492955","text":"import re\nimport urllib.request\nfrom http.cookiejar import CookieJar\nimport imageSearchResult as isr\n\nclass google_image_search:\n \n def __init__(self, imageURL = None):\n self.cj = CookieJar()\n self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))\n self.opener.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17')]\n self.imageURL = imageURL\n self.googlePath = 'https://www.google.com/searchbyimage?&image_url='+self.imageURL\n self.sourceCode = None\n self.numOfReusePic = 0\n self.numOfpageResults = 0\n\n def ReverseImageLookup(self):\n try:\n self.sourceCode = self.opener.open(self.googlePath).read()\n except urllib.error.HTTPError as e:\n print(\"exeption occur\")\n print(\"msg: \"+e.msg)\n print(\"reason: \"+e.reason)\n print(\"errno: \"+e.errno)\n print(\"getcode: \"+e.getcode)\n print(\"info: \"+e.info)\n print(\"strerror: \"+e.strerror)\n findLinks = re.findall(r'
함수 따로 만들지 않아도 됨. 메인에서 값 확인만 .\n# 2) 강우 일수는 ? count_rain_days ount_rain_days ount_rain_days ount_rain_days (rainfall) (rainfall) (rainfall) (rainfall)\ndef count_rain_days(rainfall):\n total_rain_days = 0\n for i in rainfall:\n if i != 0:\n total_rain_days += 1\n\n return total_rain_days\n\n\n# 3) 여름철 (6 월-8월) 총 강수량은 ? sumifs umifs(rainfall, months, selected=[6,7,8])\ndef sumifs(rainfall, months, selected=[6,7,8]):\n days = len(months)\n total_rainfall = 0\n for i in range(days):\n if months[i] in selected:\n total_rainfall += rainfall[i]\n return total_rainfall\n\n\n\n\n\n# 4) 최장연속강우일수는 ? longest_rain_days longest_rain_days longest_rain_days longest_rain_days longest_rain_days (rainfall)(rainfall)(rainfall)(rainfall) (rainfall) (rainfall)\ndef longest_rain_days(rainfall):\n rainy_days = []\n rain = 0\n for i in rainfall:\n if i != 0:\n rain += 1\n else:\n rainy_days.append(rain)\n rain = 0\n if rain != 0:\n rainy_days.append(rain)\n return max(rainy_days)\n\n\n# 5) 강우이벤트 중 최대 강수량은 ? 비가 연속으로 올 때, 하나의 강우 이벤트로 가정\ndef maximum_rainfall_event(rainfall):\n rain_event = 0\n rain_event_2 = []\n for i in rainfall:\n if i > 0:\n rain_event += i\n else:\n rain_event_2.append(rain_event)\n rain_event = 0\n return max(rain_event_2)\n\n\n\n# 6) 일교차가 가장 큰날짜와 일교차?\ndef maximum_temp_gap(dates, tmax, tmin):\n gap = 0\n day = []\n for i in range(len(dates)):\n temp_gap = tmax[i] - tmin[i]\n if temp_gap > gap:\n gap = temp_gap\n day = dates[i]\n\n return day, gap\n\n\n\n\n\n# 7) 적산온도는?\ndef gdd(dates, tavg):\n temp = 0\n month = [5, 6, 7, 8, 9]\n for i in range(len(dates)):\n if dates[i][1] in month:\n if tavg[i] >= 5:\n temp += tavg[i]-5\n return temp\n\n\n\n\n\ndef main():\n f = open(\"../week6/weather(146)_2021-2021.csv\")\n lines = f.readlines()\n rainfall = [float(x.split(\",\")[9]) for x in lines[1:]]\n tavg = [float(x.split(\",\")[4]) for x in lines[1:]]\n tmax = [float(x.split(\",\")[3]) for x in lines[1:]]\n tmin = [float(x.split(\",\")[5]) for x in lines[1:]]\n months = [int(x.split(\",\")[1]) for x in lines[1:]]\n dates = [[int(x.split(\",\")[0]), int(x.split(\",\")[1]), int(x.split(\",\")[2])] for x in lines[1:]]\n # # 1) 총 강수량\n print(\"총 강수량: {:.1f} mm\".format(sum(rainfall)))\n # 2) 총 강우일수\n print(\"총 강우일수: {:d} 일\".format(count_rain_days(rainfall)))\n # 3) 여름철 (6 월-8월) 총 강수량은 ?\n print(\"여름철 (6 월-8월) 총 강수량은 {:.1f} mm\".format(sumifs(rainfall, months, [6,7,8])))\n # 4) 최장연속강우일수는 ?\n print(\"최장연속강우일수: {:d}일\".format(longest_rain_days(rainfall)))\n # 5) 강우이벤트 중 최대 강수량은 ?\n print(\"강우이벤트 중 최대 강수량: {:.1f}\".format(maximum_rainfall_event(rainfall)))\n # 6) 일교차가 가장 큰날짜와 일교차?\n print(\"일교차가 가장 큰���짜: {}, 일교차: {:.1f}도\".format(*maximum_temp_gap(dates, tmax, tmin)))\n # 7) 적산온도는?\n print(\"적산온도는 {:.1f} degree-days\".format(gdd(dates, tavg)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"danuni29/Code","sub_path":"week7/hw09_main.py","file_name":"hw09_main.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"37804152822","text":"#H4 tehtävä 3: Peltojen tiedot tiedostosta\npinta={}\npinnat=[]\nkasvit={}\nkasvikset=[]\nkylvöpäivät={}\npäivät=[]\npellot=[]\nrivit=[]\nwith open(\"tiedosto.txt\") as file:\n \n for rivi in file:\n osat=rivi.split(\"\\t\")\n if osat[0]==\"Pinta-ala\":\n continue\n pinta[\"Pinta-ala\"]=pinnat\n pinnat.append(osat[0])\n if osat[1]==\"Kasvi\":\n continue\n\n kasvit[\"Kasvit\"]=kasvikset\n kasvikset.append(osat[1].lstrip())\n if osat[2]==\"Pvm\":\n continue\n\n kylvöpäivät[\"Kylvöpäivä\"]=päivät\n päivät.append(osat[2].strip())\n \n pellot.append(pinta)\n pellot.append(kasvit)\n pellot.append(kylvöpäivät)\n for item in pellot:\n for avain,arvo in item.items():\n item[avain]=[arvo]\n for i in arvo:\n rivi=avain+\":\"+i\n rivit.append(rivi)\n L1=len(pinnat)+len(kasvikset) \n for i in range(len(pinnat)):\n row=rivit[i]+\" -- \"+ rivit[len(pinnat)+i]+\" -- \"+ rivit[L1+i]\n print(row)\n print() \n print(f\"Yhteensä {len(pinnat)} peltoa.\") \n","repo_name":"allienka/python-water-tank","sub_path":"H4-watertank.py","file_name":"H4-watertank.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29334375839","text":"#!/usr/bin/env python3\n\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\n\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 4:\nThe telephone company want to identify numbers that might be doing\ntelephone marketing. Create a set of possible telemarketers:\nthese are numbers that make outgoing calls but never send texts,\nreceive texts or receive incoming calls.\n\nPrint a message:\n\"These numbers could be telemarketers: \"\n\nThe list of numbers should be print out one per line in lexicographic order with no duplicates.\n\"\"\"\nincoming_text = []\noutgoing_text = []\nincoming_call = []\nfor text in texts:\n if text[0] not in incoming_text:\n incoming_text.append(text[0])\n if text[1] not in outgoing_text:\n outgoing_text.append(text[1])\n \n \nfor call in calls:\n if call[1] not in incoming_call:\n incoming_call.append(call[1])\n \nmaster_list = set(incoming_text + outgoing_text + incoming_call)\nspammers = set()\nfor call in calls:\n if (call[0] not in master_list):\n spammers.add(call[0])\n \nprint(f\"These numbers could be telemarketers:\",*sorted(spammers),sep='\\n') \n\n\"\"\"\nBig O Notation Worst Case Scenario\n\nO(5 + 1n^3 + 1x^2 + 1y^2+ x(log(x)))\n\n\n\n5 represents the 5 valiables created in the algorithm (3 lists, two sets)\n\n1n^3 represents the first for loop iterating over texts. This item is cubed because\none must check if the item is not in incoming text or outgoing text and the worse case is the item \nas long as the CSV\n\nthe 1x^2 variables represent the loop iteration over calls. This item is squared because\nthe worst case scenario is the incoming call is just as long as the call csv\n\nthe 1y^2 variables represent the second loop iteration over calls. This item is squared because\nthe worst case scenario is the master list is just as long as the call csv\n\n\n\nxlog(x) is for the sorting function in the print statement\n\n\"\"\"","repo_name":"akniels/Data_Structures","sub_path":"Project_1/P0/Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"649743703","text":"import sys\nimport pickle as pk\nimport networkx as nx\nfrom antlr4 import *\nfrom EnquestesLexer import EnquestesLexer\nfrom EnquestesParser import EnquestesParser\nfrom antlr4.InputStream import InputStream\nfrom EnquestesVisitor import EnquestesVisitor\nfrom testDrawGraph import drawGraph\nfrom os import path\nif len(sys.argv) > 1:\n input_stream = FileStream(sys.argv[1])\nelse:\n input_stream = InputStream(input('? '))\n\nlexer = EnquestesLexer(input_stream)\ntoken_stream = CommonTokenStream(lexer)\nparser = EnquestesParser(token_stream)\ntree = parser.botGraph()\nvisitor = EnquestesVisitor()\nG = visitor.visit(tree)\nidEnquesta = visitor.getStartNode()\nnx.write_gpickle(G, \"../GeneratedData/GeneratedEnquestes/\"+idEnquesta+\".pickle\")\npathQuizzesIDs = \"../GeneratedData/0QuizzesIDs.pickle\"\nif not path.exists(pathQuizzesIDs):\n pickleOut = open(pathQuizzesIDs, \"wb\")\n quizzesIDs = {idEnquesta}\n pk.dump(quizzesIDs, pickleOut)\n pickleOut.close()\nelse:\n pickleQuizzesIDs = open(pathQuizzesIDs, \"rb\")\n quizzesIDs = pk.load(pickleQuizzesIDs)\n pickleQuizzesIDs.close()\n pickleOut = open(pathQuizzesIDs, \"wb\")\n quizzesIDs.add(idEnquesta)\n pk.dump(quizzesIDs, pickleOut)\n pickleOut.close()\ndrawGraph(G, idEnquesta)\n","repo_name":"dieg666/graphBot","sub_path":"cl/testEnquestes.py","file_name":"testEnquestes.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71349016467","text":"\nimport sys\nimport logging\nfrom os import environ\n\nfrom logging.handlers import RotatingFileHandler\n\nCRITICAL = 50\nFATAL = CRITICAL\nERROR = 40\nWARNING = 30\nWARN = WARNING\nINFO = 20\nDEBUG = 10\nNOTSET = 0\n\n_levelToName = {\n CRITICAL: 'CRITICAL',\n ERROR: 'ERROR',\n WARNING: 'WARNING',\n INFO: 'INFO',\n DEBUG: 'DEBUG',\n NOTSET: 'NOTSET',\n }\n\n_nameToLevel = {\n 'CRITICAL': CRITICAL,\n 'ERROR': ERROR,\n 'WARN': WARNING,\n 'WARNING': WARNING,\n 'INFO': INFO,\n 'DEBUG': DEBUG,\n 'NOTSET': NOTSET,\n }\n\nclass alogger:\n\n @staticmethod\n def getLogger(name: str, cfg=None, default_level=None):\n _log_options = { # default log options\n \"log_level\": default_level if default_level is not None else \"debug\",\n # enable logging by default if configuration or default log level is set\n \"enabled\": cfg is not None or default_level is not None,\n # Output log to tty if logging is possible\n \"tty\": True and (cfg is not None or default_level is not None)\n }\n\n flask_reload = True\n log = logging.getLogger(name)\n\n if cfg is not None:\n alogger.setLogLevel(log, cfg.get(\"logging.log_level\", default=_log_options[\"log_level\"], check_type=str))\n flask_reload = not cfg.get(\"server.debug.external_debug\", default=not flask_reload, check_type=bool)\n _log_options[\"file\"] = cfg.get(\"logging.file\", default=\"\", check_type=str)\n else:\n # set log level for the instance from default one passed in case, if no configuration available\n _log_options[\"log_level\"] is not None and alogger.setLogLevel(log, _log_options[\"log_level\"])\n\n # hack, print logs only for reloaded thread\n if flask_reload and environ.get('WERKZEUG_RUN_MAIN') != 'true':\n _log_options[\"enabled\"] = False\n\n for handler in alogger.getHandlers(_log_options):\n log.addHandler(handler)\n\n return log\n\n @staticmethod\n def getHandlers(options: dict):\n new_format = logging.Formatter('%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s')\n handlers = []\n\n # return Null handler if logging is not allowed\n if \"enabled\" in options and not options[\"enabled\"]:\n handlers.append(logging.NullHandler())\n return handlers\n\n # output error handler\n if \"tty\" in options and options[\"tty\"]:\n handlers.append(logging.StreamHandler(sys.stderr))\n\n if \"file\" in options and options[\"file\"].strip() != \"\":\n # uncomment to allow file output handler\n handlers.append(RotatingFileHandler(options[\"file\"], \"a\"))\n\n # assign same format output to handlers\n for item in handlers:\n item.setFormatter(new_format)\n\n return handlers\n\n @staticmethod\n def setLogLevel(log, level):\n level = level.upper().strip()\n if level in _nameToLevel:\n log.setLevel(_nameToLevel[level])\n else:\n log.setLevel(NOTSET)\n\n\n","repo_name":"hapylestat/anime-library","sub_path":"backend/alist/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"74653347665","text":"import re\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer\n#import spacy\n\n\nclass Classifier:\n stop_words = stopwords.words('russian', 'english')\n stop_words.extend(\n ['the', 'one', 'two', 'of', 'you', 'your', 'in', 'game', 'to',\n 'is', 'for', 'on', 'with', 'it', 'this', 'will', 'by', 'that',\n 'if', 'be', 'or', 'as', 'an', 'are', 'all', 'but', 'about', 'can',\n 'so', 'play', 'story', 'novel', 'from', 'out', 'he', 'she', 'not',\n 'they', 'their', 'what', 'up', 'have', 'her', 'more', 'demo',\n 'at', 'who', 'fill', 'there', 'was', 'we', 'please', 'new', 'and',\n 'features', 'music', 'content', 'version', 'me', 'my', 'like',\n 'some', 'how', 'characters', 'his', 'get', 'visual', 'other', 'll',\n 'also', 'into', 'made', 'us', 'only', 'has', 'our', 'time', 'find',\n 'life', 'world', 'make', 'just', 'any', 'them', 'when', 'do', 'now',\n 'help', 're', 'free', 'endings', 'no', 'first', 'here', 'want',\n 'through', 'been', 'available', 'after', 'where', 'full', 'different',\n 'follow', 'may', 'credits', 'own', 'll', 'character', 'even', 'him',\n 'than', 'way', 'being', 'games', 'each', 'warning', 'over',\n 'contains', 'see', 'day', 'words', 'which', 'around', 'something',\n 'know', 'would', 'take', 'right', 've', 'well', 'much', 'while',\n 'work', 'project', 'three', 'best', 'still', 'don', 'jam', 'end',\n 'many', 'enjoy', 'join', 'playing', 'really', 'every', 'little',\n 'most', 'things', 'download', 'note', 'come', 'keep', 'very', 'its',\n 'feel', 'hope', 'ending', 'main', ])\n #en_nlp = spacy.load(\"en_core_web_sm\")\n #ru_nlp = spacy.load(\"ru_core_news_sm\")\n en_letter = re.compile(r'[a-z]')\n ru_letter = re.compile(r'[а-я]')\n\n def __init__(self, game_info=None):\n self.pd_texts = game_info\n self.pd_texts['paper_text'] = \\\n self.pd_texts['paper_text'].apply(lambda x: self.clean_up_text(x))\n\n def get_tags(self):\n cv = CountVectorizer()\n vocab = cv.fit(self.pd_texts['paper_text'].values)\n a = pd.DataFrame(data=cv.transform(self.pd_texts['paper_text']).toarray(),\n columns=vocab.get_feature_names())\n # сделать приведение к начальной форме\n # попробовать вытащить биграммы как теги\n top_tags = a.sum(axis=0).sort_values(ascending=False)[:40].index\n tag_to_texts = dict()\n for tag in top_tags:\n texts = [one['link'] for index, one in self.pd_texts.iterrows()\n if tag in one['paper_text']]\n tag_to_texts[tag] = texts\n return tag_to_texts\n\n def clean_up_text(self, text):\n doc = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", text) # Remove the \"written by\" caption\n doc = doc.replace(u'\\n', u'').replace(u'\\r', u'')\n doc = re.sub(r'[^\\s\\w]', '', doc)\n doc = re.sub('\\s+', ' ', doc)\n doc = doc.lower().split()\n doc = ' '.join([t for t in doc\n if not t in Classifier.stop_words and len(t) > 1 and not t.isdigit()])\n return doc\n\n @staticmethod\n def lemmatize(word):\n if Classifier.en_letter.search(word):\n return Classifier.en_nlp(word)\n if Classifier.ru_letter.search(word):\n return Classifier.ru_nlp(word)\n return word\n","repo_name":"AlexJackalope/novels-search-project","sub_path":"Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"910606450","text":"# Definition for singly-linked list.\n\n\n# https://leetcode.com/problems/reverse-linked-list/\n\n\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def reverseList(self, head):\n \n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \n helpful tutorial video for recursive solution: https://www.youtube.com/watch?v=MRe3UsRadKw\n \n \"\"\"\n # Recursive solution\n\n if not head:\n return None\n\n # Make variable to maintain new head...initially set to head\n\n newHead = head\n\n # If head.next is not Null (still exists a subproblem) then we reverse List\n if head and head.next:\n # call reverseList with head.next so you can find the end of the linkedlist and make that the new head!!! This is how we recursively traverse the list until we found the last node.\n newHead = self.reverseList(head.next)\n \n # confusing but explanation is, we are at end of the linked list so head.next.next is now Null which means the pointer pointing to Null can be manipuated and turned to point at curr instead -- effectively reversing the linked list.\n head.next.next = head\n \n # now set head.next to Null \n # go back to the start of the callstack if head and head.next:\n # If head happens to be the first node in the list, we need to reverse the next pointer to Null indicating this is the new end of LinkedList that points to null.\n head.next = None\n \n return newHead\n\n\n # -> 1 -> 2 -> 3\n \n \n \n ''' \n # Iterative Solution\n \n if not head:\n return None\n \n prev = None\n cur = head\n \n while cur:\n # saving next node so we don't lose it when we move cur's pointer...\n saveNext = cur.next\n \n # switching the cur.next's pointer to the one before it (not afraid to lose cur.next bc we already saved it)\n cur.next = prev\n \n # incrementing prev to cur (moving forward by 1)\n prev = cur\n \n # incrementing cur also forward by 1\n cur = saveNext\n return prev\n \n # gif animation for clarity: https://media.geeksforgeeks.org/wp-content/cdn-uploads/RGIF2.gif\n \n '''\n","repo_name":"MadamHippo/Python-leetcode","sub_path":"206-reverse-linked-list.py","file_name":"206-reverse-linked-list.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73173294227","text":"import uuid\n\n\nasync def test_get_user_by_id(client, create_user_in_database, get_user_from_database):\n user_data = {\n \"user_id\": uuid.uuid4(),\n \"name\": \"Baby_get\",\n \"surname\": \"Bone_get\",\n \"email\": \"babyboneget@gmail.com\",\n \"is_active\": True,\n }\n\n await create_user_in_database(**user_data)\n resp = client.get(f'/user/?user_id={user_data[\"user_id\"]}')\n resp_json = resp.json()\n user_from_db = await get_user_from_database(user_data[\"user_id\"])\n user_from_db = user_from_db[0]\n user_data[\"user_id\"] = str(user_data[\"user_id\"])\n\n assert resp.status_code == 200\n assert user_data == resp_json\n assert str(user_from_db[\"user_id\"]) == user_data[\"user_id\"]\n assert user_from_db[\"name\"] == user_data[\"name\"]\n assert user_from_db[\"surname\"] == user_data[\"surname\"]\n assert user_from_db[\"email\"] == user_data[\"email\"]\n assert user_from_db[\"is_active\"] == user_data[\"is_active\"]\n","repo_name":"CRPNTRPINK/CodeHub","sub_path":"tests/test_handlers/test_get_user_by_id_handler.py","file_name":"test_get_user_by_id_handler.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28428590908","text":"import csv\nimport os\nimport time\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\nimport torch.optim\nfrom Data import *\n\nimport criteria\n\ncudnn.benchmark = True\n\nimport models\nfrom metrics import AverageMeter, Result\nfrom utils import *\n\n\nargs = parse_command()\nprint(args)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu # Set the GPU.\n\nfieldnames = ['rmse', 'mae', 'delta1', 'absrel',\n 'lg10', 'mse', 'delta2', 'delta3', 'data_time', 'gpu_time']\nbest_fieldnames = ['best_epoch'] + fieldnames\nbest_result = Result()\nbest_result.set_to_worst()\n\n\n##################################################################\n\n\ndef create_data_loaders(args):\n # Data loading code\n print(\"=> creating data loaders ...\")\n home_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))\n traindir = os.path.join(home_path, 'data', args.data, 'train')\n valdir = os.path.join(home_path, 'data', args.data, 'val')\n train_loader = None\n\n max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf\n\n if args.data == 'nyudepthv2':\n if not args.evaluate:\n train_dataset = NYU(traindir, split='train', modality=args.modality)\n val_dataset = NYU(valdir, split='val', modality=args.modality)\n else:\n raise RuntimeError('Dataset not found.' + 'The dataset must be either of nyudepthv2 or kitti.')\n\n # set batch size to be 1 for validation\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.workers,\n pin_memory=True)\n\n # put construction of train loader here, for those who are interested in testing only\n if not args.evaluate:\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, sampler=None,\n worker_init_fn=lambda work_id: np.random.seed(work_id))\n # worker_init_fn ensures different sampling patterns for each data loading thread\n\n print(\"=> data loaders created.\")\n return train_loader, val_loader\n\n\n####################################################################\ndef main():\n global args, best_result, output_directory, train_csv, test_csv\n\n # evaluation mode\n if args.evaluate:\n\n # Data loading code\n print(\"=> creating data loaders...\")\n home_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))\n valdir = os.path.join(home_path, 'data', args.data, 'val')\n\n if args.data == 'nyudepthv2':\n val_dataset = NYU(valdir, split='val', modality=args.modality)\n else:\n raise RuntimeError('Dataset not found.')\n\n # set batch size to be 1 for validation\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)\n print(\"=> data loaders created.\")\n\n assert os.path.isfile(args.evaluate), \\\n \"=> no model found at '{}'\".format(args.evaluate)\n print(\"=> loading model '{}'\".format(args.evaluate))\n checkpoint = torch.load(args.evaluate)\n if type(checkpoint) is dict:\n args.start_epoch = checkpoint['epoch']\n best_result = checkpoint['best_result']\n model = checkpoint['model']\n print(\"=> loaded best model (epoch {})\".format(checkpoint['epoch']))\n else:\n model = checkpoint\n args.start_epoch = 0\n output_directory = os.path.dirname('/home/jetson/FastDepth')\n validate(val_loader, model, args.start_epoch, write_to_file=False)\n return\n\n start_epoch = 0\n if args.train:\n train_loader, val_loader = create_data_loaders(args)\n print(\"=> creating Model ({}-{}) ...\".format(args.arch, args.decoder))\n\n model = models.MobileNetSkipAdd(output_size=train_loader.dataset.output_size)\n print(\"=> model created.\")\n optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training\n model = model.cuda()\n\n # define loss function (criterion) and optimizer\n if args.criterion == 'l2':\n criterion = criteria.MaskedMSELoss().cuda()\n elif args.criterion == 'l1':\n criterion = criteria.MaskedL1Loss().cuda()\n\n # create results folder, if not already exists\n output_directory = get_output_directory(args)\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n train_csv = os.path.join(output_directory, 'train.csv')\n test_csv = os.path.join(output_directory, 'test.csv')\n best_txt = os.path.join(output_directory, 'best.txt')\n\n # create new csv files with only header\n if not args.resume:\n with open(train_csv, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n with open(test_csv, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n for epoch in range(start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch, args.lr)\n train(train_loader, model, criterion, optimizer, epoch) # train for one epoch\n result, img_merge = validate(val_loader, model, epoch) # evaluate on validation set\n\n # remember best rmse and save checkpoint\n is_best = result.rmse < best_result.rmse\n if is_best:\n best_result = result\n with open(best_txt, 'w') as txtfile:\n txtfile.write(\n \"epoch={}\\nmse={:.3f}\\nrmse={:.3f}\\nabsrel={:.3f}\\nlg10={:.3f}\\nmae={:.3f}\\ndelta1={:.3f}\\nt_gpu={:.4f}\\n\".\n format(epoch, result.mse, result.rmse, result.absrel, result.lg10, result.mae,\n result.delta1,\n result.gpu_time))\n if img_merge is not None:\n img_filename = output_directory + '/comparison_best.png'\n save_image(img_merge, img_filename)\n\n save_checkpoint({\n 'args': args,\n 'epoch': epoch,\n 'arch': args.arch,\n 'model': model,\n 'best_result': best_result,\n 'optimizer': optimizer,\n }, is_best, epoch, output_directory)\n\n\ndef validate(val_loader, model, epoch, write_to_file=True):\n average_meter = AverageMeter()\n model.eval() # switch to evaluate mode\n end = time.time()\n eval_file = output_directory + '/FastDepth/evaluation.csv'\n f = open(eval_file, \"w+\")\n f.write(\"Max_Error,Depth,RMSE,GPU_TIME,Number_Of_Frame\\r\\n\")\n for i, (input, target) in enumerate(val_loader):\n input, target = input.cuda(), target.cuda()\n # torch.cuda.synchronize()\n data_time = time.time() - end\n\n # compute output\n end = time.time()\n with torch.no_grad():\n pred = model(input)\n # torch.cuda.synchronize()\n gpu_time = time.time() - end\n\n abs_err = (target.data - pred.data).abs().cpu()\n max_err_ind = np.unravel_index(np.argmax(abs_err, axis=None), abs_err.shape)\n\n max_err_depth = target.data[max_err_ind]\n max_err = abs_err[max_err_ind]\n\n\n # measure accuracy and record loss\n result = Result()\n result.evaluate(pred.data, target.data)\n average_meter.update(result, gpu_time, data_time, input.size(0))\n end = time.time()\n\n f.write(f'{max_err},{max_err_depth},{result.rmse:.2f},{gpu_time},{i+1}\\r\\n')\n # save 8 images for visualization\n skip = 50\n\n if args.modality == 'rgb':\n rgb = input\n\n if i == 0:\n img_merge = merge_into_row_with_gt(rgb, target, pred, (target - pred).abs())\n elif (i < 8 * skip) and (i % skip == 0):\n row = merge_into_row_with_gt(rgb, target, pred, (target - pred).abs())\n img_merge = add_row(img_merge, row)\n elif i == 8 * skip:\n filename = output_directory + '/comparison_' + str(epoch) + '.png'\n save_image(img_merge, filename)\n\n if (i + 1) % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\\n\\t'\n 'RMSE={result.rmse:.2f}({average.rmse:.2f}) '\n 'MAE={result.mae:.2f}({average.mae:.2f}) '\n 'Delta1={result.delta1:.3f}({average.delta1:.3f}) '\n 'REL={result.absrel:.3f}({average.absrel:.3f}) '\n 'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(\n i + 1, len(val_loader), gpu_time=gpu_time, result=result, average=average_meter.average()))\n f.close()\n avg = average_meter.average()\n\n print('\\n*\\n'\n 'RMSE={average.rmse:.3f}\\n'\n 'MAE={average.mae:.3f}\\n'\n 'Delta1={average.delta1:.3f}\\n'\n 'REL={average.absrel:.3f}\\n'\n 'Lg10={average.lg10:.3f}\\n'\n 't_GPU={time:.3f}\\n'.format(\n average=avg, time=avg.gpu_time))\n\n if write_to_file:\n with open(test_csv, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,\n 'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,\n 'data_time': avg.data_time, 'gpu_time': avg.gpu_time})\n return avg, img_merge\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n average_meter = AverageMeter()\n model.train() # switch to train mode\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n\n input, target = input.cuda(), target.cuda()\n torch.cuda.synchronize()\n data_time = time.time() - end\n\n # compute pred\n end = time.time()\n pred = model(input)\n loss = criterion(pred, target)\n optimizer.zero_grad()\n loss.backward() # compute gradient and do SGD step\n optimizer.step()\n torch.cuda.synchronize()\n gpu_time = time.time() - end\n\n # measure accuracy and record loss\n result = Result()\n result.evaluate(pred.data, target.data)\n\n average_meter.update(result, gpu_time, data_time, input.size(0))\n end = time.time()\n\n if (i + 1) % args.print_freq == 0:\n print('=> output: {}'.format(output_directory))\n print('Train Epoch: {0} [{1}/{2}]\\t'\n 't_Data={data_time:.3f}({average.data_time:.3f}) '\n 't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\\n\\t'\n 'RMSE={result.rmse:.2f}({average.rmse:.2f}) '\n 'MAE={result.mae:.2f}({average.mae:.2f}) '\n 'Delta1={result.delta1:.3f}({average.delta1:.3f}) '\n 'REL={result.absrel:.3f}({average.absrel:.3f}) '\n 'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(\n epoch, i + 1, len(train_loader), data_time=data_time,\n gpu_time=gpu_time, result=result, average=average_meter.average()))\n\n avg = average_meter.average()\n with open(train_csv, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,\n 'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,\n 'gpu_time': avg.gpu_time, 'data_time': avg.data_time})\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tau-adl/FastDepth","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11876,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
+{"seq_id":"17620001504","text":"def heapify(arr, n, i): # complexity -> Log(n)\n largest = i\n l = 2*i+1\n r = 2*i+2\n\n if l < n and arr[l] > arr[i]:\n largest = l\n\n if r < n and arr[largest] < arr[r]:\n largest = r\n\n if largest != i:\n arr[largest], arr[i] = arr[i], arr[largest]\n heapify(arr, n, largest)\n\ndef heapSort(arr): # complexity -> nLog(n)\n\n n = len(arr)\n for i in range(n-1, -1, -1):\n heapify(arr, n, i)\n\n for i in range(n-1, -1, -1):\n arr[0], arr[i] = arr[i], arr[0]\n heapify(arr, i, 0)\n\n return arr\n\nprint(heapSort([3,5,1,2,8,9,4,0]))\n","repo_name":"poojaKarande13/ProgramingPractice","sub_path":"python/heapSort.py","file_name":"heapSort.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"9541334325","text":"import time\nimport pandas as pd\nimport pyspark.sql.functions as F\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession, SQLContext, HiveContext, Row\n\nspark = SparkSession.builder \\\n .master(\"local\") \\\n .appName(\"example\") \\\n .config(\"spark.debug.maxToStringFields\", \"100\") \\\n .config(\"spark.sql.shuffle.partitions\", \"400\") \\\n .config(\"spark.default.parallelism\", \"600\") \\\n .config(\"spark.sql.auto.repartition\", \"true\") \\\n .config(\"spark.sql.execution.arrow.enabled\", \"true\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n\nstarttime = time.time()\n\ninputFile01 = 'hdfs://localhost:9000/result/form_par'\ninputs01 = spark.read.format('parquet').load(inputFile01)\ninputs01.createOrReplaceTempView(\"tweets01\")\nendtime = time.time()\nprint(\"1: \", endtime - starttime)\n# testDF['Age'], testDF['Sex'], testDF['HosRegisterCode']\n# testDF[\"CertificateCode\"], testDF['Desc'], testDF['AllName'], testDF[\"Name\"],\nstarttime = time.time()\ntestDF = spark.sql(\n \"\"\"SELECT CertificateCode, Desc, AllName, Name, Age, Sex, HosRegisterCode FROM tweets01 WHERE tweets01.Name= '柳三女'\"\"\") \\\n .withColumn(\"id\", F.monotonically_increasing_id())\nendtime = time.time()\nprint(\"2: \", endtime - starttime)\n\nstart = time.time()\ntestDF = testDF.select('*').where((testDF.id >= 0) & (testDF.id < 20))\nendt = time.time()\nprint('lll: ', endt - start)\n\nstart = time.time()\nddl = testDF.toPandas()\nend = time.time()\nprint(\"ddl: \", end - start)\n\nstart = time.time()\n\nlist_persons = map(lambda row: row.asDict(), testDF.collect())\n\nend = time.time()\nprint(type(list_persons))\nprint(list_persons)\nprint(\"ddl: \", end - start)\n\n# starttime = time.time()\njson_list = []\nfor a, b, c, d, e, f, g in zip(testDF[\"CertificateCode\"], testDF['Desc'], testDF['AllName'], testDF[\"Name\"], testDF['Age'], testDF['Sex'], testDF['HosRegisterCode']):\n json_dict = {'CertificateCode': a, 'Desc': b, 'AllName': c, 'Name': d, 'Age': e, 'Sex': f,\n 'HosRegisterCode': g}\n json_list.append(json_dict)\n\nprint(type(json_list))\nprint(json_list)\n# endtime = time.time()\n# print(\"3: \", endtime - starttime)\n# print(json_list)\n# print(tst)\n","repo_name":"djejjd/BackupCode","sub_path":"clean/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5367794026","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # ML Pipeline Preparation\n# Follow the instructions below to help you create your ML pipeline.\n# ### 1. Import libraries and load data from database.\n# - Import Python libraries\n# - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)\n# - Define feature and target variables X and Y\n\n\n#%%\nimport nltk\n#nltk.download(['stopwords', 'punkt', 'wordnet', 'averaged_perceptron_tagger',\n# 'maxent_ne_chunker', 'words', 'word2vec_sample'])\n\n# import libraries\nimport dill as pickle\nimport re\nimport numpy as np\nimport pandas as pd\nimport time\n\nfrom sqlalchemy import create_engine\n\nimport nltk\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.stem.porter import PorterStemmer\n\nfrom nltk import ne_chunk, pos_tag\n\nfrom sklearn import svm\nfrom sklearn.linear_model import (LogisticRegression,\n RidgeClassifier)\n\nfrom sklearn.ensemble import (RandomForestClassifier,\n BaggingClassifier,\n RandomTreesEmbedding\n )\n\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.multiclass import OneVsRestClassifier\n\n\nfrom sklearn.naive_bayes import GaussianNB, BernoulliNB\n\nfrom sklearn.model_selection import train_test_split, KFold, GridSearchCV, cross_val_score\nfrom sklearn.preprocessing import PolynomialFeatures\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import (StandardScaler, RobustScaler, Normalizer,\n FunctionTransformer, QuantileTransformer,\n PowerTransformer, OneHotEncoder)\n\nfrom sklearn.compose import ColumnTransformer\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nfrom sklearn.feature_extraction.text import (CountVectorizer,\n TfidfTransformer,\n HashingVectorizer\n )\n\nfrom sklearn.feature_selection import chi2, SelectKBest\nfrom sklearn.decomposition import TruncatedSVD\n\nfrom sklearn.neighbors import (KNeighborsClassifier,\n NeighborhoodComponentsAnalysis)\n\nfrom sklearn.metrics import (confusion_matrix, f1_score, precision_score,\n recall_score, classification_report,\n roc_auc_score,accuracy_score, make_scorer)\n\nfrom sklearn.utils import resample\n\nfrom models.custom_transform import (StartingVerbExtractor,\n KeywordSearch,\n EntityCount,\n GetVerbNounCount,\n tokenize,\n Dense,\n SentenceVector\n )\n\n\n\n# In[3]:\n\n\npd.options.display.max_columns = 60\n\ndef drop_class(Y):\n \"\"\"\n Checks distribution of classes in each category.\n Drops class(es) (inplace) where there is less than 2 classes present.\n\n For example, if one of the target classes contain only ones of zeros,\n that target class will be removed.\n\n This functions does not return anything.\n \"\"\"\n # extract category which has less than two classes\n print('Dropping class(es):', Y.nunique()[Y.nunique() < 2].index.tolist())\n # drop category, `child_alone`\n Y.drop(Y.nunique()[Y.nunique() < 2].index.tolist(), axis=1, inplace=True)\n\n# In[4]:\ndef load_data(database_filepath, n_sample=5000):\n \"\"\"\n Import data from database into a DataFrame. Split DataFrame into\n features and predictors, `X` and `Y`. Additionally, extract the names\n of target categories.\n\n Preprocess data.\n\n Params:\n -------\n database_filepath: file path of database\n\n n_sample: int, optional\n Number of samples to draw from data. If set to `0`, then entire\n data is used.\n\n Returns:\n -------\n tuple(X, Y, category_names)\n pd.DataFrame of features and predictors, `X` and `Y`, respectively.\n List of target category names\n \"\"\"\n\n engine = create_engine(f'sqlite:///{database_filepath}')\n\n # extract directory name\n dir_ = re.findall(\".*/\", database_filepath)\n\n # extract table name by stripping away directory name\n table_name = database_filepath.replace('.db', '').replace(dir_[0], \"\")\n\n df = pd.read_sql_table(f'{table_name}', engine)\n\n if n_sample > 0:\n # Sample data\n df = df.sample(n_sample)\n\n # reset index\n df.reset_index(drop=False, inplace=True)\n\n # DROP ROWS/COLUMN\n # where sum across entire row is less than 1\n null_idx = np.where(df.loc[:, 'related':].sum(axis=1) < 1)[0]\n # drop rows which contain all null values\n df.drop(null_idx, axis=0, inplace=True)\n\n # explore `related` feature where its labeled as a `2`\n related_twos = df[df['related'] == 2]\n df.drop(index=related_twos.index, inplace=True)\n\n # reset index\n df = df.reset_index(drop=True)\n\n # define features and predictors\n X = df.loc[:, 'message']\n Y = df.loc[:, 'related':]\n Y.drop(Y.nunique()[Y.nunique() < 2].index.tolist(), axis=1, inplace=True)\n\n # extract label names\n category_names = Y.columns.to_list()\n\n return X, Y, category_names\n\n\n#%%\n# load data from database\nengine = create_engine('sqlite:///data/disaster_response.db')\ndf = pd.read_sql_table('disaster_response', engine)\n\n\n# X, Y, categories = load_data('data/disaster_response.db', n_sample=10000)\n\n#%%\nX = df.loc[:, ['message']]\nY = df.loc[:, 'related':]\n\n\n# explore `related` feature where its labeled as a `2`\nrelated_twos = df[df['related'] == 2]\n\n# try dropping the above rows\ndf.drop(index=related_twos.index, inplace=True)\ndf = df.reset_index(drop=True)\n# check count of classes\ndf.nunique()\n\n# now `related` has been reduced down to two classes\n\n\n# In[8]:\n# EXPLORE MESSAGES IN MORE DETAIL\n\nidx = 9\ndf.loc[idx, 'message']\ndf.loc[idx, 'related':]\n\n# all rows except `related` are equal to zero at given index\n(df.loc[idx, 'related':] == 0).all()\n\n# iterate over each message, find each row which contains ALL zeros\nrow_sum = df.loc[:, 'related':].apply(sum, axis=1)\ndrop_idx = row_sum[row_sum < 1].index\nprint(len(drop_idx))\n\n\n# inspect indecies before dropping\n# NOTE: This message is asking for FOOD AND WATER. However, ALL labels\n# indicate NO need for help\nidx = drop_idx[78]\ndf.loc[idx, 'message']\ndf.loc[idx, 'message':]\n\n\nidx = drop_idx[77]\ndf.loc[idx, 'message']\ndf.loc[idx, 'message':]\n\n\n#%%\nfrom spellchecker import SpellChecker\n\nspell = SpellChecker()\n\n# find those words that may be misspelled\nmisspelled = spell.unknown(['something', 'is', 'hapenning', 'here'])\n\nfor word in misspelled:\n # Get the one `most likely` answer\n print(spell.correction(word))\n\n # Get a list of `likely` options\n print(spell.candidates(word))\n\nidx = 15\ndf.loc[idx, 'message']\ndf.loc[idx, 'related':]\n\n#%%\n\n### COMBINE OUTPUT CATEGORIES\n\n\n# Combine Weather\n(df['weather_related'] + df['other_weather']).unique()\n\ndf.loc[:, 'related':].sum(axis=1)\n\n\n#%%\n\n# FIND ROWS WITH NO POSITIVE INSTANCES\n\n# where sum across entire row is less than 1\nnull_idx = np.where(df.loc[:, 'related':].sum(axis=1) < 1)[0]\n\n# drop rows which contain all null values\ndf.drop(null_idx, axis=0, inplace=True)\n\n\n#%%\n\n# CHECK BALANCE\nbefore = (df.loc[:, 'related':].sum() / df.loc[:, 'related':].shape[0]).sort_values()\n\n# DROP INDEX\ndf.drop(index=drop_idx, inplace=True)\n\n# CHECK BALANCE, AGAIN\nafter = (df.loc[:, 'related':].sum() / df.loc[:, 'related':].shape[0]).sort_values()\n\nnp.c_[before, after]\n\n# REPLACE `related` with zeros\ndf['related'].replace(to_replace=1, value=0, inplace=True)\ndf['related'].sum()\n\n\n#%%\n\ndef tokenize(text):\n \"\"\"\n Replace `url` with empty space \"\".\n Tokenize and lemmatize input `text`.\n Converts to lower case and strips whitespaces.\n\n\n Returns:\n --------\n dtype: list, containing processed words\n \"\"\"\n\n lemm = WordNetLemmatizer()\n\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"\")\n\n # load stopwords\n stop_words = stopwords.words(\"english\")\n\n remove_words = ['one', 'see', 'please', 'thank', 'thank you', 'thanks',\n 'we', 'us', 'you', 'me']\n for addtl_word in remove_words:\n stop_words.append(addtl_word)\n\n # remove punctuations (retain alphabetical and numeric chars) and convert to all lower case\n # tokenize resulting text\n tokens = word_tokenize(re.sub(r\"[^a-zA-Z]\", ' ', text.lower().strip()))\n\n # drop stop words\n no_stops = [word for word in tokens if word not in stop_words]\n\n # lemmatize and remove stop words\n # lemmatized = [lemm.lemmatize(word) for word in tokens if word not in stop_words]\n\n return no_stops\n\n\n\n#%%\n\nidx = 99\nmsg = df.loc[idx, 'message']\ndf.loc[idx, 'related':]\nprint(msg)\n\n\n# tokenize, pos tag, then recognize named entities in text\ntree = ne_chunk(pos_tag(word_tokenize(msg)))\nprint(tree)\n\nne_list = ['GPE', 'PERSON', 'ORGANIZATION']\nne_labels = []\nfor item in tree.subtrees():\n ne_labels.append(item.label())\n\n# FOUND ENTITIES\npd.Series(ne_list).isin(ne_labels).astype(np.int32).values\n\ntokenize(msg)\n#%%\n#print(nltk.FreqDist(lem).most_common())\n#nltk.ConditionalFreqDist(pos_tag(lem))['is'].most_common()\n\n# POSITION OF VERBS AND NOUNS\n\n\n\n# In[30]:\nN_JOBS = -1\n\n# LogisticRegression params\nlg_params = dict(\n C = 12,\n solver = 'newton-cg',\n penalty = 'l2',\n class_weight = 'balanced',\n multi_class = 'multinomial',\n n_jobs = N_JOBS,\n random_state = 11\n\n)\n\nsvc_params = dict(\n C = 2,\n kernel = 'linear',\n# gamma = 0.002,\n cache_size = 1000,\n class_weight = 'balanced',\n random_state = 11\n\n)\n\nrf_params = dict(\n n_estimators=40,\n max_depth=4,\n # min_samples_split=10,\n class_weight='balanced',\n n_jobs=N_JOBS,\n random_state=11\n )\n\n# define classifier\nclf = LogisticRegression(**lg_params)\n# clf = svm.SVC(**svc_params)\n# clf = RandomForestClassifier(**rf_params)\n#\n\n# pipeline = Pipeline([\n# ('count_vect', CountVectorizer(\n# tokenizer=tokenize,\n# ngram_range=(1, 2),\n# # max_features=200\n# )),\n# ('tfidf_tx', TfidfTransformer()),\n# ('clf', MultiOutputClassifier(clf, n_jobs=6))\n# ])\n\npipeline = Pipeline([\n\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('count_vect', CountVectorizer(\n tokenizer=tokenize,\n ngram_range=(1, 1),\n ))\n ])),\n\n # ('keywords', KeywordSearch()),\n # ('verb_noun_count', GetVerbNounCount()),\n # ('entity_count', EntityCount()),\n # ('verb_extract', StartingVerbExtractor()),\n\n\n ], n_jobs=N_JOBS)),\n\n ('tfidf_tx', TfidfTransformer()),\n # ('quantile_tx', QuantileTransformer(output_distribution='normal',\n # random_state=11)),\n # ('decomp', TruncatedSVD(n_components=2,\n # random_state=11)),\n # ('rt', RandomTreesEmbedding(**rt_params)),\n # ('dense', Dense()),\n # ('poly', PolynomialFeatures(degree=3, interaction_only=True)),\n # ('scale', RobustScaler(with_centering=False)),\n ('clf', MultiOutputClassifier(clf, n_jobs=N_JOBS))\n ])\n\n# use ColumnTransfomer to combine transformations\n# NOTE:\n# OneHot expects 2-D, therefore, the column(s) must be specified\n# as a list!\n#full_pipe = Pipeline([\n# ('union', ColumnTransformer([\n# ('category', OneHotEncoder(), [0]),\n# ('messages', pipeline, 1),\n# ])),\n# ('clf', MultiOutputClassifier(clf,n_jobs=-1))\n# ], memory='models/cache')\n\n# In[56]:\n\n# RESET INDEX\n# df.reset_index(drop=True, inplace=True)\n# df['genre'] = df['genre'].astype('category')\n\n# X = df.loc[:, 'message']\n# Y = df.loc[:, 'related':]\n\nX, Y, categories = load_data('data/disaster_response.db', n_sample=0)\n\nprint('X-shape:', X.shape)\nprint('Y-shape:', Y.shape)\n\n\n(Y.sum() / Y.shape[0]).sort_values()\n\n\n# In[31]:\n\n\n# extract category which has less than two classes\nprint(Y.nunique()[Y.nunique() < 2].index.tolist())\n\n# drop category, `child_alone`\nY.drop(Y.nunique()[Y.nunique() < 2].index.tolist(), axis=1, inplace=True)\n\n\nX_train, X_test, y_train, y_test = train_test_split(X.values,\n Y.values,\n # stratify=Y['offer'].values,\n test_size=0.15)\n\n\n# In[33]:\nprint('Training model...')\n\nstart_time = time.perf_counter()\n\npipeline.fit(X_train.ravel(), y_train)\ny_pred = pipeline.predict(X_test.ravel())\n\n#full_pipe.fit(X_train, y_train)\n#y_pred = full_pipe.predict(X_test)\n\nend_time = time.perf_counter()\n\nprint('\\n')\nprint('-'*75)\nprint('Training time:', np.round((end_time - start_time)/60, 4), 'min')\nprint('\\n')\n\n# ### 5. Test your model\n# Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.\n\nprint('Scoring model...')\n# print label and f1-score for each\navg = 'weighted'\nlabels = Y.columns.tolist()\nf1 = []\nprec = []\nrec = []\nacc = []\n#train_scores = []\nfor i in range(y_test[:, :].shape[1]):\n f1.append(f1_score(y_test[:, i], y_pred[:, i], average=avg))\n acc.append(accuracy_score(y_test[:, i], y_pred[:, i]))\n rec.append(recall_score(y_test[:, i], y_pred[:, i], average=avg))\n prec.append(precision_score(y_test[:, i], y_pred[:, i], average=avg))\n\n# summarize f1-scores and compare to the rate of positive class occurance\nf1_df = pd.DataFrame({'f1-score': np.round(f1, 4),\n 'precision': np.round(prec, 4),\n 'recall': np.round(rec, 4),\n 'accuracy': np.round(acc, 4)}, index=labels)\n\n\nprint('\\n')\nprint('='*75)\nprint(f1_df)\nprint('\\n')\nprint(f1_df.agg(['mean', 'median', 'std']))\nprint('='*75)\nprint('\\n')\n\nf1_df['f1-score'].mean()\n\n\n#%%\nprint('\\nCross-validating...\\n')\nscores = cross_val_score(\n pipeline,\n X_train.ravel(),\n y_train,\n scoring='f1_weighted',\n cv=3,\n n_jobs=N_JOBS)\nprint('\\nCross-val scores:\\n', scores)\n\n#%%\n# with open('results.txt', 'a') as file:\n# file.write('\\n\\n')\n# file.write(str(time.localtime()))\n# file.write(('-'*100))\n# file.write(str(pipeline.get_params()))\n# file.write('\\n\\n')\n# file.write(str(f1_df))\n# file.write('\\n\\n')\n# file.write(str(f1_df.agg(['mean', 'median', 'std'])))\n# file.write('\\n\\n')\n# file.write(('-'*100))\n# file.write('\\n\\n')\n\n\n# ### 6. Improve your model\n# Use grid search to find better parameters.\n\n#%%\n#import matplotlib.pyplot as plt\n#fpr = dict()\n#tpr = dict()\n#roc_auc = dict()\n#for i in range(y_test[:, :].shape[1]):\n# fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])\n# roc_auc[i] = auc(fpr[i], tpr[i])\n#\n##%%\n#plt.figure()\n#lw = 2\n#plt.plot(fpr[1], tpr[1], color='darkorange',\n# lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])\n#\n#plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n#plt.xlim([0.0, 1.0])\n#plt.ylim([0.0, 1.05])\n#plt.xlabel('False Positive Rate')\n#plt.ylabel('True Positive Rate')\n#plt.title('Receiver operating characteristic example')\n#plt.legend(loc=\"lower right\")\n#plt.show()\n\n\n\n# In[70]:\n\n# GRID-SEARCH HYPERPARAMS\n\n# print('Performing GridSearch. Please be patient ...')\n# grid_params = {\n# 'clf__estimator__C': [2, 4],\n# # 'clf__estimator__n_estimators': [80, 120, 150],\n# # 'clf__estimator__class_weight': [{0: 1, 1: 500},\n# # {0: 1, 1: 300},]\n\n# }\n\n# grid_cv = GridSearchCV(\n# pipeline,\n# grid_params,\n# cv=3,\n# scoring='f1_weighted',\n# n_jobs=2,\n# )\n# grid_cv.fit(X_train.ravel(), y_train)\n\n\n# print('Using best params...')\n# print(grid_cv.best_params_)\n\n# y_pred = grid_cv.predict(X_test.ravel())\n\n# print('Scoring model using tuned params...')\n# # print label and f1-score for each\n# avg = 'weighted'\n# labels = Y.columns.tolist()\n# f1 = []\n# prec = []\n# rec = []\n# acc = []\n# #train_scores = []\n# for i in range(y_test[:, :].shape[1]):\n# f1.append(f1_score(y_test[:, i], y_pred[:, i], average=avg))\n# acc.append(accuracy_score(y_test[:, i], y_pred[:, i]))\n# rec.append(recall_score(y_test[:, i], y_pred[:, i], average=avg))\n# prec.append(precision_score(y_test[:, i], y_pred[:, i], average=avg))\n\n# # summarize f1-scores and compare to the rate of positive class occurance\n# f1_df = pd.DataFrame({'f1-score': np.round(f1, 4),\n# 'precision': np.round(prec, 4),\n# 'recall': np.round(rec, 4),\n# 'accuracy': np.round(acc, 4)}, index=labels)\n\n\n# print('\\n')\n# print('='*75)\n# print(f1_df)\n# print('\\n')\n# print(f1_df.agg(['mean', 'median', 'std']))\n# print('='*75)\n# print('\\n')\n\n\n#%%\n# ### 8. Try improving your model further. Here are a few ideas:\n# * try other machine learning algorithms\n# * add other features besides the TF-IDF\n# * sampling: upsample or downsample in order to improve balance between classes\n# * however, upsampling minority classes may also affect majority classes and result in no significant imporvement\n# * create more features\n# * search each message for keywords; use target array labels as keywords\n# * for example, search for keywords, `food`, `water`, `shelter`...\n\n# In[ ]:\n\n\n\n\n\n# ### 9. Export your model as a pickle file\n\n# In[ ]:\n\n\n\n\n\n# ### 10. Use this notebook to complete `train.py`\n# Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.\n\n# In[ ]:\n\n\n\n\n\n# # Resampling\n\n# In[ ]:\n\n\n\ndef upsample(X_train, y_train, target_col_name, sample_fraction=0.25):\n\n # combine train sets\n X_c = pd.concat([X_train, y_train], axis=1)\n # extract `success` and `fail` instances, `success` represented by 1\n fail = X_c[X_c[target_col_name] == 0]\n success = X_c[X_c[target_col_name] == 1]\n\n # upsample to match 'fail' class\n success_upsampled = resample(success,\n replace=True,\n n_samples=int(len(fail)*(sample_fraction)),\n random_state=11\n )\n # put back together resample `success` and fail\n upsample = pd.concat([fail, success_upsampled])\n # split back into X_train, y_train\n X_train = upsample['message']\n y_train = upsample.drop('message', axis=1)\n\n return X_train, y_train\n\ndef get_difference(array1, array2):\n \"\"\"Returns difference in the amount of rows between arrays\"\"\"\n return array1.shape[0] - array2.shape[0]\n\n# print('X_train size: ', X_train.shape[0])\n# print('X_test size: ', X_test.shape[0])\n# # print('X_holdout size:', X_holdout.shape[0])\n\n# # review class balance\n# final_balance = (y_train.sum() / y_train.shape[0]).sort_values()\n# bal_df = pd.DataFrame({'initial_balance': init_balance, 'final_balance': final_balance})\n# print(bal_df.sort_values(by='final_balance'))\n\n\n#%%\n# 1st Upsample\n# =============================================================================\n\nX_train, X_test, y_train, y_test = train_test_split(X,\n Y,\n test_size=0.15)\n# review class balance\ninit_balance = (y_train.sum() / y_train.shape[0]).sort_values()\nprint('Initial balance:\\n', init_balance)\nprint('')\nprint('Initial X_train shape:', X_train.shape[0])\n\n# Upsample 'fire'\n# =============================================================================\nX_train_up, y_train_up = upsample(X_train, y_train, 'fire')\n\n# review class balance\nfinal_balance = (y_train_up.sum() / y_train_up.shape[0]).sort_values()\nbal_df = pd.DataFrame({'initial_balance': init_balance, 'final_balance': final_balance})\nprint(bal_df.sort_values(by='final_balance'))\n\n# number of rows added due to resampling\nprint(get_difference(X_train_up, X_train))\n\n#%%\n# 2nd Upsample\n# =============================================================================\n# review class balance\ninit_balance = (y_train.sum() / y_train.shape[0]).sort_values()\nprint('Initial balance:', init_balance)\nprint('')\n\nX_train_up, y_train_up = upsample(X_train_up, y_train_up, 'missing_people')\n\n# review class balance\nfinal_balance = (y_train_up.sum() / y_train_up.shape[0]).sort_values()\nbal_df = pd.DataFrame({'initial_balance': init_balance, 'final_balance': final_balance})\nprint(bal_df.sort_values(by='final_balance'))\n\n# number of rows added due to resampling\nprint('Rows added after resampling:', get_difference(X_train_up, X_train))\n\n\n# Iterate over mutltiple columns to resample\n# =============================================================================\nfor col in ['fire', 'missing_people', 'clothing']:\n X_train, y_train = upsample(X_train, y_train, col)\n\nprint('Final X_train shape:', X_train.shape[0])\n(y_train.sum() / y_train.shape[0]).sort_values()\n\n\n\n#%%\n# =============================================================================\n# Downsample\n# =============================================================================\ndef downsample(X_train, y_train, target_col_name, sample_fraction=1.0):\n \"\"\"\n\n\n Parameters\n ----------\n X_train : pd.DataFrame\n Training feature space subset.\n\n y_train : pd.DataFrame\n Training target variable subset.\n\n target_col_name : str\n Target variable to resample.\n\n sample_fraction : float, optional\n Controls the number of samples being drawn from an array.\n This essentially control the magnitude of downsampling. Increasing\n this value will draw more samples from array.\n\n Returns\n -------\n X_train : pd.DataFrame\n y_train : pd.DataFrame\n\n \"\"\"\n # combine train sets\n X_c = pd.concat([X_train, y_train], axis=1)\n # extract `success` and `fail` instances, `success` represented by 1\n fail = X_c[X_c['aid_related'] == 0]\n success = X_c[X_c['aid_related'] == 1]\n\n # downsample w/replacment; number of samples = len(fail)\n # this essentially add more instances of `fail` to improve balance\n success_downsampled = resample(fail,\n replace=True,\n n_samples=int(len(success)*sample_fraction),\n random_state=11\n )\n\n # put back together resample `success` and fail\n downsample = pd.concat([success, success_downsampled])\n # split back into X_train, y_train\n X_train = downsample['message']\n y_train = downsample.drop('message', axis=1)\n\n return X_train, y_train\n\nX_train, X_test, y_train, y_test = train_test_split(X,\n Y,\n test_size=0.15)\nx_down, y_down = downsample(X_train, y_train, 'aid_related', 1.0)\n\nx_down.shape\nX_train.shape\n\n(y_down.sum() / y_down.shape[0]).sort_values()\n(y_train.sum() / y_train.shape[0]).sort_values()\n\n#%%\n\n# =============================================================================\n# Perform Upsample, then Downsample\n# =============================================================================\nX, Y, categories = load_data('data/disaster_response.db', n_sample=0)\n\nX_train, X_test, y_train, y_test = train_test_split(X,\n Y,\n test_size=0.15)\n\n# review class balance\ninit_balance = (y_train.sum() / y_train.shape[0]).sort_values()\nprint('Initial balance:\\n', init_balance)\nprint('')\nprint('Initial X_train shape:', X_train.shape[0])\nprint('-'*75)\n\n#### Upsample more important features\nfor col in ['food', 'clothing', 'hospitals']:\n X_train, y_train = upsample(X_train, y_train, col, 0.7)\n\n#### Downsample\nX_train, y_train = downsample(X_train, y_train, 'aid_related', 0.5)\n\n# review class balance\nfinal_balance = (y_train.sum() / y_train.shape[0]).sort_values()\n\nbal_df = pd.DataFrame({'initial_balance': init_balance,\n 'final_balance': final_balance})\n\nbal_df['improved'] = np.where(bal_df['final_balance'] > bal_df['initial_balance'], 1, 0)\nbal_df['diff_prc'] = (bal_df['final_balance'] - bal_df['initial_balance'])*100\n\nprint(bal_df.sort_values(by='final_balance'))\n\nprint(\"Final shape:\", X_train.shape)\nprint(\"Median Improvement:\", bal_df['diff_prc'].median())\n\n\"\"\"\nNOTE:\n Resampling increases the amount of data significantly.\n However, there is improvement in the balance between classes.\n\"\"\"\n\n# In[ ]:\n\n\n# from sklearn.utils.class_weight import compute_class_weight\n# from sklearn.utils import resample\n# class_weights = compute_class_weight('balanced', np.unique(Y), Y.iloc[:, 2])\n\n\n# # Add more features\n\n# In[ ]:\n\n\n\n\n","repo_name":"sergatron/deploy-disaster-response","sub_path":"models/ML_prep_clean.py","file_name":"ML_prep_clean.py","file_ext":"py","file_size_in_byte":25414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32768249545","text":"from kubernetes import client\n\n\nclass Resources:\n def __init__(self, region, context, cluster, namespaces):\n self.region = region\n self.context = context\n self.cluster = cluster\n self.namespaces = namespaces\n\n def set_resources(self):\n self.cluster_roles = (\n client.RbacAuthorizationV1Api().list_cluster_role().items\n )\n self.cluster_role_bindings = (\n client.RbacAuthorizationV1Api().list_cluster_role_binding().items\n )\n self.resource_quotas = (\n client.CoreV1Api().list_resource_quota_for_all_namespaces().items\n )\n self.network_policies = (\n client.NetworkingV1Api()\n .list_network_policy_for_all_namespaces()\n .items\n )\n self.storage_classes = client.StorageV1Api().list_storage_class().items\n self.persistent_volumes = (\n client.CoreV1Api().list_persistent_volume().items\n )\n\n\nclass NamespacedResources:\n def __init__(self, region, context, cluster, namespace):\n self.namespace = namespace\n self.region = region\n self.cluster = cluster\n self.context = context\n\n def set_resources(self):\n self.roles = (\n client.RbacAuthorizationV1Api()\n .list_namespaced_role(self.namespace)\n .items\n )\n self.pods = (\n client.CoreV1Api().list_namespaced_pod(self.namespace).items\n )\n self.role_bindings = (\n client.RbacAuthorizationV1Api()\n .list_namespaced_role_binding(self.namespace)\n .items\n )\n self.deployments = (\n client.AppsV1Api().list_namespaced_deployment(self.namespace).items\n )\n self.daemon_sets = (\n client.AppsV1Api().list_namespaced_daemon_set(self.namespace).items\n )\n self.stateful_sets = (\n client.AppsV1Api()\n .list_namespaced_stateful_set(self.namespace)\n .items\n )\n self.services = (\n client.CoreV1Api().list_namespaced_service(self.namespace).items\n )\n self.hpas = (\n client.AutoscalingV1Api()\n .list_namespaced_horizontal_pod_autoscaler(self.namespace)\n .items\n )\n","repo_name":"aws-samples/hardeneks","sub_path":"hardeneks/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":770,"dataset":"github-code","pt":"48"}
+{"seq_id":"42592040781","text":"\"\"\"\nDjango settings for AlumniConnect project.\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.humanize',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_cleanup',\n 'anymail',\n 'easy_thumbnails',\n 'imagekit',\n 'crispy_forms',\n 'applications.alumniprofile',\n 'applications.awards',\n 'applications.blog',\n 'applications.events_news',\n 'applications.job_posting',\n 'applications.adminportal',\n 'applications.members',\n 'applications.news',\n 'applications.geolocation',\n 'applications.publications',\n 'applications.gallery',\n 'applications.chapter',\n 'ckeditor',\n 'ckeditor_uploader',\n 'tempus_dominus'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'AlumniConnect.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['/', os.path.join(BASE_DIR, '..', 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.template.context_processors.media',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'AlumniConnect.wsgi.application'\n\n# Database\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, '..', 'db.sqlite3'),\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Asia/Kolkata'\n\nUSE_I18N = True\n\nUSE_L10N = False\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media/')\nMEDIA_URL = '/media/'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nLOGIN_URL = 'login'\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\nCKEDITOR_JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/2.2.4/jquery.min.js'\n\nCKEDITOR_UPLOAD_PATH = 'uploads/'\nCKEDITOR_IMAGE_BACKEND = \"pillow\"\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'toolbar': None,\n 'extraplugins': ['table'],\n 'width': '100%'\n }\n}\n\nTEMPUS_DOMINUS_LOCALIZE = True\n\n# CELERY STUFF\nBROKER_URL = 'redis://localhost:6379'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379'\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIMEZONE = 'Asia/Kolkata'\nPASSWORD_RESET_TIMEOUT_DAYS = 1\nANYMAIL = {\n # (exact settings here depend on your ESP...)\n \"MAILJET_API_KEY\": os.environ.get(\"MJ_APIKEY_PUBLIC\", \"\"),\n \"MAILJET_SECRET_KEY\": os.environ.get(\"MJ_APIKEY_PRIVATE\", \"\"), # your Mailgun domain, if needed\n\n}\nMAILJET_API_URL = \"https://api.mailjet.com/v3.1\"\nEMAIL_BACKEND = \"anymail.backends.mailjet.EmailBackend\" # or sendgrid.EmailBackend, or...\nDEFAULT_FROM_EMAIL = \"Alumni Cell IIITDMJ \" # if you don't already have this in settings\nSERVER_EMAIL = os.environ.get(\"MJ_SENDER_EMAIL\", \"\") # ditto (default from-email for Django errors)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder'\n)\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'unique-snowflake',\n }\n}\n","repo_name":"Student-Alumni-Connect/alumni","sub_path":"AlumniConnect/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
+{"seq_id":"28795859446","text":"import functools\nimport re\n\n\ndef get_input() -> list[str]:\n with open(\"input\") as f:\n return f.readlines()\n\n\ndef combine_value_with_bitmask(value: int, mask: str) -> int:\n bitwise_value = f\"{value:036b}\"\n\n new_bitwise_value_list = []\n for value_bit, mask_bit in zip(bitwise_value, mask):\n bit = value_bit if mask_bit == \"X\" else mask_bit\n new_bitwise_value_list.append(bit)\n\n new_bitwise_value = \"\".join(new_bitwise_value_list)\n\n print(bitwise_value)\n print(mask)\n print(new_bitwise_value)\n\n return int(new_bitwise_value, 2)\n\n\ndef handle_instruction(state: dict, instruction: str) -> dict:\n print(state)\n if \"mask\" in instruction:\n match = re.search(r\"mask = (\\w+)\", instruction)\n if not match:\n raise RuntimeError(f\"Mem instruction should match {instruction}\")\n\n new_mask = match.group(1)\n return {\"memory\": state[\"memory\"], \"mask\": new_mask}\n\n if \"mem\" in instruction:\n match = re.search(r\"mem\\[(\\d+)\\] = (\\d+)\", instruction)\n if not match:\n raise RuntimeError(f\"Mem instruction should match {instruction}\")\n\n memory_index = int(match.group(1))\n memory_value = int(match.group(2))\n new_memory = state[\"memory\"]\n new_memory[memory_index] = combine_value_with_bitmask(\n memory_value, state[\"mask\"]\n )\n return {\"memory\": new_memory, \"mask\": state[\"mask\"]}\n\n raise RuntimeError(f\"Me no understand instruction {instruction}\")\n\n\ndef main() -> int:\n instructions = get_input()\n\n # Initialize memory.\n state: dict = {\"memory\": {}, \"mask\": 0}\n\n final_state = functools.reduce(handle_instruction, instructions, state)\n\n return sum(final_state[\"memory\"].values())\n\n\nprint(main())\n","repo_name":"steve148/advent-of-code","sub_path":"2020/14/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10178366604","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name='dvtDecimal',\n version='1.4.0',\n description='Repeating digits of rational numbers',\n long_description_content_type='text/markdown',\n long_description=long_description,\n url='https://twitter.com/david_cobac',\n author='David COBAC',\n author_email='david.cobac@gmail.com',\n keywords=['rational',\n 'numbers',\n 'fraction',\n 'decimal',\n 'nombres',\n 'décimaux'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n ],\n license='CC-BY-NC-SA',\n packages=find_packages()\n)\n","repo_name":"cobacdavid/dvtDecimal","sub_path":"dvtDecimal/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"110256189","text":"import pytest\ntry:\n from rmad.expressions import Symbol, Number, \\\n Add, Sub, Mul, Div, Pow\nexcept ImportError:\n pass\n\n\ndef test_imports():\n from rmad.expressions import Symbol, Number, \\\n Add, Sub, Mul, Div, Pow # NoQA F401\n\n\n@pytest.fixture\ndef sample_operand_expr():\n o1 = Symbol('x')\n o2 = Symbol('y')\n o3 = Number(42)\n o4 = Number(1)\n return o1, o2, o3, o4\n\n\n@pytest.mark.parametrize(\"a1, a2, expr\", [\n (0, 1, \"x + y\"),\n (2, 1, \"42 + y\"),\n (2, 3, \"42 + 1\"),\n])\ndef test_add(a1, a2, expr, sample_operand_expr):\n x, y = sample_operand_expr[a1], sample_operand_expr[a2]\n assert str(Add(x, y)) == expr, \\\n f\"expected string representation of {expr} but got {str(Add(x, y))}\"\n\n\n@pytest.mark.parametrize(\"a1, a2, expr\", [\n (0, 1, \"x - y\"),\n (2, 1, \"42 - y\"),\n (2, 3, \"42 - 1\"),\n])\ndef test_sub(a1, a2, expr, sample_operand_expr):\n x, y = sample_operand_expr[a1], sample_operand_expr[a2]\n assert str(Sub(x, y)) == expr, \\\n f\"expected string representation of {expr} but got {str(Sub(x, y))}\"\n\n\n@pytest.mark.parametrize(\"a1, a2, expr\", [\n (0, 1, \"x * y\"),\n (2, 1, \"42 * y\"),\n (2, 3, \"42 * 1\"),\n])\ndef test_mul(a1, a2, expr, sample_operand_expr):\n x, y = sample_operand_expr[a1], sample_operand_expr[a2]\n assert str(Mul(x, y)) == expr, \\\n f\"expected string representation of {expr} but got {str(Mul(x, y))}\"\n\n\n@pytest.mark.parametrize(\"a1, a2, expr\", [\n (0, 1, \"x / y\"),\n (2, 1, \"42 / y\"),\n (2, 3, \"42 / 1\"),\n])\ndef test_div(a1, a2, expr, sample_operand_expr):\n x, y = sample_operand_expr[a1], sample_operand_expr[a2]\n assert str(Div(x, y)) == expr, \\\n f\"expected string representation of {expr} but got {str(Div(x, y))}\"\n\n\n@pytest.mark.parametrize(\"a1, a2, expr\", [\n (0, 1, \"x ^ y\"),\n (2, 1, \"42 ^ y\"),\n (2, 3, \"42 ^ 1\"),\n])\ndef test_pow(a1, a2, expr, sample_operand_expr):\n x, y = sample_operand_expr[a1], sample_operand_expr[a2]\n assert str(Pow(x, y)) == expr, \\\n f\"expected string representation of {expr} but got {str(Pow(x, y))}\"\n\n\n@pytest.fixture\ndef sample_string_set():\n x = Symbol('x')\n y = Symbol('y')\n tests = [(x + 1)**(y*x**3) + y**2*x*(2 / y),\n (1/x + 1/y)**2 + (1 + 2*x),\n 4*(x/y)**(0.5)\n ]\n return tests\n\n\n@pytest.mark.parametrize(\"idx, string\", [\n (0, '(x + 1) ^ (y * x ^ 3) + y ^ 2 * x * 2 / y'),\n (1, '(1 / x + 1 / y) ^ 2 + 1 + 2 * x'),\n (2, '4 * (x / y) ^ 0.5')\n])\ndef test_str_rep(sample_string_set, idx, string):\n expr = sample_string_set[idx]\n assert str(expr) == string, \\\n f\"expected string representation of {string} but got {str(expr)}\"\n\n\n@pytest.fixture\ndef sample_expr_set():\n x = Symbol('x')\n y = Symbol('y')\n tests = [(3 * x + 2**(y / 5) - 1, 1.5, 10, 7.5),\n (3 * x + 2**(y / 5) - 1, 2.5, 11, 11.09479341998814),\n (4 * x + x**2 * y + 3 * y + 2, 1.0, 2.5, 16),\n (4 * x + x**2 * y + 3 * y + 2, 1.1, 2.25, 15.8725)\n ]\n return tests\n\n\n@pytest.mark.parametrize(\"idx\", [\n (0),\n (1),\n (2),\n (3)\n])\ndef test_any_evaluate(sample_expr_set, idx):\n from tests.expression_tools import postvisitor, evaluate\n expr, x, y, val = sample_expr_set[idx]\n assert postvisitor(expr, evaluate, symbol_map={'x': x, 'y': y}) == val, \\\n f\"expected an evaluation of {val} for expression {expr}\"\n","repo_name":"callumfirth/M2R-RMAD","sub_path":"tests/test_evaluate.py","file_name":"test_evaluate.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"11211894197","text":"\nfrom flask import Flask, render_template, request,url_for\nimport test2\nimport json\n\napp=Flask(__name__)\n\n#index route starter\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/handle_data', methods=['POST'])\ndef handle_data():\n mid_state=request.form['front_state']\n mid_zip=request.form['front_zip']\n action = request.form.get('action')\n \n json_coords= json.loads(request.form['clickedLocation'])\n lat = json_coords['lat']#json likes to swap order so don't be startled about why they get swapped in the return\n lng = json_coords['lng']\n mid_node=test2.Node((lat,lng),0)\n print(f'Front end data received in flask route:{mid_state},{mid_zip},{mid_node}')\n\n if action=='bubble':\n sorting_algorithm='bubble'\n elif action=='merge':\n sorting_algorithm='merge'\n elif action=='selection':\n sorting_algorithm='selection'\n\n back_station=test2.get_stations(mid_node,mid_state,mid_zip,sorting_algorithm)\n\n #test2.get_stations returns fire station object\n #parameters available for fire station object:(self, name, geometry, zip_code, city, state, address, global_id, distance)\n #use back_station.parameter in the return\n #example return statement: return render_template('index.html',state=mid_state, zip_code=mid_zip, coords=back_station.name)#for testing\n back_lat=(back_station.geometry[0])\n back_lng=(back_station.geometry[1])\n return render_template('index.html',return_start_lng=lat, return_start_lat=lng,return_name=back_station.name, return_lat=back_lat, return_lng=back_lng, \n return_zip=back_station.zip_code, return_city=back_station.city, return_state=back_station.state, \n return_address=back_station.address, return_global_id=back_station.global_id, \n return_distance=back_station.distance)#the return viariables are set in the html, make sure they match here\n\nif __name__=='__main__':\n app.run(debug=True)","repo_name":"ReneLisasi/DRO_basic","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"17465106006","text":"import itertools as it\n\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom crystals import Atom, Crystal\nfrom matplotlib.ticker import FixedFormatter, FixedLocator\nfrom skued import electron_wavelength, indices_to_text, lorentzian\n\nfrom dissutils import LARGE_FIGURE_WIDTH, ImageGrid, named_arrow\n\nEWALD_RADIUS = 2 * np.pi / electron_wavelength(keV=90)\nEWALD_RADIUS_XRAY = 2 * np.pi / 0.95 # 13 keV x-rays\n\nELECTRONS_COLOR = \"k\"\nXRAY_COLOR = \"indigo\"\n\n# Abstract simple cubic crystal with 3Angs sides\nCRYSTAL = Crystal(unitcell=[Atom(\"C\", (0, 0, 0))], lattice_vectors=5 * np.eye(3))\n\nfig = plt.figure(figsize=(LARGE_FIGURE_WIDTH, LARGE_FIGURE_WIDTH / 1.5))\n(ax,) = ImageGrid(fig, rect=111, nrows_ncols=(1, 1), cbar_location=\"top\")\n\nky, kz = np.meshgrid(np.linspace(-6, 6, 256), np.linspace(-4, 7, 256))\nim = np.zeros_like(ky)\n\nfor k, l in it.product([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], repeat=2):\n _, qy, qz = CRYSTAL.scattering_vector((0, k, l))\n im += lorentzian(coordinates=[ky, kz], center=[qy, qz], fwhm=0.15)\n\nm = ax.imshow(\n im,\n vmin=0,\n cmap=\"CMRmap_r\",\n extent=[ky.min(), ky.max(), kz.max(), kz.min()],\n)\ncbar = ax.cax.colorbar(\n m, ticks=FixedLocator(locs=[0, im.max()]), format=FixedFormatter([\"0\", \"1\"])\n)\nax.cax.set_xlabel(\"$|\\\\tilde{V}(\\mathbf{q})|$ [a.u.]\")\nax.cax.xaxis.set_label_position(\"top\")\nax.cax.xaxis.tick_top()\n\n# Ewald spheres\nax.add_patch(\n mpatches.Circle(\n xy=(0, EWALD_RADIUS),\n radius=EWALD_RADIUS,\n fc=\"none\",\n ec=ELECTRONS_COLOR,\n )\n)\nax.add_patch(\n mpatches.Circle(\n xy=(0, EWALD_RADIUS_XRAY),\n radius=EWALD_RADIUS_XRAY,\n fc=\"none\",\n ec=XRAY_COLOR,\n linestyle=\"dashed\",\n )\n)\n\nfor (h, k, l) in [(0, 0, 0)]:\n ax.annotate(\n xy=(k, l),\n ha=\"center\",\n va=\"bottom\",\n text=indices_to_text(h, k, l),\n xytext=(k, l + 0.2),\n )\n\n# Lattice vectors\n_, _y, _z = CRYSTAL.scattering_vector((0, -4, -2))\narrow_kwds = dict(\n x=_y, y=_z, length_includes_head=True, width=0.001, head_width=0.1, fc=\"k\"\n)\n\nnamed_arrow(\n ax,\n dx=np.linalg.norm(CRYSTAL.reciprocal_vectors[1]),\n dy=0,\n text=r\"$\\mathbf{b}_2$\",\n toffset=(0, -0.1),\n tkwds=dict(va=\"top\", ha=\"center\"),\n **arrow_kwds\n)\nnamed_arrow(\n ax,\n dx=0,\n dy=np.linalg.norm(CRYSTAL.reciprocal_vectors[2]),\n text=r\"$\\mathbf{b}_3$\",\n toffset=(-0.1, 0),\n tkwds=dict(va=\"center\", ha=\"right\"),\n **arrow_kwds\n)\n\n\nelectron_handle = mlines.Line2D(\n [],\n [],\n color=ELECTRONS_COLOR,\n marker=None,\n linestyle=\"solid\",\n label=\"Electrons (90 keV)\",\n)\nxray_handle = mlines.Line2D(\n [], [], color=XRAY_COLOR, marker=None, linestyle=\"dashed\", label=\"X-rays (13 keV)\"\n)\n\nfig.legend(\n handles=[electron_handle, xray_handle],\n loc=\"center\",\n ncol=2,\n bbox_to_anchor=(0.5, 0.05),\n edgecolor=\"none\",\n)\n\nax.set_xlim([ky.min(), ky.max()])\nax.set_ylim([-2.7, 4.8])\nax.axis(\"off\")\n","repo_name":"LaurentRDC/dissertation","sub_path":"figures/scattering/ewald.py","file_name":"ewald.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"6766354466","text":"\"\"\"\nUtilities for coloring\n\nHere, take this:\n\nPYTHONMALLOC=malloc valgrind --tool=memcheck --error-limit=no python ...\n\"\"\"\n\nfrom numba import jit, int32, uint32, uint64, void, float64, boolean, prange\nimport numpy as np\nimport scipy.sparse as sps\n\n@jit(\n void(float64[:], int32[:], uint32[:]),\n nopython=True\n)\ndef _uniques_and_counts_compiled(\n data, indptr, counts):\n for i, (start, stop) in enumerate(zip(indptr, indptr[1:])):\n data[start:stop].sort()\n\n unique_ix = start\n for scan_ix in range(start + 1, stop):\n if data[unique_ix] == data[scan_ix]:\n continue\n unique_ix += 1\n data[unique_ix] = data[scan_ix]\n\n counts[i] = min(stop, 1 + unique_ix) - start\n\ndef get_uniques_and_counts(X):\n \"\"\"\n Accepts a CSR or CSC sparse matrix over float64.\n\n Returns (uniques, uniques_offsets, nunique), where\n uniques[uniques_offsets[i]:unique_offsets[i]+nunique[i]]\n contains the sorted, unique values for the i-th\n row (for CSR) or column (for CSC).\n \"\"\"\n assert sps.issparse(X), type(X)\n assert X.getformat() in ['csc', 'csr']\n assert X.dtype == np.float64\n\n n = len(X.indptr) - 1\n uniques = X.data.copy()\n nunique = np.zeros(n, np.uint32)\n _uniques_and_counts_compiled(uniques, X.indptr, nunique)\n offsets = X.indptr[:-1]\n return uniques, offsets, nunique\n\n@jit(\n void(\n float64[:], int32[:], uint32[:],\n float64[:], int32[:], uint32[:]),\n nopython=True\n)\ndef _remap_floats_compiled(\n data, indptr, data_out,\n uniques, offsets, nuniques):\n for i, (start, stop, ustart, ulen) in enumerate(zip(\n indptr, indptr[1:], offsets, nuniques)):\n ustop = ustart + ulen\n data_out[start:stop] = 1 + np.searchsorted(uniques[ustart:ustop], data[start:stop])\n\ndef remap_floats(X, uniques, offsets, nunique):\n \"\"\"\n Accepts a CSR or CSC sparse matrix X over float64, and\n its get_uniques_and_counts(X) output.\n\n Remaps the i-th smallest float in each row (for CSR) or\n column (for CSC) to the value i, returning a corresponding\n sparse matrix over uint32, starting the indexing from\n 1 (since 0 is our sparse fill value).\n \"\"\"\n assert sps.issparse(X), type(X)\n assert X.getformat() in ['csc', 'csr']\n assert X.dtype == np.float64\n\n data = np.empty(len(X.data), np.uint32)\n _remap_floats_compiled(\n X.data, X.indptr, data,\n uniques, offsets, nunique\n )\n\n constructor = sps.csc_matrix if X.getformat() else sps.csr_matrix\n return constructor((data, X.indices, X.indptr))\n\nfrom create_edgeset import uniquify, sort4, create_edgeset_u64 as create_edgeset_u64\n\ndef onehot(Xcategorical_csc_remapped, nunique):\n # accepts CSC remapped values (contiguous ints in each column)\n # return CSR, CSC for onehot.\n nnzc = np.diff(Xcategorical_csc_remapped.indptr)\n\n # Xcategorical - csc u32 sparse matrix with contiguous categorical values\n # nunique - number unique vals in each col, excl 0\n # nnzc - number nonzeros in each col\n col_count_base = np.roll(np.cumsum(nunique, dtype=np.uint32), 1)\n col_count_base[0] = 0\n\n # column i can be recoded to have integers\n # from col_count_base[i] + 1 (incl) to\n # col_count_base[i] + nunique[i] + 1 (excl)\n X = Xcategorical_csc_remapped\n data = X.data + np.repeat(col_count_base, nnzc).astype(np.uint32)\n\n # now that every categorical value is unique,\n # make set the indices to the data values (one-hotting, essentially)\n assert data.max() < 2 ** 31\n data = data.astype(np.int32)\n\n # uniqued original matrix, where value = its target column in binary form\n X = sps.csc_matrix((data, X.indices, X.indptr), shape=X.shape)\n X = X.tocsr()\n\n new_data = np.ones_like(X.data, dtype=bool)\n new_indices = X.data\n\n nrows = X.shape[0]\n ncols = col_count_base[-1] + nunique[-1] + 1\n Xbinary = sps.csr_matrix((new_data, new_indices, X.indptr), shape=(nrows, ncols))\n # creates a useless dummy column 0 with no hot values\n return Xbinary\n\n# create an inline adjacency list\n\n@jit(uint32(uint64), nopython=True)\ndef left(x):\n return x >> 32\n\n@jit(uint32(uint64), nopython=True)\ndef right(x):\n return x & (2 ** 32 - 1)\n\n@jit(void(uint64[:], uint32[:]), nopython=True)\ndef count_degree(edges, degree):\n for e64 in edges:\n l, r = left(e64), right(e64)\n degree[l] += 1\n degree[r] += 1\n\n@jit(void(uint64[:], uint32[::1], uint64[:], uint64[:]), nopython=True, parallel=True)\ndef fill_edges(edges, bidir_edges, start_offsets, start_offsets_immutable):\n for e64 in edges:\n l, r = left(e64), right(e64)\n bidir_edges[start_offsets[l]] = r\n bidir_edges[start_offsets[r]] = l\n start_offsets[l] += 1\n start_offsets[r] += 1\n\n noffsets = len(start_offsets_immutable) - 1\n for i in prange(noffsets):\n start = start_offsets_immutable[i]\n stop = start_offsets_immutable[i + 1]\n sort4(bidir_edges, start, stop - start)\n\nu32max = np.iinfo(np.uint32).max\n@jit(\n uint32(uint32[:], uint32[:], uint64[:],\n uint32[:], boolean[:]),\n nopython=True\n)\ndef _color_graph_compiled(\n vertex_order, adjacency, vertex_offsets,\n color_map, adjacent_colors):\n ncolors = 0\n for v in vertex_order:\n vstart, vend = vertex_offsets[v], vertex_offsets[v + 1]\n for n in adjacency[vstart:vend]:\n if color_map[n] != u32max:\n adjacent_colors[color_map[n]] = True\n\n color = ncolors\n for i in range(ncolors):\n if not adjacent_colors[i]:\n color = i\n break\n\n ncolors = max(color + 1, ncolors)\n color_map[v] = color\n\n if vend - vstart > ncolors:\n adjacent_colors[:ncolors] = False\n else:\n for n in adjacency[vstart:vend]:\n if color_map[n] != u32max:\n adjacent_colors[color_map[n]] = False\n return ncolors\n\ndef color_graph(degree, bidir_edges, vertex_offsets, color_ub=2 ** 16):\n nverts = len(degree)\n smallest_first = np.argsort(degree).astype(np.uint32)\n largest_first = smallest_first[::-1]\n\n color_map = np.full(int(nverts), u32max, dtype=np.uint32)\n # will segfault if you have >2**10 colors\n adjacent_colors = np.zeros(color_ub, dtype=bool)\n\n ncolors = _color_graph_compiled(\n largest_first, bidir_edges, vertex_offsets,\n color_map, adjacent_colors)\n\n return ncolors, color_map\n\n@jit(void(uint32[:], uint32[:, ::1], uint32[:]), nopython=True)\ndef _color_remap_compiled(\n remap_map,\n color_coded_T,\n color_cards):\n ncolors, nrows = color_coded_T.shape\n for col in range(ncolors):\n column = color_coded_T[col]\n sort4(color_coded_T.ravel(), col * nrows, nrows)\n ucol = uniquify(column)\n color_cards[col] = ucol - 1\n for i, c in enumerate(column[:ucol]):\n remap_map[c] = i\n\n\ndef color_remap(Xbinary_csr, ncolors, color_map, nnzr):\n nverts = Xbinary_csr.shape[1]\n nrows = Xbinary_csr.shape[0]\n color_coded = np.zeros((nrows, ncolors), dtype=np.uint32)\n color_coded_T = np.zeros((ncolors, nrows), dtype=np.uint32)\n\n row_ix = np.repeat(np.arange(0, nrows, dtype=np.uint32), nnzr)\n active_columns = Xbinary_csr.indices\n colors = color_map[active_columns]\n color_coded[row_ix, colors] = active_columns\n color_coded_T[colors, row_ix] = active_columns\n\n # convert unique factors back into compact intervals\n remap_map = np.zeros(int(nverts), np.uint32)\n color_cards = np.zeros(ncolors, np.uint32)\n _color_remap_compiled(\n remap_map,\n color_coded_T,\n color_cards)\n\n Xcategorical_color = np.take(remap_map, color_coded)\n\n return Xcategorical_color, color_cards\n\nimport time\nfrom contextlib import contextmanager\nimport sys\n\nclass _timeit:\n def __init__(self):\n self.seconds = 0\n\n def set_seconds(self, x):\n self.seconds = x\n\n@contextmanager\ndef timeit(name=None, name_pad=32):\n if name:\n print(('{:>' + str(name_pad) + '}').format(name), end='')\n sys.stdout.flush()\n x = _timeit()\n t = time.time()\n yield x\n x.set_seconds(time.time() - t)\n if name:\n print(\" ...took {:10.2f} sec \".format(x.seconds))\n\n\nfrom joblib import Memory\nmemory = Memory('urls.coloring.cache')\n\nfrom sklearn.datasets import load_svmlight_file\n\ndef read_svmlight(filename):\n X, y = load_svmlight_file('url_svmlight/' + filename)\n assert X.shape[0] == len(y)\n return X, y\n\nimport os\ndatafiles = [f for f in os.listdir('url_svmlight') if f.startswith('Day') and f.endswith('.svm')]\ndatafiles = list(sorted((f for f in datafiles), key=lambda x: int(x[len('Day'):x.index('.svm')])))\n\nfrom multiprocessing import Pool, cpu_count\n\n@memory.cache\ndef read_all_svmlight():\n Xs, ys = [], []\n nrows = 0\n ncols = 0\n with Pool(cpu_count()) as p:\n for X, y in p.map(read_svmlight, datafiles):\n nrows += len(y)\n ncols = max(ncols, X.shape[1])\n Xs.append(X)\n ys.append(y)\n return Xs, ys, nrows, ncols\n\ndef pad_columns(Xs, ncols):\n # TODO: csc-specialized version of this should be really fast\n for i in range(len(Xs)):\n r, c = Xs[i].shape\n if ncols > c:\n Xs[i] = sps.hstack([Xs[i], sps.lil_matrix((r, ncols - c), dtype=Xs[i].dtype)], 'csc')\n\n@memory.cache\ndef extract_continuous():\n continuous_feature_ixs = []\n with open('url_svmlight/FeatureTypes') as f:\n for line in f:\n continuous_feature_ixs.append(int(line))\n\n return continuous_feature_ixs\n\n@memory.cache\ndef extract_sparse():\n with timeit('load all svmlight files'):\n Xs, ys, nrows, ncols = read_all_svmlight()\n\n with timeit('pad columns'):\n pad_columns(Xs, ncols)\n\n with timeit('gather feature type ixs'):\n continuous_feature_ixs = extract_continuous()\n cat_feature_ixs = [i for i in range(Xs[0].shape[1]) if i not in set(continuous_feature_ixs)]\n\n with timeit('extract continuous'):\n Xcontinuous = np.concatenate([X[:, continuous_feature_ixs].todense() for X in Xs])\n\n with timeit('extract categorical'):\n # TODO: csc-specialized version of this should be really fast\n Xcategorical_csc = sps.vstack([X[:, cat_feature_ixs] for X in Xs], 'csc')\n\n return Xcontinuous, Xcategorical_csc, ys, nrows, ncols\n\n@memory.cache\ndef get_all_data():\n Xcontinuous, Xcategorical_csc, ys, nrows, ncols = extract_sparse()\n\n with timeit('cat label'):\n y = np.concatenate(ys) == 1\n y = y.astype(float)\n\n with timeit('unique column values'):\n uniques, offsets, nunique = get_uniques_and_counts(Xcategorical_csc)\n\n with timeit('remap categorical floats'):\n Xcategorical_csc = remap_floats(Xcategorical_csc, uniques, offsets, nunique)\n # coloring works just fine with categorical input, since you can create\n # a new vertex for categorical values\n with timeit('onehot'):\n Xbinary_csr = onehot(Xcategorical_csc, nunique)\n \n with timeit('csc'):\n Xcsc = Xbinary_csr.tocsc()\n\n ncols = Xbinary_csr.shape[1] + Xcontinuous.shape[1]\n\n return Xcontinuous, Xbinary_csr, Xcsc, y, nrows, ncols\n\n# messing with env for C lib parallelism here...\n# can't use this with other jobs with diff nthreads\ndef set_parallelism(nthreads):\n if 'NUMBA_NUM_THREADS' in os.environ:\n assert os.environ['NUMBA_NUM_THREADS'] == str(nthreads), \"once set, can't override\"\n else:\n os.environ['NUMBA_NUM_THREADS'] = str(nthreads)\n if 'OMP_NUM_THREADS' in os.environ:\n assert os.environ['OMP_NUM_THREADS'] == str(nthreads), \"once set, can't override\"\n else:\n os.environ['OMP_NUM_THREADS'] = str(nthreads)\n\ndef sums_to_means_precondition(offsets, means, counts):\n assert np.all(0 <= offsets)\n assert np.all(offsets <= len(means))\n assert np.all(offsets[-1] == len(means))\n assert len(means) == len(counts)\n\n@jit(void(uint32[:], float64[:], uint32[:]), nopython=True)\ndef sums_to_means(offsets, means, counts):\n for start, stop in zip(offsets, offsets[1:]):\n net_sum = means[start:stop].sum()\n net_count = counts[start:stop].sum()\n for i in range(start, stop):\n if counts[i]:\n means[i] = means[i] / counts[i]\n else:\n means[i] = net_sum / net_count if net_count else 0\n\ndef fit_target_encode_csc_precondition(\n indptr, data, indices, y,\n offsets, counts, means):\n sums_to_means_precondition(offsets, means, counts)\n assert len(indptr) == len(offsets)\n assert indptr[-1] == len(data)\n\n for col, (start, stop) in enumerate(zip(indptr, indptr[1:])):\n card = offsets[col + 1] - offsets[col]\n assert card >= 0\n assert np.all(0 <= data[start:stop] -1)\n assert np.all(data[start:stop] - 1 < card)\n assert np.all(0 <= offsets[col] + data[start:stop] - 1)\n assert np.all(offsets[col] + data[start:stop] - 1 < len(means))\n assert np.all(0 <= indices)\n assert np.all(indices < len(y))\n\n@jit(void(int32[:], uint32[:], int32[:], float64[:],\n uint32[:], uint32[:], float64[:]),\n nopython=True)\ndef fit_target_encode_csc(\n indptr, data, indices, y,\n offsets, counts, means):\n for col, (start, stop) in enumerate(zip(indptr, indptr[1:])):\n for nnz_ix in range(start, stop):\n value_ix = offsets[col] + data[nnz_ix] - 1\n means[value_ix] += y[indices[nnz_ix]]\n counts[value_ix] += 1\n\n sums_to_means(offsets, means, counts)\n\ndef fit_target_encode_dense_precondition(\n Xcat, y,\n offsets, counts, means):\n sums_to_means_precondition(offsets, means, counts)\n nrows, ncols = Xcat.shape\n assert ncols + 1 == len(offsets)\n\n for col in range(ncols):\n card = offsets[col + 1] - offsets[col]\n assert card > 0\n assert np.all(0 <= Xcat[:, col] - 1)\n assert np.all(Xcat[:, col] - 1 < card)\n assert np.all(0 <= offsets[col] + Xcat[:, col] - 1)\n assert np.all(offsets[col] + Xcat[:, col] - 1 < len(means))\n\n@jit(void(uint32[:, :], float64[:],\n uint32[:], uint32[:], float64[:]),\n nopython=True)\ndef fit_target_encode_dense(\n Xcat, y,\n offsets, counts, means):\n nrows, ncols = Xcat.shape\n for row in range(nrows):\n # TODO invert, then vectorize this loop\n for col in range(ncols):\n value_ix = offsets[col] + Xcat[row, col] - 1\n means[value_ix] += y[row]\n counts[value_ix] += 1\n\n sums_to_means(offsets, means, counts)\n\n@jit(void(int32[:], uint32[:],\n uint32[:], float64[:],\n float64[:]),\n nopython=True)\ndef transform_target_encode_csc(\n indptr, data,\n offsets, means,\n data_out):\n for col, (start, stop) in enumerate(zip(indptr, indptr[1:])):\n data_out[start:stop] = means[offsets[col] + data[start:stop] - 1]\n\n@jit(void(uint32[:, :],\n uint32[:], float64[:],\n float64[:, :]),\n nopython=True)\ndef transform_target_encode_dense(\n Xcat,\n offsets, means,\n data_out):\n nrows, ncols = Xcat.shape\n for row in range(nrows):\n # TODO: invert, then vectorize this loop\n for col in range(ncols):\n value_ix = offsets[col] + Xcat[row, col] - 1\n data_out[row, col] = means[value_ix]\n\nclass TargetEncoder:\n \"\"\"\n Should be initialized with\n\n cards - cardinalities for categorical columns, in order,\n excluding zeros from cardinality\n is_sparse - whether to expect sparse categorical inputs or dense ones\n\n It's OK to know this cardinality info ahead of time since\n values unseen in training are filled with the average\n target value from the training set.\n \"\"\"\n\n def __init__(self, *, cards, is_sparse, debug):\n self.cards = cards.astype(np.uint32)\n self.is_sparse = is_sparse\n self.debug = debug\n\n # means with imputation values for non-zero entries\n self.means = np.zeros(np.sum(cards), np.float64)\n self.counts = np.zeros(len(self.means), np.uint32)\n self.offsets = np.cumsum(np.insert(cards, 0, 0), dtype=np.uint32)\n\n def check_sparse(self, Xcat):\n assert self.is_sparse == sps.issparse(Xcat), (self.is_sparse, type(Xcat))\n if self.is_sparse:\n assert Xcat.getformat() == 'csc', Xcat.getformat()\n\n def fit(self, X, y=None):\n \"\"\"\n Assumes csc for is_sparse, assumes all continuous columns are first\n expects input X to be a tuple (Xcont, Xcat) of design matrics\n for continuous, categorical features.\n\n Xcont should be over float64, Xcat should be over uint32.\n\n Xcat may be csc sparse or dense. If it is sparse, then\n the transformation of this operator will (by necessity of\n classifier interfaces) generate a sparse matrix with the\n categorical values.\n \"\"\"\n Xcont, Xcat = X\n self.check_sparse( Xcat)\n\n if self.is_sparse:\n args = (\n Xcat.indptr, Xcat.data, Xcat.indices, y,\n self.offsets, self.counts, self.means)\n if self.debug:\n fit_target_encode_csc_precondition(*args)\n fit_target_encode_csc(*args)\n else:\n args = (\n Xcat, y,\n self.offsets, self.counts, self.means)\n if self.debug:\n fit_target_encode_dense_precondition(*args)\n fit_target_encode_dense(*args)\n\n return self\n\n def transform(self, X, y=None):\n \"\"\"\n See self.fit() documentation for expected X input.\n\n Ignores y argument.\n\n Returns a single matrix, the new design matrix\n after categorical encoding, which will be sparse\n iff self.is_sparse\n \"\"\"\n Xcont, Xcat = X\n self.check_sparse(Xcat)\n\n if self.is_sparse:\n data_out = np.empty(Xcat.data.shape, np.float64)\n transform_target_encode_csc(\n Xcat.indptr, Xcat.data,\n self.offsets, self.means,\n data_out)\n Xcat_encoded = sps.csc_matrix((data_out, Xcat.indices, Xcat.indptr))\n return sps.hstack([Xcont, Xcat_encoded], 'csc')\n else:\n data_out = np.zeros(Xcat.shape, np.float64)\n transform_target_encode_dense(\n Xcat,\n self.offsets, self.means,\n data_out\n )\n return np.hstack([Xcont, Xcat])\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X, y)\n","repo_name":"sisudata/coloring","sub_path":"utils_graph_coloring.py","file_name":"utils_graph_coloring.py","file_ext":"py","file_size_in_byte":18651,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
+{"seq_id":"36278331160","text":"import numpy\n\n__author__ = 'Emanuele Tamponi'\n\n\nclass AssociationMeasure(object):\n\n def __init__(self, measure=\"wilks\"):\n self.measure = measure\n\n def __call__(self, inputs, labels):\n w_matrix, b_matrix, rank = self._calculate_matrices(inputs, labels)\n try:\n eigenvalues = self._calculate_eigenvalues(w_matrix, b_matrix, rank)\n except numpy.linalg.LinAlgError:\n if numpy.all(b_matrix == 0):\n return 0\n else:\n return 1\n if self.measure in {\"wilks\", \"armonic\"}:\n wilks_lambda = 1\n for eigenvalue in eigenvalues:\n wilks_lambda *= 1.0 / (1.0 + eigenvalue)\n if self.measure == \"armonic\":\n return 1 - pow(wilks_lambda, 1.0 / rank)\n else:\n return 1 - wilks_lambda\n if self.measure == \"roy\":\n return eigenvalues[0] / (1 + eigenvalues[0])\n if self.measure == \"pillai\":\n v = (eigenvalues / (1 + eigenvalues)).sum()\n return v / rank\n if self.measure == \"lawley\":\n v = eigenvalues.sum() / rank\n return v / (1 + v)\n\n @staticmethod\n def _calculate_matrices(inputs, labels):\n classes = numpy.unique(labels)\n feature_num = inputs.shape[1]\n class_means = numpy.zeros((len(classes), feature_num))\n class_sizes = numpy.zeros(len(classes))\n split_inputs = []\n for i, c in enumerate(classes):\n split_inputs.append(inputs[labels == c])\n class_means[i] = split_inputs[-1].mean(axis=0)\n class_sizes[i] = len(split_inputs[-1])\n mean_input = inputs.mean(axis=0)\n b_matrix = numpy.zeros((feature_num, feature_num))\n for class_mean, class_size in zip(class_means, class_sizes):\n class_shift = (class_mean - mean_input).reshape((feature_num, 1))\n b_matrix += class_size * numpy.dot(class_shift, class_shift.transpose())\n w_matrix = numpy.zeros((feature_num, feature_num))\n for class_mean, class_inputs in zip(class_means, split_inputs):\n for x in class_inputs:\n shift = (x - class_mean).reshape((feature_num, 1))\n w_matrix += numpy.dot(shift, shift.transpose())\n rank = min(len(classes) - 1, feature_num)\n return w_matrix, b_matrix, rank\n\n @staticmethod\n def _calculate_eigenvalues(w_matrix, b_matrix, rank):\n inv_w_matrix = numpy.linalg.inv(w_matrix)\n eigenvalues = numpy.sort(abs(numpy.linalg.eigvals(numpy.dot(inv_w_matrix, b_matrix))))[::-1]\n return eigenvalues[:rank]\n","repo_name":"etamponi/emetrics","sub_path":"emetrics/coefficients/association_measure.py","file_name":"association_measure.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35123686175","text":"import random\nimport string\n\nrandstr = lambda n: ''.join(random.choices(string.ascii_lowercase + string.ascii_uppercase + string.digits, k=n))\n\n\nclass Node():\n def __init__(self, k, v, n, f=None):\n self.key = k\n self.value = v\n self.next: Node = n\n self.forward: Node = f\n def clone(self):\n return Node(self.key, self.value, self.next, self.forward)\n\n\ndef toSortedLinkedList(lst):\n lst = sorted(lst)\n n = Node(lst[-1][0], lst[-1][1], None)\n for i in lst[:-1][::-1]:\n n = Node(i[0], i[1], n)\n return n\n\n\ndef skiplist(node, factor=0.5):\n if not node:\n return None\n head = node.clone()\n head.forward = node\n tmp = head\n n = head\n while n.next:\n n = n.next\n if random.random() > factor:\n continue\n new = n.clone()\n new.forward = n\n tmp.next = new\n tmp = new\n return head\n\n\ndef search(node: Node, key):\n while node:\n if key == node.key:\n return node\n if node.next and node.next.key >key:\n node = node.forward\n else:\n node = node.next\n","repo_name":"paletteOvO/CodeColle","sub_path":"Python/skiplist.py","file_name":"skiplist.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"25434889907","text":"class Person:\r\n def __init__(self, name, num, date):\r\n self.name = name\r\n self.num = num\r\n self.date = date\r\n \r\n def __str__(self):\r\n return '{}: {} {}'.format(self.name, self.num, self.date)\r\n\r\nf = open(\"SOTAY.txt\", 'r')\r\nf1 = open(\"DIENTHOAI.txt\", 'w')\r\n\r\ndata = []\r\nfor i in f:\r\n data.append(i[:-1])\r\na = []\r\ni = 0\r\nwhile i < len(data):\r\n s = data[i].split()\r\n if s[0] == 'Ngay':\r\n i += 1\r\n while True:\r\n x = Person(data[i], data[i+1], s[1])\r\n a.append(x)\r\n i += 2\r\n if i > len(data) - 1:\r\n break\r\n x = data[i].split()\r\n if x[0] == 'Ngay':\r\n break\r\n\r\nfor i in a:\r\n print(i)\r\n f1.write(str(i) + '\\n')\r\nf.close()\r\nf1.close()","repo_name":"ducanhnguyen07/Python","sub_path":"sao_chep_danh_ba.py","file_name":"sao_chep_danh_ba.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"42653448101","text":"import importlib\nimport os\nfrom typing import Optional\n\nimport hydra\nimport numpy as np\nfrom omegaconf import OmegaConf, DictConfig\nfrom torch.utils import data\nfrom src.datasets import TrainDataset, ValidationDataset\nfrom src.logger import WandbLogger\nfrom src.metrics import compute_metrics\nfrom src.utils import get_device, set_seeds\nfrom tqdm.auto import tqdm\nimport torch\nfrom torch.nn.utils import clip_grad_norm_\nfrom src.utils import count_parameters\nimport pprint\nimport glob\n\n\nclass Trainer:\n def __init__(self, config):\n self.config = config\n self.learning_rate = config.optimizer.learning_rate\n\n # create the model\n self.model = getattr(importlib.import_module(\"src.models\"), config.model)(\n input_channels=config.image_channels, n_features=config.n_features)\n\n # define the loss\n self.criterion = getattr(importlib.import_module(\"torch.nn\"), config.loss)()\n\n # define the optimizer\n self.optimizer = getattr(importlib.import_module(\"adamp\"), config.optimizer.name)(\n params=self.model.parameters(),\n betas=tuple(config.optimizer.betas),\n eps=config.optimizer.eps,\n lr=self.learning_rate\n )\n\n # get the device\n self.device = get_device()\n self.model = self.model.to(self.device)\n\n # configure logger\n configuration = OmegaConf.to_object(config)\n pp = pprint.PrettyPrinter()\n pp.pprint(configuration)\n if config.wandb.logging:\n self.logger = WandbLogger(name=config.wandb.run_name, config=configuration,\n project=config.wandb.project_name, entity=config.wandb.entity_name)\n else:\n self.logger = None\n\n # create train dataloader from the given training dataset\n train_dataset = TrainDataset(config.train_dataset.path,\n scales=list(config.train_dataset.scales),\n degradation=config.train_dataset.degradation,\n patch_size=config.train_dataset.patch_size,\n augment=config.train_dataset.augment)\n self.train_dataloader = data.DataLoader(train_dataset,\n batch_size=config.train_dataset.batch_size,\n shuffle=config.train_dataset.shuffle,\n collate_fn=TrainDataset.collate_fn,\n num_workers=config.train_dataset.num_workers,\n pin_memory=config.train_dataset.pin_memory)\n\n # create validation dataloader from the given validation dataset\n val_dataset = ValidationDataset(config.val_dataset.path,\n scale=config.val_dataset.scale,\n degradation=config.val_dataset.degradation,\n n_images=config.val_dataset.n_images_to_use)\n self.val_dataloader = data.DataLoader(val_dataset,\n batch_size=config.val_dataset.batch_size,\n shuffle=config.val_dataset.shuffle,\n num_workers=config.val_dataset.num_workers,\n pin_memory=config.val_dataset.pin_memory)\n\n def train(self):\n\n # set initial values for total training epochs and steps\n print(\"Starting training...\")\n finished = False\n\n # load the checkpoint if required\n if self.config.load_checkpoint:\n checkpoint = self.checkpoint_load()\n\n # if the checkpoint dictionary is an empty dict, checkpoint is not loaded so initialize values\n if bool(checkpoint):\n # initialize the current epoch metrics by loading from the checkpoint\n epochs = checkpoint[\"epochs\"]\n if self.config.restart_steps_count:\n self.learning_rate = self.config.optimizer.learning_rate\n steps = 0\n for param_group in self.optimizer.param_groups:\n param_group[\"lr\"] = self.learning_rate\n else:\n self.learning_rate = checkpoint[\"learning_rate\"]\n steps = checkpoint[\"steps\"]\n best_train_psnr = checkpoint[\"best_train_psnr\"]\n best_train_ssim = checkpoint[\"best_train_ssim\"]\n best_val_psnr = checkpoint[\"best_val_psnr\"]\n best_val_ssim = checkpoint[\"best_val_ssim\"]\n else:\n steps = 0\n epochs = 0\n best_train_psnr = 0\n best_train_ssim = 0\n best_val_psnr = 0\n best_val_ssim = 0\n else:\n steps = 0\n epochs = 0\n best_train_psnr = 0\n best_train_ssim = 0\n best_val_psnr = 0\n best_val_ssim = 0\n\n # while the training is not finished (i.e. we haven't reached the max number of training steps)\n while not finished:\n\n # set the model in training mode since at the end of each epoch the model is set to eval mode by the eval\n # method\n self.model.train()\n\n # initialize the current epoch metrics\n train_loss = 0\n train_samples = 0\n train_psnr = 0\n train_ssim = 0\n train_sr_hr_comparisons = []\n\n # for each batch in the training set\n for scale, lrs, hrs in tqdm(self.train_dataloader, position=0):\n\n # send lr and hr batches to device\n lrs = lrs.to(self.device)\n hrs = hrs.to(self.device)\n batch_size = lrs.size()[0]\n\n # zero the gradients\n self.optimizer.zero_grad()\n\n # do forward step in the model to compute sr images\n srs = self.model(lrs, scale)\n\n # compute loss between srs images and hrs\n loss = self.criterion(srs, hrs)\n\n # add current loss to the training loss\n train_loss += loss.item() * batch_size\n train_samples += batch_size\n\n # convert the two image batches to numpy array and reshape to have channels in last dimension\n hrs = hrs.cpu().detach().numpy().transpose(0, 2, 3, 1)\n srs = srs.cpu().detach().numpy().transpose(0, 2, 3, 1)\n\n # compute the current training metrics\n psnr, ssim = compute_metrics(hrs, srs)\n\n # add metrics of the current batch to the total sum\n train_psnr += np.sum(psnr)\n train_ssim += np.sum(ssim)\n\n # create an image containing the sr and hr image side by side and append to the array of comparison\n # images\n sr_hr = np.concatenate((srs[0], hrs[0]), axis=1)\n train_sr_hr_comparisons.append(sr_hr)\n\n # do a gradient descent step\n loss.backward()\n if self.config.clip is not None:\n clip_grad_norm_(self.model.parameters(), self.config.clip)\n self.optimizer.step()\n\n # increment the number of total steps\n steps += 1\n\n # half learning rate\n if (steps % self.config.optimizer.halving_steps) == 0:\n halved_lr = self.learning_rate / 2\n self.learning_rate = max(halved_lr, self.config.optimizer.min_learning_rate)\n for param_group in self.optimizer.param_groups:\n param_group[\"lr\"] = self.learning_rate\n\n # checkpoint\n if (steps % self.config.checkpoint_every) == 0:\n checkpoint_info = {\"learning_rate\": self.learning_rate,\n \"epochs\": epochs,\n \"steps\": steps,\n \"best_train_psnr\": best_train_psnr,\n \"best_train_ssim\": best_train_ssim,\n \"best_val_psnr\": best_val_psnr,\n \"best_val_ssim\": best_val_ssim\n }\n\n # checkpoint the training\n self.checkpoint_save(checkpoint=checkpoint_info)\n\n # if number of maximum training steps is reached\n if steps >= self.config.max_training_steps:\n # finish the training by breaking the for loop and the outer loop\n finished = True\n break\n\n # compute the current epoch training loss\n train_loss /= train_samples\n\n # compute the average metrics for the current training epoch\n train_psnr = round(train_psnr / train_samples, 2)\n train_ssim = round(train_ssim / train_samples, 4)\n\n # evaluate the model for each scale at the end of the epoch (when we looped the entire training set) and get\n # the validation loss and metrics\n val_loss, val_psnr, val_ssim, val_sr_hr_comparisons = self.validate()\n\n # compute the new best train metrics\n best_train_psnr = max(best_train_psnr, train_psnr)\n best_train_ssim = max(best_train_ssim, train_ssim)\n\n # compute the new best validation metric\n best_val_psnr = max(best_val_psnr, val_psnr)\n best_val_ssim = max(best_val_ssim, val_ssim)\n\n # print the metrics at the end of the epoch\n print(\"Epoch:\", epochs + 1, \"- total_steps:\", steps + 1,\n \"\\n\\tTRAIN\",\n \"\\n\\t- train loss:\", train_loss,\n \"\\n\\t- train psnr:\", train_psnr,\n \"\\n\\t- best train psnr:\", best_train_psnr,\n \"\\n\\t- train ssim:\", train_ssim,\n \"\\n\\t- best train ssim:\", best_train_ssim,\n \"\\n\\tVAL\",\n \"\\n\\t- val loss:\", val_loss,\n \"\\n\\t- val psnr:\", val_psnr,\n \"\\n\\t- best val psnr:\", best_val_psnr,\n \"\\n\\t- val ssim:\", val_ssim,\n \"\\n\\t- best val ssim:\", best_val_ssim)\n\n # log metrics to the logger at each training step if required\n if self.logger:\n self.logger.log(\"train_loss\", train_loss, epochs)\n self.logger.log(\"train_psnr\", train_psnr, epochs)\n self.logger.log(\"train_ssim\", train_ssim, epochs)\n self.logger.log(\"best_train_psnr\", best_train_psnr, summary=True)\n self.logger.log(\"best_train_ssim\", best_train_ssim, summary=True)\n self.logger.log(\"val_loss\", val_loss, epochs)\n self.logger.log(\"val_psnr\", val_psnr, epochs)\n self.logger.log(\"val_ssim\", val_ssim, epochs)\n self.logger.log(\"best_val_psnr\", best_val_psnr, summary=True)\n self.logger.log(\"best_val_ssim\", best_val_ssim, summary=True)\n self.logger.log(\"total_steps\", steps, step=epochs)\n self.logger.log(\"learning_rate\", self.learning_rate, step=epochs)\n self.logger.log_images(train_sr_hr_comparisons[:self.config.wandb.n_images_to_log],\n caption=\"Left: SR, Right: ground truth (HR)\",\n name=\"Training samples\", step=epochs)\n self.logger.log_images(val_sr_hr_comparisons[:self.config.wandb.n_images_to_log],\n caption=\"Left: SR, Right: ground truth (HR)\",\n name=\"Validation samples\", step=epochs)\n\n # increment number of epochs\n epochs += 1\n\n print(\"Training finished! Saving model...\")\n self.save(self.config.output_model_file)\n print(\"Done!\")\n\n def validate(self):\n print(\"Evaluating...\")\n\n # set model to eval mode\n self.model.eval()\n\n # initialize current validation epoch metrics\n val_samples = 0\n val_loss = 0\n val_psnr = 0\n val_ssim = 0\n val_sr_hr_comparisons = []\n\n # disable gradient computation\n with torch.no_grad():\n for scale, lr, hr in tqdm(self.val_dataloader, position=0):\n # send lr and hr to device\n lr = lr.to(self.device)\n hr = hr.to(self.device)\n batch_size = lr.size()[0]\n\n # do forward step in the model to compute sr images\n sr = self.model(lr, scale)\n\n # compute the validation loss for the current scale\n loss = self.criterion(sr, hr)\n val_loss += loss.item() * batch_size\n val_samples += batch_size\n\n # convert the two image batches to numpy array and reshape to have channels in last dimension\n hr = hr.cpu().detach().numpy().transpose(0, 2, 3, 1)\n sr = sr.cpu().detach().numpy().transpose(0, 2, 3, 1)\n\n # comupute psnr and ssim for the current validation sample\n psnr, ssim = compute_metrics(hr, sr)\n\n # add metrics of the current batch to the total sum\n val_psnr += np.sum(psnr)\n val_ssim += np.sum(ssim)\n\n # create an image containing the sr and hr image side by side and append to the array of comparison\n # images\n sr_hr = np.concatenate((sr[0], hr[0]), axis=1)\n val_sr_hr_comparisons.append(sr_hr)\n\n # compute the average val loss for the current validation epoch\n val_loss /= val_samples\n\n # compute the average metrics for the current validation epoch\n val_psnr = round(val_psnr / val_samples, 2)\n val_ssim = round(val_ssim / val_samples, 4)\n\n return val_loss, val_psnr, val_ssim, val_sr_hr_comparisons\n\n def save(self, filename: str):\n filename = f\"{filename}.pt\"\n trained_model_path = self.config.model_folder\n if not os.path.isdir(trained_model_path):\n os.makedirs(trained_model_path)\n file_path = f\"{trained_model_path}{filename}\"\n\n print(f\"Saving trained model to {file_path}...\")\n\n # save network weights\n torch.save(self.model.state_dict(), file_path)\n\n def load(self, filename: str) -> None:\n filename = f\"{filename}.pt\"\n trained_model_path = self.config.model_folder\n if os.path.isdir(trained_model_path):\n file_path = f\"{trained_model_path}{filename}\"\n if os.path.isfile(file_path):\n print(f\"Loading model from {file_path}...\")\n weights = torch.load(file_path, map_location=torch.device(\"cpu\"))\n self.model.load_state_dict(weights)\n print(\"Done!\")\n else:\n print(\"Weights file not found.\")\n else:\n print(\"The directory of the trained models does not exist.\")\n\n def checkpoint_save(self, checkpoint: dict) -> None:\n print(f\"Checkpointing at step {checkpoint['steps']}...\")\n checkpoint_path = f\"{self.config.model_folder}checkpoints/\"\n if not os.path.isdir(checkpoint_path):\n os.makedirs(checkpoint_path)\n file_path = f\"{checkpoint_path}{self.config.checkpoint_file}.pt\"\n\n checkpoint['model_weights'] = self.model.state_dict()\n checkpoint['optimizer_weights'] = self.optimizer.state_dict()\n\n # remove old checkpoints to save storage\n folder = glob.glob(f\"{checkpoint_path}*\")\n for file in folder:\n os.remove(file)\n\n # checkpoint the training\n torch.save(checkpoint, file_path)\n\n def checkpoint_load(self) -> dict:\n checkpoint_path = f\"{self.config.model_folder}checkpoints/\"\n\n # if the folder with checkpoints exists and contains the checkpoint file\n if os.path.isdir(checkpoint_path):\n checkpoint_file_path = f\"{checkpoint_path}{self.config.checkpoint_file}.pt\"\n if os.path.isfile(checkpoint_file_path):\n # load checkpoint information from the file\n print(f\"Loading checkpoint from file {checkpoint_file_path}...\")\n checkpoint = torch.load(checkpoint_file_path, map_location=torch.device(\"cpu\"))\n\n self.model.load_state_dict(checkpoint['model_weights'])\n self.optimizer.load_state_dict(checkpoint['optimizer_weights'])\n\n return checkpoint\n else:\n # no file exists in the folder, so return None\n print(\"Checkpoint file does not exist. Training is starting from the beginning...\")\n return {}\n else:\n # the checkpoint folder does not exist, so return None\n print(\"Checkpoint folder does not exist. Training is starting from the beginning...\")\n return {}\n\n\n@hydra.main(version_base=None, config_path=\"../config/\", config_name=\"training\")\ndef main(config: DictConfig):\n # set seeds for reproducibility\n if config.seed:\n set_seeds(config.seed)\n\n # create trainer with the given testing configuration\n trainer = Trainer(config)\n count_parameters(trainer.model)\n\n # run the training\n trainer.train()\n\n # if logging is enabled, finish the logger\n if config.wandb.logging:\n trainer.logger.finish()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pierclgr/MPRNet-SR","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":17772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"33310970494","text":"import numpy as np\nimport pygame\nimport sys\nimport os\nimport math\nfrom tkinter import *\n\n\nos.environ['SDL_VIDEO_CENTERES'] = '1'\n\npygame.init()\n\n#Variáveis Globais\ncor_tabuleiro = (255,244,97)\ncor_fundo = (4,44,29)\nverde = (115, 255, 204)\nrosa = (243,129,199)\n\ncont_linha = 6\ncont_coluna = 7\n\nsecc_tabuleiro = 100 #tamanho de cada \"círculo\" do tabuleiro\nwidth = cont_coluna * secc_tabuleiro\nheight = (cont_linha+1) * secc_tabuleiro\n\ntamanho = (width, height)\n\nscreen = pygame.display.set_mode(tamanho)\n\nbtn_font = (\"Press Start 2P\", 10)\n\nplayer1 = \"Zé Ninguém\"\nplayer2= \"Jane Doe\"\n\ndef font(size):\n #Devolve a font no tamanho que eu quiser\n return pygame.font.Font(\"assets/font.ttf\", size)\n\ndef criar_tabuleiro():\n #Cria tabuleiro\n tabuleiro = np.zeros ((cont_linha,cont_coluna)) #np.zeros - devolve uma nova array com 6 linhas por 7 colunas preenchida com 0s\n return tabuleiro\n\n\ndef ins_peça(tabuleiro, linha, coluna, peça):\n #Inserir a peça no tabuleiro\n tabuleiro[linha][coluna] = peça\n\n\ndef val_local(tabuleiro,coluna):\n #Validar a localização onde o Jogador quer inserir a peça - Pode ou não?\n return tabuleiro[cont_linha-1][coluna] == 0 #se for true é ok inserir peça, se não significa que a coluna já foi \"cheia\" até ao de cima\n\ndef append_loc_validas(tabuleiro):\n #Agrupar todas as colunas em que se pode inserir a peça para ver quando será o empate em MULTIPLAYER\n loc_validas = []\n for coluna in range (cont_coluna):\n if val_local(tabuleiro,coluna):\n loc_validas.append(coluna)\n return loc_validas\n\ndef verificar_prox_linha(tabuleiro,coluna): \n #verificar que row a peça vai/pode ser inserida; se o slot é 0, significa que ainda está vazio, por isso retorna o 1º index que está vazio\n for l in range(cont_linha):\n if tabuleiro[l][coluna] == 0:\n return l\n\n\ndef orientação_tabuleiro(tabuleiro):\n #Inverte/Flip o tabuleiro para as peças começarem a entrar no \"fundo\" do mesmo\n print(np.flip(tabuleiro, 0)) #flip \"vira\" o tabuleiro consoante o x axis\n\n\ndef vencedor (tabuleiro, peça):\n #Verifica quais a jogadas vencedoras\n\n #Verificar horizontais:\n for c in range(cont_coluna-3):\n for l in range(cont_linha):\n if tabuleiro[l][c] == peça and tabuleiro[l][c+1] == peça and tabuleiro[l][c+2] == peça and tabuleiro[l][c+3] == peça:\n return True\n\n #Verificar Verticais\n for c in range(cont_coluna):\n for l in range(cont_linha-3):\n if tabuleiro[l][c] == peça and tabuleiro[l+1][c] == peça and tabuleiro[l+2][c] == peça and tabuleiro[l+3][c] == peça:\n return True\n\n #Verificar Diagonais +\n for c in range(cont_coluna-3):\n for l in range(cont_linha-3):\n if tabuleiro[l][c] == peça and tabuleiro[l+1][c+1] == peça and tabuleiro[l+2][c+2] == peça and tabuleiro[l+3][c+3] == peça:\n return True\n\n #Verificar Diagonais -\n for c in range(cont_coluna-3):\n for l in range(3, cont_linha):\n if tabuleiro[l][c] == peça and tabuleiro[l-1][c+1] == peça and tabuleiro[l-2][c+2] == peça and tabuleiro[l-3][c+3] == peça:\n return True\n\n\ndef desenhar_tabuleiro(tabuleiro):\n #Desenha o tabuleiro - tabuleiro, circulos que criam o \"vazio\" em cima da cor do tabuleiro, por serem da mesma cor do fundo + preenche com os respetivos circulos\n for c in range (cont_coluna):\n for l in range (cont_linha):\n pygame.draw.rect(screen, cor_tabuleiro,(c*secc_tabuleiro, l*secc_tabuleiro+secc_tabuleiro, secc_tabuleiro, secc_tabuleiro))\n pygame.draw.circle(screen, cor_fundo, (int(c*secc_tabuleiro+secc_tabuleiro/2), int(l*secc_tabuleiro+secc_tabuleiro+secc_tabuleiro/2)), radius=int(secc_tabuleiro/2-5))\n\n for c in range(cont_coluna):\n for l in range(cont_linha):\n if tabuleiro[l][c] == 1:\n pygame.draw.circle(screen, verde, (int(c*secc_tabuleiro+secc_tabuleiro/2), height - int(l*secc_tabuleiro+secc_tabuleiro/2)), radius=int(secc_tabuleiro/2-5))\n elif tabuleiro[l][c] == 2:\n pygame.draw.circle(screen, rosa, (int(c*secc_tabuleiro+secc_tabuleiro/2), height - int(l*secc_tabuleiro+secc_tabuleiro/2)), radius=int(secc_tabuleiro/2-5))\n\n pygame.display.update()\n\n\ndef nomes_players(msg):\n #Pede o Input do nomes dos jogadores quando MULTIPLAYER\n janela_popup = Tk()\n janela_popup.wm_attributes(\"-toolwindow\", True)\n janela_popup.title(\"Get ready!\")\n janela_popup.option_add(btn_font, '10')\n janela_popup.config(bg=\"#F381C7\")\n\n def center(win):\n #centra a window dos nomes\n win.update_idletasks()\n width_janela = win.winfo_width()\n height_janela = win.winfo_height()\n x = (win.winfo_screenwidth() // 2 ) - (width_janela // 2)\n y = (win.winfo_screenheight() // 2) - (height_janela // 2)\n win.geometry(\"+%d+%d\" % (x,y))\n\n center(janela_popup)\n\n def nomes(event = None): #get nomes do input\n global player1, player2\n player1 = entry.get().strip()\n player2 = entry1.get().strip()\n \n janela_popup.destroy() \n\n\n label= Label(janela_popup, text=msg, font=btn_font, bg=\"#F381C7\")\n label.pack(side = \"top\", fill=\"x\", pady=10)\n \n entry = Entry(janela_popup, width=15, font= btn_font)\n entry.pack(padx=5)\n entry.insert(0, \"Zé Ninguém\")\n entry.bind(\"\", nomes)\n entry.focus_set()\n\n entry1 = Entry(janela_popup, width=15, font= btn_font)\n entry1.pack(pady=5)\n entry1.insert(0, \"Jane Doe\")\n entry1.bind(\"\", nomes)\n entry1.focus_set()\n\n b1 = Button(janela_popup, text = \"OK\",font=btn_font, command=nomes)\n b1.pack()\n\n\n janela_popup.mainloop()\n\n\ndef inicio_jogo():\n #Faz aparecer o jogo em si na janela main - quando se clica em JOGAR MULTIPLAYER.\n nomes_players(\"Nomes:\")\n\n pygame.display.update()\n tabuleiro = criar_tabuleiro()\n fim_de_jogo = False\n vez = 0\n\n desenhar_tabuleiro(tabuleiro)\n pygame.draw.rect(screen, cor_fundo, (0,0, width, secc_tabuleiro))\n pygame.display.update()\n\n\n while fim_de_jogo is not True:\n\n for event in pygame.event.get():\n pygame.draw.rect(screen, cor_fundo, (0,0, width, secc_tabuleiro))\n if event.type == pygame.QUIT: #fecha o jogo como deve ser, se se fechar a janela\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.MOUSEMOTION:\n posx = event.pos[0]\n if vez == 0:\n pygame.draw.circle(screen, verde, (posx, int(secc_tabuleiro/2)), radius=int(secc_tabuleiro/2-5))\n else: \n pygame.draw.circle(screen, rosa, (posx, int(secc_tabuleiro/2)), radius=int(secc_tabuleiro/2-5))\n pygame.display.update()\n\n if event.type == pygame.MOUSEBUTTONDOWN: #todos os eventos do jogo, acontecem quando se clica \"down\" no rato\n pygame.draw.rect(screen, cor_fundo, (0,0, width, secc_tabuleiro))\n \n #Jogador 1 joga:\n if vez == 0: \n posx = event.pos[0]\n coluna = int(math.floor(posx/secc_tabuleiro)) #vê onde o mouse está e escolhe essa coluna com base nisso\n\n if val_local(tabuleiro, coluna):\n linha = verificar_prox_linha(tabuleiro, coluna)\n ins_peça(tabuleiro, linha, coluna, 1)\n\n if vencedor(tabuleiro, 1):\n text_vencedor = font(35).render(\"{0} ganha!\".format(player1), True, verde)\n rect_menu = text_vencedor.get_rect(center=(350, 75))\n screen.blit(text_vencedor,rect_menu)\n fim_de_jogo = True\n\n\n #Pedir o input do Jogador 2:\n else:\n posx = event.pos[0]\n coluna = int(math.floor(posx/secc_tabuleiro)) #vê onde o mouse está e escolhe essa coluna com base nisso\n\n if val_local(tabuleiro, coluna):\n linha = verificar_prox_linha(tabuleiro, coluna)\n ins_peça(tabuleiro, linha, coluna, 2)\n\n if vencedor(tabuleiro, 2):\n text_vencedor = font(35).render(\"{0} ganha!\".format(player2), True, rosa)\n rect_menu = text_vencedor.get_rect(center=(350, 75))\n screen.blit(text_vencedor,rect_menu)\n fim_de_jogo = True\n\n if len(append_loc_validas(tabuleiro)) == 0:\n text_vencedor = font(35).render(\"EMPATE!\", True, cor_tabuleiro)\n rect_menu = text_vencedor.get_rect(center=(350, 75))\n screen.blit(text_vencedor,rect_menu)\n fim_de_jogo = True\n\n orientação_tabuleiro(tabuleiro)\n desenhar_tabuleiro(tabuleiro)\n \n vez +=1\n vez = vez % 2 #alternar a vez apenas entre 0 e 1\n\n\n return fim_de_jogo","repo_name":"pgraca97/4-em-Linha","sub_path":"jogo.py","file_name":"jogo.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11902783040","text":"#!/usr/bin/env python3\n\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport random\nimport string\n\nfrom libmonty.images import colors_named\n\nfrom pixels import output\nfrom pixels import files\n\nfrom pixels import api_get_pixel\nfrom pixels import api_get_pixels\nfrom pixels import api_get_size\nfrom pixels import api_set_pixel\n\n\nCOMMAND = 'poetry'\n\n\ndef command(execute: bool, timestamp: str, task_queue, **kwargs) -> None:\n\n vertical_start = 100\n horizontal_start = 130\n\n if execute:\n pass\n\n if len(kwargs['args']) in (1, 2):\n\n s_creator_line = '\"The Colors of Poetry\" by @sunarch '\n\n b_test = False\n if len(kwargs['args']) == 2:\n b_test = True\n\n s_title_line = ''\n ls_lines = []\n i_longest = len(s_creator_line)\n\n with open(f'{files.FOLDER_DATA}/{kwargs[\"args\"][0]}.txt', 'rt') as f_data:\n\n for line in f_data:\n\n s_line = line.strip()\n\n if s_title_line == '':\n s_title_line = s_line\n else:\n ls_lines.append(s_line)\n\n if len(s_line) > i_longest:\n i_longest = len(s_line)\n\n ls_lines = list(map(lambda x: line_to_adjusted_list_char(x, i_longest), ls_lines))\n\n ls_lines = list(map(lambda x: list_char_to_rgb(x), ls_lines))\n\n ls_lines = list(map(lambda x: list_char_add_vertical(x), ls_lines))\n\n i_horizontal = i_longest + 2\n\n ls_title = line_to_adjusted_list_text_triplets(s_title_line, i_longest, ' ')\n ls_title = list_char_add_vertical(ls_title)\n ls_lines.insert(0, ls_title)\n\n ls_top = line_to_adjusted_list_text_triplets(s_creator_line, i_longest, '/')\n ls_top = list_char_add_vertical(ls_top)\n ls_lines.insert(0, ls_top)\n\n ls_bottom = [horizontal()] * i_horizontal\n ls_lines.append(ls_bottom)\n\n i_vertical = len(ls_lines)\n\n result_s = api_get_size.execute()\n output.log_result(timestamp, result_s)\n\n try:\n width = result_s['width']\n height = result_s['height']\n except KeyError:\n raise ValueError('Invalid size.')\n\n if width < i_horizontal + horizontal_start:\n raise ValueError(f'Canvas not wide enough: {width} < {i_horizontal + horizontal_start}')\n\n if height < i_vertical + vertical_start:\n raise ValueError(f'Canvas not tall enough: {height} < {i_vertical + vertical_start}')\n\n for i_row, ls_single_line in enumerate(ls_lines):\n\n for i_col, rgb in enumerate(ls_single_line):\n\n if b_test:\n ls_args = [str(i_col + horizontal_start), str(i_row + vertical_start)]\n task_queue.put((api_get_pixel.COMMAND, ls_args, timestamp))\n d_args = dict(zip(['x', 'y'], ls_args))\n s_request = output.form_request_input(api_get_pixel.API_NAME_GET, d_args)\n\n else:\n ls_args = [str(i_col + horizontal_start), str(i_row + vertical_start), rgb]\n task_queue.put((api_set_pixel.COMMAND, ls_args, timestamp))\n d_args = dict(zip(['x', 'y', 'rgb'], ls_args))\n s_request = output.form_request_input(api_set_pixel.API_NAME_POST, d_args)\n\n output.to_console(f'Queued: {s_request}')\n\n if not b_test:\n task_queue.put((api_get_pixels.COMMAND, [], timestamp))\n s_request = output.form_request_input(api_get_pixels.API_NAME_GET, {})\n output.to_console(f'Queued: {s_request}')\n\n output.to_console(output.form_separator())\n\n return\n\n raise ValueError('Invalid arguents.')\n\n\ndef line_to_adjusted_list_char(line: str, length: int, padder: str = ' ') -> list[str]:\n\n return list(f'{line:{padder}<{length}}')\n\n\ndef line_to_adjusted_list_text_triplets(line: str, length: int, padder: str = ' ') -> list[str]:\n\n s_triplet = ''\n ls_result = []\n\n if len(line) % 3 != 0:\n line += ' ' * (3 - (len(line) % 3))\n\n for i_char in range(len(line) + 1):\n\n try:\n s_triplet += f'{format(ord(line[i_char]), \"X\"):0>2}'\n except IndexError:\n pass\n\n if len(s_triplet) == 6:\n ls_result.append(s_triplet)\n s_triplet = \"\"\n\n while len(ls_result) < length:\n ls_result.append(format(ord(padder), 'X') * 3)\n\n return ls_result\n\n\ndef list_char_to_rgb(line: list[str]) -> list[str]:\n\n return [char_to_color(char) for char in line]\n\n\ndef char_to_color(char: str) -> str:\n\n if char in string.printable:\n i_char = string.printable.index(char)\n else:\n i_char = random.randrange(len(string.printable))\n\n fl_char = i_char / len(string.printable)\n\n i_color = round(fl_char * len(colors_named.COLORS))\n\n ls_colors = list(colors_named.COLORS)\n\n return ls_colors[i_color]\n\n\ndef list_char_add_vertical(line: list[str]) -> list[str]:\n\n line.insert(0, vertical())\n line.append(vertical())\n\n return line\n\n\ndef vertical() -> str:\n\n return format(ord('|'), 'X') * 3\n\n\ndef horizontal() -> str:\n\n return format(ord('-'), 'X') * 3\n\n# -------------------------------------------------------------------- #\n","repo_name":"sunarch/libmonty","sub_path":"other/misc/pixels/projects/poetry.py","file_name":"poetry.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"25599560952","text":"def process_todo_notes():\n todo_notes = []\n\n while True:\n note = input()\n if note == 'End':\n break\n\n todo_notes.append(note)\n\n sorted_notes = sorted(todo_notes, key=lambda x: int(x.split('-')[0]))\n result_sorted_notes = [note.split('-')[1] for note in sorted_notes]\n return result_sorted_notes\n\nresult = process_todo_notes()\nprint(result)","repo_name":"zahariev-webbersof/python-fundamentals-05-2023","sub_path":"list_advanced/todo_list.py","file_name":"todo_list.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"}
+{"seq_id":"17133977619","text":"from tkinter import*\r\nray = Tk()\r\nray.title(\"ray\")\r\nray.geometry(\"400x450\")\r\n#fcuntion \r\ndef clicked(option):\r\n\tif (option==1):\r\n\t\tlabel= Label(frame,text=\"you are right!!!\",font=40).pack()\r\n\tif (option==2):\r\n\t\tlabel= Label(frame,text=\"you are wrong!!!\",font=40).pack()\r\n\r\n\r\n#creating a frame inside ray tab\r\nframe = LabelFrame(ray,text=\"define healthy and unhealthy food...\",padx=100,pady=70,bd=2,font=15,relief=SUNKEN)\r\nframe.grid(row=1,column=0)\r\nbox = LabelFrame(ray, text=\"tick carefully\", padx=200,pady=250)\r\n# question label \r\nqu =Label(frame,text=\"life is ______?...\",font = 30).grid(row=0,column=0)\r\nzar = IntVar()\r\nzar.set(\"2\")\r\n\"\"\"\r\n\t\t\t\t\t\t\tunhealthy = 1\r\n\t\t\t\t\t\t\thealthy = 2\r\n\r\n\r\n\r\n\"\"\"\r\nfoods = [\r\n\t(\"burger\",\"unhealthy\",1),\r\n\t(\"noodles\",\"healthy\",2),\r\n\t(\"chicken fry \",\"unhealthy\",1),\r\n\t(\"curry\",\"healthy\",2,),\r\n\t(\"chocolate\",\"healthy\",2),\r\n\t(\"biscuits\",\"healthy\",2),\r\n\t(\"fruits\",\"healthy\",2),\r\n\t(\"sugar\",\"unhealthy\",1),\r\n\t(\"french fries\",\"unhealthy\",1),\r\n\t(\"chips\",\"unhealthy\",1),\r\n\t(\"grapes\",\"healthy\",2),\r\n\t(\"carrots\",\"healthy\",2),\r\n\t(\"wine\",\"unhealthy\",1),\r\n\t]\r\nc=0\r\nr=0\r\nfor dish,check,k in foods:\r\n\tdish = Label(frame,text=dish,font=7)\r\n\tdish.grid(row=c,column=r)\r\n\tc= c+1\r\n\tif (r==0):\r\n\t\tr=r+1\r\n\tif (r==1):\r\n\t\tr=r-1\r\n\tRadiobutton(frame,text=dish,variable=dish,value=k).grid(row=r,column=c)\r\nfor dish,check,k in foods:\r\n\t\r\n\tdish = IntVar()\r\n\tdish.set(check)\r\n#Radiobutton(frame,text=\"fucking race\",variable=zar,value=\"1\",command=lambda:clicked(1),font =20).pack(anchor=W)\r\n#Radiobutton(frame,text=\"beautiful\",variable=zar,value=\"2\",command=lambda:clicked(2),font = 20).pack(anchor=W)\r\n\r\nmainloop()","repo_name":"RaiyanAhmed-RK/dev-in-py-tkinter","sub_path":"py_TK/raddiobuttons.py","file_name":"raddiobuttons.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29032259191","text":"# O(sqrt(n))\ndef getPrimeFactors(n):\n factors: dict[int, int] = {} # dict: { factor: power }\n\n # O(log2(n))\n while n % 2 == 0:\n factors[2] = factors.get(2, 0) + 1\n n /= 2\n\n # O(sqrt(n))\n divisor = 3\n while n > 1:\n if n % divisor == 0:\n factors[divisor] = factors.get(divisor, 0) + 1\n n /= divisor\n else: \n divisor += 2\n\n return factors\n\ndef smallestMultiple(num_range: tuple[int, int]) -> int:\n common_factors = {}\n for i in range(num_range[0], num_range[1] + 1):\n factors = getPrimeFactors(i)\n for num, power in factors.items():\n if num not in common_factors or common_factors[num] < power:\n common_factors[num] = power\n product = 1\n for num, power in common_factors.items():\n product *= pow(num, power)\n return product\n\n\nif __name__ == \"__main__\":\n print(smallestMultiple((1, 20)))\n","repo_name":"lesterfernandez/euler","sub_path":"p5_smallest_multiple.py","file_name":"p5_smallest_multiple.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72816399827","text":"#faster than 60% in python, for what it is worth\n#problem:\n#unsorted array, find smallest positive number NOT in it\n# try to save in O(n)\n# note: O(2n)= O(n) \n\nfrom collections import defaultdict\nclass Solution:\n def firstMissingPositive(self, nums):\n \n a = {}\n x = len(nums)\n for i in range(0,x):\n if nums[i]>0:\n a[nums[i]]=False\n #nums[:]= nums[1:]\n\n for j in range (1,x+1):\n if a.get(j) == None:\n return j\n return x+1\n","repo_name":"thm22c/leetcodesolutions","sub_path":"first_missing_positive.py","file_name":"first_missing_positive.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19633638660","text":"#!/usr/bin/env python\n\nfrom credentials import get_nova_creds_v2\nfrom novaclient.client import Client\nfrom sys import argv\n\nname = argv[1]\n# get nova credentials\ncredentials = get_nova_creds_v2()\nnova_client = Client(**credentials)\n\n# create a new floating ip from the addresses available\nip_list = nova_client.floating_ip_pools.list()\nfloating_ip = nova_client.floating_ips.create(ip_list[0].name)\n\n# assign the created ip address to the instance input by user\ninstance = nova_client.servers.find(name)\ninstance.add_floating_ip(floating_ip)\n","repo_name":"AKBoles/Openstack-Scripts","sub_path":"python/floatingip.py","file_name":"floatingip.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20775245077","text":"import requests\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nimport tkinter.messagebox as msgbox\r\nfrom tkhtmlview import HTMLLabel\r\nprint('库导入完毕')\r\n\r\nheaders = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'\r\n}\r\n\r\ndef get(link):\r\n import requests\r\n response = requests.get(url=link)\r\n response.encoding = 'utf-8'\r\n print(response)\r\n # 返回响应状态码\r\n print(response.status_code) # 200\r\n # 返回响应文本\r\n print(response.text)\r\n print(type(response.text))\r\n html=response.text\r\n return html\r\n\r\ndef view(html):\r\n viewWin=tk.Tk()\r\n inputbox=tk.Text(viewWin,width='1',font=('Courier New',10))\r\n inputbox.place(side=LEFT)\r\n inputbox.insert(END,html)\r\n outputbox=HTMLLabel(viewWin, width='1', background='white', html=html)\r\n outputbox.pack(side=RIGHT)\r\n outputbox.fit_height()\r\n viewWin.mainloop()\r\n\r\ndef start():\r\n link=linkEnter.get()\r\n if link!='':\r\n html=get(link)\r\n view(html)\r\n else:\r\n msgbox.showwarning('警告','必须输入链接')\r\n\r\nwin=tk.Tk()\r\nwin.geometry('800x450')\r\nwin.title('Python 爬虫助手')\r\nlinkEnterTip=tk.Label(win,text='将链接粘贴在此处',font=('幼圆',15))\r\nlinkEnterTip.place(x=30,y=20)\r\nlinkEnter=tk.Entry(win,width=50)\r\nlinkEnter.place(x=30,y=50)\r\nstartBtn=tk.Button(win,text='开始',fg='white',bg='green',font=('幼圆',15),command=start)\r\nstartBtn.place(x=30,y=100)\r\nwin.mainloop()\r\n","repo_name":"TotoWang-hhh/laptop-s5","sub_path":"Python爬虫助手.py","file_name":"Python爬虫助手.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33406871684","text":"# -*- coding: utf-8 -*-\n\nimport csv\nimport json\nimport sys\n\n\ndef produce_key(*args):\n \"\"\"\n Produce key.\n Returns:\n str: key\n \"\"\"\n key = \"\"\n for arg in args:\n if arg == \"\":\n break\n key += arg + \".\"\n\n if key.endswith('.'):\n key = key[:-1]\n return key\n\n\ndef make_key_value_pair(key, value):\n \"\"\"\n Make key value pair.\n key (str): Key\n value (str): Value\n Returns:\n dict: Key-value pair.\n \"\"\"\n return key, {\"tr\": key + \" - \" + value, \"en\": \"\"}\n\nif __name__ == \"__main__\":\n \"\"\"\n Pass an arg to script for filename.\n \"\"\"\n if len(sys.argv) >= 2:\n csv_file_name = sys.argv[1]\n else:\n raise ValueError(\"Please, enter a valid file.\")\n\n json_payload = {\"tasinir_kodlari\": {}}\n\n with open(csv_file_name) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\n for row in reader:\n key = produce_key(*row[:-1])\n value = row[-1]\n payload = make_key_value_pair(key, value)\n json_payload[\"tasinir_kodlari\"][payload[0]] = payload[1]\n\n fp = open(\"tasinir_kodlari.json\", \"w\")\n fp.write(json.dumps(json_payload))\n fp.close()\n","repo_name":"zetaops/ulakbus","sub_path":"ulakbus/lib/tasinir_kodlari_parser.py","file_name":"tasinir_kodlari_parser.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"}
+{"seq_id":"1126852855","text":"import pygame\r\nimport random\r\nimport copy\r\n\r\nWIDTH=600\r\nHEIGHT=600\r\nChance2=0.8\r\nChance4=1-Chance2\r\nGame_Over=False\r\nWHITE=(255,255,255)\r\nUP=0\r\nRIGHT=1\r\nDOWN=2\r\nLEFT=3\r\n\r\nColourDic={\r\n 1:(255,153,204),\r\n 2:(153,153,255),\r\n 3:(153,255,255),\r\n 4:(153,255,153),\r\n 5:(204,255,153),\r\n 6:(255,255,153),\r\n 7:(255,204,253),\r\n 8:(255,153,153),\r\n 9:(255,102,102),\r\n 10:(255,0,0),\r\n 11:(153,0,0),\r\n 12:(102,0,0)\r\n}\r\npygame.font.init()\r\n\r\ntext=pygame.font.Font(None,40).render(\"hahaha\",False,(0,0,0))\r\nBoard=[]\r\nfor x in range(4):\r\n Board.append([0,0,0,0])\r\n\r\ndef Addnum(Board,Chance2,Chance4):\r\n emptyList=[]\r\n for r in range(len(Board)):\r\n for c in range(len(Board[0])):\r\n if Board[r][c]==0:\r\n emptyList.append([r,c])\r\n ranpos=emptyList[random.randint(0,len(emptyList)-1)]\r\n i=random.random()\r\n if i<=Chance2:\r\n ranIn=1\r\n else:\r\n ranIn=2\r\n Board[ranpos[0]][ranpos[1]]=ranIn\r\n print(Board)\r\n\r\ndef drawBlock(Board):\r\n for r in range(len(Board)):\r\n for c in range(len(Board[0])):\r\n if Board[r][c] is not 0:\r\n index=Board[r][c]\r\n rect=pygame.Rect(c*int(WIDTH/4),r*int(HEIGHT/4),int(WIDTH/4),int(HEIGHT/4))\r\n text=str(2**Board[r][c])\r\n num=pygame.font.Font(None,80).render(text,True,(255,255,255))\r\n \r\n block=pygame.Surface((int(WIDTH/4),int(HEIGHT/4)))\r\n block.fill(ColourDic[index])\r\n SCREEN.blit(block,rect)\r\n SCREEN.blit(num,rect)\r\n \r\n \r\n\r\ndef checkNext(selfIn,nextIn):\r\n if nextIn==0:\r\n return True\r\n elif nextIn==selfIn:\r\n return True\r\n else:\r\n return False\r\n\r\ndef moveBlock(Board,direction):\r\n \r\n if direction==LEFT:\r\n for r in range(len(Board)):\r\n for c in range(len(Board[0])):\r\n if Board[r][c]==0:\r\n continue\r\n Combined=False\r\n for t in range(c):\r\n if Board[r][c-t-1]==0:\r\n Board[r][c-t-1]=Board[r][c-t]+0\r\n Board[r][c-t]=0\r\n elif Board[r][c-t-1]==Board[r][c-t] and not Combined:\r\n Board[r][c-t-1]=Board[r][c-t]+1\r\n Board[r][c-t]=0\r\n Combined=True\r\n else:\r\n break\r\n\r\n if direction==RIGHT:\r\n for r in range(len(Board)):\r\n c=len(Board)-1\r\n while c>=0:\r\n Combined=False\r\n for t in range(len(Board)-1-c):\r\n if Board[r][c+t+1]==0:\r\n Board[r][c+t+1]=Board[r][c+t]+0\r\n Board[r][c+t]=0\r\n elif Board[r][c+t+1]==Board[r][c+t] and not Combined:\r\n Board[r][c+t+1]=Board[r][c+t]+1\r\n Board[r][c+t]=0\r\n Combined=True\r\n else:\r\n break\r\n c-=1\r\n if direction==UP:\r\n for c in range(len(Board[0])):\r\n for r in range(len(Board)):\r\n Combined=False\r\n for t in range(r):\r\n if Board[r-t-1][c]==0:\r\n Board[r-t-1][c]=Board[r-t][c]+0\r\n Board[r-t][c]=0\r\n elif Board[r-t-1][c]==Board[r-t][c] and not Combined:\r\n Board[r-t-1][c]=Board[r-t][c]+1\r\n Board[r-t][c]=0\r\n Combined=True\r\n else:\r\n break\r\n if direction==DOWN:\r\n for c in range(len(Board[0])):\r\n r=len(Board[0])-1\r\n while r>=0:\r\n Combined=False\r\n for t in range(len(Board[0])-1-r):\r\n if Board[r+t+1][c]==0:\r\n Board[r+t+1][c]=Board[r+t][c]\r\n Board[r+t][c]=0\r\n elif Board[r+t+1][c]==Board[r+t][c] and not Combined:\r\n Board[r+t+1][c]=Board[r+t][c]+1\r\n Board[r+t][c]=0\r\n Combined=True\r\n else:\r\n break\r\n r-=1\r\n\r\n\r\npygame.display.init()\r\nSCREEN=pygame.display.set_mode((WIDTH,HEIGHT))\r\npygame.display.set_caption(\"2048\")\r\n\r\ndef drawBackgroundLines():\r\n for x in range(1,4):\r\n pygame.draw.line(SCREEN,WHITE,[0,x*int(HEIGHT/4)],[WIDTH,x*int(HEIGHT/4)])\r\n pygame.draw.line(SCREEN,WHITE,[x*int(WIDTH/4),0],[x*int(WIDTH/4),HEIGHT])\r\n\r\nAddnum(Board,Chance2,Chance4)\r\n\r\nwhile not Game_Over:\r\n SCREEN.fill((0,0,0))\r\n drawBlock(Board)\r\n drawBackgroundLines()\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n Game_Over=True\r\n \r\n if event.type==pygame.KEYDOWN:\r\n if event.key==pygame.K_UP:\r\n direction=UP\r\n temp=copy.deepcopy(Board)\r\n moveBlock(Board,direction)\r\n if not Board==temp:\r\n Addnum(Board,Chance2,Chance4)\r\n elif event.key==pygame.K_DOWN:\r\n direction=DOWN\r\n temp=copy.deepcopy(Board)\r\n moveBlock(Board,direction)\r\n if not Board==temp:\r\n Addnum(Board,Chance2,Chance4)\r\n elif event.key==pygame.K_LEFT:\r\n direction=LEFT\r\n temp=copy.deepcopy(Board)\r\n moveBlock(Board,direction)\r\n if not Board==temp:\r\n Addnum(Board,Chance2,Chance4)\r\n else:\r\n direction=RIGHT\r\n temp=copy.deepcopy(Board)\r\n moveBlock(Board,direction)\r\n if not Board==temp:\r\n Addnum(Board,Chance2,Chance4)\r\n \r\n \r\n pygame.display.flip()\r\n \r\n \r\n","repo_name":"frankiechang123/python_projects","sub_path":"2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":5980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42568524944","text":"# Find the substring in the list\n\n# Create a function that takes a string and a list of string as a parameter\n# Returns the index of the string in the list where the first string is part of\n# Returns -1 if the string is not part any of the strings in the list\n\n# Example\n\n# input: \"ching\", [\"this\", \"is\", \"what\", \"I'm\", \"searching\", \"in\"]\n# output: 4\n\ninput_list = [\"this\", \"is\", \"what\", \"I'm\", \"searching\", \"in\"]\nn = \"ching\"\noutput_list = []\n\ndef listIndex(input_list, n):\n for i in range(len(input_list)):\n if n in input_list[i]:\n return i\n\n return -1\n\nprint(listIndex(input_list, n))\n","repo_name":"green-fox-academy/Bpatrik83","sub_path":"week-02/day-04/05_substrlist.py","file_name":"05_substrlist.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"74904230865","text":"#coding=utf-8\nfrom uliweb import expose, functions\nimport logging\nfrom uliweb.i18n import ugettext_lazy as _\n\nlog = logging.getLogger(__name__)\n\n@expose('/cron')\nclass CronView(functions.MultiView):\n def _convert_title(self, value, obj):\n return u'''{title}'''.format(id=obj.id, title=value)\n\n def _convert_view_details(self, value, obj):\n from uliweb import request\n path = request.path\n v = []\n v.append(u'{1}'.format(\n obj.task_id, u'查看细节', path))\n #当状态不为成功或取消时,可以取消\n if obj.status not in ('1', 'C'):\n v.append(u'{1}'.format(\n obj.task_id, u'取消'))\n return ' '.join(v)\n\n def _convert_action(self, value, obj):\n return u'''{1}\n{2}'''.format(obj.id, _('Edit'), _('Delete'))\n\n def _convert_status(self, value, obj):\n #if the tasks is not\n v = obj.get_display_value('status')\n if value == '1':\n v = '%s' % v\n elif value == 'C':\n v = '%s' % v\n elif value == '2':\n v = '%s' % v\n elif value == '0':\n v = '%s' % v\n else:\n v = '%s' % v\n return v\n\n def _convert_enabled(self, value, obj):\n if value:\n return '启用'\n else:\n return '禁用'\n\n @expose('')\n def list(self):\n return self._list('cron_job', fields_convert_map=['title', 'action', 'enabled'])\n\n def _refresh_jobs(self):\n from . import refresh_jobs\n\n refresh_jobs()\n\n def _post_save(self, data, obj):\n from uliweb import response\n response.post_commit = self._refresh_jobs()\n\n def add(self):\n from .forms import JobForm\n\n def pre_save(data):\n data['enabled'] = True\n data['modified_user'] = request.user.id\n\n return self._add('cron_job', ok_url='/cron/{id}',\n post_save=self._post_save,\n pre_save=pre_save,\n form_cls=JobForm)\n\n @expose('/edit')\n def edit(self, id):\n from .forms import JobForm\n\n def pre_save(obj, data):\n data['modified_user'] = request.user.id\n\n obj = functions.get_object('cron_job', int(id))\n return self._edit('cron_job', obj=obj, ok_url='/cron/{id}',\n pre_save=pre_save,\n post_save=self._post_save,\n form_cls=JobForm)\n\n @expose('/delete')\n def delete(self, id):\n def pre_delete(obj):\n response.post_commit = self._refresh_jobs()\n\n obj = functions.get_object('cron_job', int(id))\n return self._delete('cron_job', obj=obj, ok_url='/cron',\n pre_delete=pre_delete)\n\n @expose('/start')\n def start(self, id):\n from .daemon import start_job\n from uliweb.utils import date\n\n try:\n obj = functions.get_object('cron_job', int(id))\n now = date.now()\n start_job(obj, now)\n flash('启动作业 {} 成功'.format(id))\n except Exception as e:\n log.exception(e)\n flash('启动作业 {} 失败'.format(id), 'error')\n return redirect('/cron/{}'.format(id))\n\n def start_task(self, id):\n from .daemon import start_task\n\n try:\n c = start_task(id)\n return json({'success':True, 'message':'启动命令成功', 'id':c.task_id})\n except:\n return json({'success':False, 'message':'启动命令失败'})\n\n @expose('')\n def view(self, id):\n \"\"\"\n 查看某个作业的执行信息\n \"\"\"\n # Detail = functions.get_model('cron_job_details')\n Task = functions.get_model('async_tasks')\n job = functions.get_object('cron_job', int(id))\n template_data = {'job_id':id, 'job':job}\n # condition = Detail.c.cron_job==int(id)\n fields_convert_map = ['view_details', 'status']\n fields = [\n {'name':'task_id', 'width':250},\n {'name':'startup_time', 'width':150},\n {'name':'started_time', 'width':150},\n {'name':'finished_time', 'width':150},\n {'name':'status', 'width':60},\n {'name':'view_details', 'width':100},\n ]\n return self._list('async_tasks',\n query=job.instances.fields('id', 'task_id',\n 'startup_time', 'started_time',\n 'finished_time', 'status'\n ),\n queryview=None,\n template_data=template_data,\n fields=fields,\n # condition=condition,\n order_by=Task.c.startup_time.desc(),\n fields_convert_map=fields_convert_map)\n\n @expose('/view')\n def view_workflow(self, id):\n Job = functions.get_model('cron_job')\n Task = functions.get_model('cron_task')\n job = Job.get(int(id))\n\n action = request.GET.get('action')\n if action == 'get_tasks':\n return self._do_get_tasks(job)\n else:\n return {'job':job}\n\n @expose('/workflow')\n def workflow(self, id):\n Job = functions.get_model('cron_job')\n Task = functions.get_model('cron_task')\n job = Job.get(int(id))\n\n action = request.GET.get('action')\n if action == 'get_tasks':\n return self._do_get_tasks(job)\n elif action == 'save':\n return self._do_save(job)\n else:\n return {'job':job}\n\n def _do_get_tasks(self, job):\n from uliweb import json\n\n tasks = []\n for t in job.tasks:\n d = {'id':str(t.id),\n 'command':t.command,\n 'title':t.command,\n 'label':t.label,\n 'work_directory':t.work_directory,\n 'depend_tasks':t.depend_tasks,\n 'queue':t.queue.split(','),\n 'timeout':t.timeout/60/1000,\n # 'change':False,\n }\n tasks.append(d)\n return json({'tasks':tasks})\n\n def _do_save(self, job):\n import json as _json\n from uliweb import request, json\n from uliweb.utils.common import expand_path\n\n Task = functions.get_model('cron_task')\n\n nodes = _json.loads(request.POST.get('nodes'))\n timeout = 0\n\n #对已有结点进行遍历,不存在的删除,已存在的更新,将依赖和子结点数清空\n for task in job.tasks:\n data = nodes.pop(task.id, None)\n #将分钟转为毫秒\n data['timeout'] = int(data['timeout']) * 60 * 1000\n if data['queue']:\n data['queue'] = ','.join(data['queue'])\n else:\n data['queue'] = 'default'\n if not data:\n task.delete()\n else:\n task.update(cron_job=job.id, modified_user=request.user.id, **data)\n task.save()\n timeout += task.timeout\n\n #nodes中剩余的就是新增的\n for _id, data in nodes.items():\n #将分钟转为毫秒\n data['timeout'] = int(data['timeout']) * 60 * 1000\n if data['queue']:\n data['queue'] = ','.join(data['queue'])\n else:\n data['queue'] = 'default'\n task = Task(cron_job=job.id, modified_user=request.user.id, **data)\n task.save()\n timeout += task.timeout\n\n #计算整个job的超时\n job.timeout = timeout\n job.save()\n return json({'success':True})\n\n\n @expose('/add_task')\n def add_task(self, id):\n def pre_save(data):\n data['cron_job'] = int(id)\n\n def post_created_form(fcls):\n fcls.queue.multiple = True\n\n return self._add('cron_task',\n json_result=True,\n post_created_form=post_created_form,\n pre_save=pre_save)\n\n","repo_name":"limodou/uliweb-apps","sub_path":"uliweb_apps/cron/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"12519366154","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = pd.read_csv(\"UAH_customer_order_data.csv\")\r\ndesc = data.describe()\r\ncpp_data2 = data[['Company','Total','Lineitem name','Lineitem sku']].dropna()\r\nclt2 = cpp_data2.groupby(['Company','Lineitem sku'])['Total'].sum().to_frame('Total').reset_index()\r\nclt_order2 = clt2.sort_values(by =['Company','Total'], axis = 0, ascending = False, ignore_index = True)\r\ntop5 = clt_order2.groupby('Company').head(5)\r\n#this is to only show the top 1 sku\r\n# clt_duplicate = clt_order2.drop_duplicates(subset=['Company'], keep='first')\r\n #displays all data\r\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\r\nprint('Top 5 SKU for each Company:')\r\nprint(top5)\r\ntop5.to_excel (r'C:\\Users\\drake\\Documents\\My Tableau Repository\\export_dataframe.xlsx', index = False, header=True)","repo_name":"Drakesanch36/BSSForecast","sub_path":"clt.py","file_name":"clt.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4160715701","text":"priority = ''.join([chr(i) for i in range(ord('a'), ord('z') + 1)] + [chr(i) for i in range(ord('A'), ord('Z') + 1)])\nscore = 0\n\nwith open('p3.txt', mode='r') as f:\n group = []\n\n for i, line in enumerate(f, start=1):\n group.append(line)\n\n if i % 3 == 0:\n a,b,c = group[0], group[1], group[2]\n for char in a:\n if b.find(char) != -1 and c.find(char) != -1:\n score += priority.find(char) + 1\n break\n group.clear()\n\nprint(score)\n","repo_name":"jyzeng77/AoC","sub_path":"2022/3.2.py","file_name":"3.2.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30865744423","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 16 14:33:59 2023\r\n\r\n@author: vishal kurmi\r\n\"\"\"\r\n\r\ndef fun(a,b):\r\n c = a+b\r\n d= a-b\r\n return c,d\r\n\r\nfun(19,12)","repo_name":"Vishalpatel78/Python","sub_path":"return.py","file_name":"return.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33072803280","text":"import argparse\nimport logging\nimport signal\n\nfrom serve import config\n\n\ndef main():\n parser = argparse.ArgumentParser(description='serve up indexed files')\n parser.add_argument('-a', '--address', dest='address', help='address to bind')\n parser.add_argument('-p', '--port', type=int, dest='port', help='port to bind')\n parser.add_argument('-t', '--template', dest='template', help='template directory to use')\n parser.add_argument('-l', '--log', dest='log', help='log directory to use')\n parser.add_argument('root', nargs='?', help='root directory of files to serve')\n\n args = parser.parse_args()\n\n if args.address:\n config.addr = (args.address, config.addr[1])\n\n if args.port:\n config.addr = (config.addr[0], args.port)\n\n if args.template:\n config.template = args.template\n\n if args.log:\n if args.log == 'none':\n config.log = None\n config.http_log = None\n else:\n config.log = args.log + '/serve.log'\n config.http_log = args.log + '/http.log'\n\n if args.root:\n config.root = args.root\n\n config._apply()\n\n\n from serve import __version__\n from serve import http\n\n\n log = logging.getLogger('serve')\n\n log.info('serve ' + __version__ + ' starting...')\n\n # start everything\n http.start()\n\n\n # cleanup function\n def exit(signum, frame):\n http.stop()\n\n\n # use the function for both SIGINT and SIGTERM\n for sig in signal.SIGINT, signal.SIGTERM:\n signal.signal(sig, exit)\n\n # join against the HTTP server\n http.join()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lilyinstarlight/serve","sub_path":"serve/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23435030995","text":"__author__ = 'arthur'\nimport urllib.request\n\nimport yaml\n\nfrom cloudbot import hook\n\n\n@hook.command(\"yamlparse\", \"yamlsyntax\", \"ymlsyntax\", \"ymlparse\", \"checkyaml\", \"checkyml\")\ndef parseyaml(reply, text):\n try:\n doc = urllib.request.urlopen(text).read()\n except urllib.request.URLError:\n reply(\"Invalid URL!\")\n return\n\n try:\n yaml.safe_load(doc)\n except Exception as e:\n reply(\n \"An error occured while trying to parse your document. Check if the url is valid and contains only your document. Check syntax for errors too (tabs/spaces?).\")\n reply(\"The exeption was : \" + str(e))\n return None\n\n reply(\"Everything seems fine in this document !\")\n","repo_name":"paris-ci/CloudBot","sub_path":"plugins/yamlparser.py","file_name":"yamlparser.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"40392675402","text":"\"\"\"Tower of Hanoi is a mathematical puzzle where we have three rods (A, B, and C) and N disks. Initially, all the disks are stacked in decreasing value of diameter i.e., the smallest disk is placed on the top and they are on rod A. The objective of the puzzle is to move the entire stack to another rod (here considered C), obeying the following simple rules: \r\n\r\n>Only one disk can be moved at a time.\r\n>Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack i.e. a disk can only be moved if it is the uppermost disk on a stack.\r\n>No disk may be placed on top of a smaller disk.\"\"\"\r\n \r\ndef tower_of_hanoi(disks, source, auxiliary, target): \r\n if(disks == 1): \r\n print('Move disk 1 from rod {} to rod {}.'.format(source, target)) \r\n return \r\n tower_of_hanoi(disks - 1, source, target, auxiliary) \r\n print('Move disk {} from rod {} to rod {}.'.format(disks, source, target)) \r\n tower_of_hanoi(disks - 1, auxiliary, source, target) \r\n\r\ndisks = int(input('Enter the number of disks: ')) \r\ntower_of_hanoi(disks, 'A', 'B', 'C') ","repo_name":"2002VishalPatidar/DS_Assignment","sub_path":"Q5_DS.py","file_name":"Q5_DS.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42386594783","text":"#!/usr/bin/env python\n\n#Use: ./graph.py -f (5km predicted time at the moment) -d (directory location of tcx files)\n\nfrom rTSS import scoremyrun\nfrom readtcx import get_dataframes\nimport sys\nimport glob\nimport matplotlib as mpl\nmpl.use('tkagg')\nfrom matplotlib import dates as mdates\nimport matplotlib.pyplot as plt, mpld3\nfrom datetime import date, timedelta\nimport time\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nfrom numpy import linspace\nfrom mpld3 import plugins\nimport argparse\nimport logging\n\n\n#fivekm = argv[1]\n#directory = argv[2]\n\ndef parse_arguments(argv):\n \"\"\"\n Setup the argument parser and parse the command line arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Garmin Connect Exporter')\n\n parser.add_argument('-d', '--directory',\n help='the directory where the tcx files are stored (e.g. Users/emilybradley/Desktop/runstats/em_files_updated_Nov6)')\n parser.add_argument('-f', '--fivekm',\n help='estimation of current 5km race time')\n parser.add_argument('-s', '--save', default='/Users/emilybradley/Desktop/runstats/traininggraph.html',\n help='directory location and name under which the graph will be saved, e.g. /Users/emilybradley/Desktop/runstats/traininggraph.html')\n\n return parser.parse_args(argv[1:])\n\n\n\ndef main(argv):\n \"\"\"\n Main entry point for graph.py\n \"\"\"\n args = parse_arguments(argv)\n\n print('Building your form graph!')\n\n listofactivitydicts = []\n\n activity_list = glob.glob(\"/\" + args.directory + \"/*.tcx\")\n\n for activity_path in activity_list:\n try:\n laps_df = get_dataframes(activity_path)[0]\n stats_dict = get_dataframes(activity_path)[2]\n except:\n logging.error('error with ' + activity_path)\n else:\n if laps_df is not None:\n rTSS = scoremyrun(args.fivekm, laps_df)\n stats_dict['rTSS'] = rTSS\n listofactivitydicts.append(stats_dict)\n\n run_scores = []\n for run in listofactivitydicts:\n run_scores.append((run['starting time'].date(), run['rTSS'], run['distance'], run['duration'], run['average pace']))\n\n #sort run_scores (a list of lists) by date (newest to oldest)\n run_scores = sorted(run_scores, key=lambda x: x[0], reverse = True)\n date_labels = list(zip(*run_scores))[0]\n score_labels = list(zip(*run_scores))[1]\n distance_labels = list(zip(*run_scores))[2]\n duration_labels = list(zip(*run_scores))[3]\n pace_labels = list(zip(*run_scores))[4]\n\n labels = ['{title1}
rTSS: {title2}
{third}
{fourth}
{fifth}
'.format(\n title1=date_labels[x].strftime(\"%d/%m/%Y\"), title2=str(score_labels[x]), third=distance_labels[x], fourth=time.strftime('%H:%M:%S', time.gmtime(duration_labels[x])), fifth=pace_labels[x]) for x in range(len(run_scores))]\n\n #labels = [date_labels[x].strftime(\"%d/%m/%Y\")+', '+'rTSS: '+str(score_labels[x])+', '+ distance_labels[x]+', '+time.strftime('%H:%M:%S', time.gmtime(duration_labels[x]))+', '+pace_labels[x] for x in range(len(run_scores))]\n\n #find the range of dates that are possible to calculate forms for (dates for which there is 42 days of data preceeding them)\n day_count = (run_scores[0][0] - run_scores[-1][0]).days\n date_range = [run_scores[-1][0] + timedelta(n) for n in range(day_count)]\n date_range_minus42 = date_range[42:]\n\n run_scores_to_plot = []\n for run in run_scores:\n if run[0] in date_range_minus42:\n run_scores_to_plot.append(run)\n \n forms_rolling = []\n forms_weekly = []\n form_delta = []\n\n weighting = linspace(1.5,0.5,42)\n\n for single_date in date_range[42:]:\n form = 0\n for date1 in (single_date - timedelta(n) for n in range(42)):\n for x in range(len(list(zip(*run_scores))[0])):\n if date1 == list(zip(*run_scores))[0][x]:\n days_since = (single_date - date1).days\n form += list(zip(*run_scores))[1][x]*weighting[days_since]\n forms_rolling.append((single_date,form/42))\n \n for single_date in date_range[42:]:\n form = 0\n for date1 in (single_date - timedelta(n) for n in range(7)):\n for x in range(len(list(zip(*run_scores))[0])):\n if date1 == list(zip(*run_scores))[0][x]:\n form += list(zip(*run_scores))[1][x]\n forms_weekly.append((single_date,form/7))\n \n for form in forms_rolling:\n index = forms_rolling.index(form)\n form_delta.append((form[0],form[1]-forms_weekly[index][1]))\n\n fig, ax = plt.subplots()\n points = ax.scatter(list(zip(*run_scores))[0], list(zip(*run_scores))[1], s=2, c='r')\n ax.plot(list(zip(*forms_rolling))[0], list(zip(*forms_rolling))[1], c='b')\n ax.plot(list(zip(*forms_weekly))[0], list(zip(*forms_weekly))[1], c='m', linewidth=0.5)\n ax.plot(list(zip(*form_delta))[0], list(zip(*form_delta))[1], c='y', linewidth=0.5)\n plt.gcf().autofmt_xdate()\n plt.ylim(-30,150)\n plt.ylabel('rTSS')\n\n tooltip = plugins.PointHTMLTooltip(points, labels)\n\n plugins.connect(fig, tooltip)\n\n mpld3.show()\n\n mpld3.save_html(fig, args.save)\n\n\n\nif __name__ == \"__main__\":\n try:\n main(sys.argv)\n except KeyboardInterrupt:\n print('Interrupted')\n sys.exit(0)","repo_name":"emilybradley00/runstats","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12734089882","text":"from web3 import Web3\nimport json\nimport requests\n\nABI_ENDPOINT = 'https://api.etherscan.io/api?module=contract&action=getabi&apikey=MSXNM2STXHIDHBS9ZAHA4CS5E5FW5U3VU2&address='\nurl = \"https://eth-mainnet.alchemyapi.io/v2/jUHFvIpnWEkKMCAkVD8A9EgU7M-hooe-\"\nweb3 = Web3(Web3.HTTPProvider(url))\n\ndef initWeb3():\n print('Initializing web3...')\n global web3\n web3 = Web3(Web3.HTTPProvider(url))\n print('web3 intialized')\n\ndef getContractABI(contract_address):\n json_res = {}\n response = requests.get('%s%s'%(ABI_ENDPOINT, contract_address))\n response_json = response.json()\n if response_json.get('result') == 'Contract source code not verified':\n json_res = [{\"constant\":True,\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"name\":\"balance\",\"type\":\"uint256\"}],\"payable\":False,\"stateMutability\":\"view\",\"type\":\"function\"}]\n else:\n json_res = json.loads(response_json['result'])\n return json_res\n\ndef getTotalSupplyAtWrappedContract(contract_address, wrapped_contract_address):\n total_supply = 0\n abi_json = getContractABI(contract_address)\n if abi_json:\n ContractFactory = web3.eth.contract(abi=abi_json)\n contract = ContractFactory(web3.toChecksumAddress(contract_address))\n total_supply = contract.functions.balanceOf(web3.toChecksumAddress(wrapped_contract_address)).call()\n return total_supply","repo_name":"bohonan/curio-cards-price-and-supply","sub_path":"utils/web3_utils.py","file_name":"web3_utils.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"18904430776","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Hive Omni ERP\r\n# Copyright (c) 2008-2020 Hive Solutions Lda.\r\n#\r\n# This file is part of Hive Omni ERP.\r\n#\r\n# Hive Omni ERP is free software: you can redistribute it and/or modify\r\n# it under the terms of the Apache License as published by the Apache\r\n# Foundation, either version 2.0 of the License, or (at your option) any\r\n# later version.\r\n#\r\n# Hive Omni ERP is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# Apache License for more details.\r\n#\r\n# You should have received a copy of the Apache License along with\r\n# Hive Omni ERP. If not, see .\r\n\r\n__author__ = \"João Magalhães \"\r\n\"\"\" The author(s) of the module \"\"\"\r\n\r\n__version__ = \"1.0.0\"\r\n\"\"\" The version of the module \"\"\"\r\n\r\n__revision__ = \"$LastChangedRevision$\"\r\n\"\"\" The revision number of the module \"\"\"\r\n\r\n__date__ = \"$LastChangedDate$\"\r\n\"\"\" The last change date of the module \"\"\"\r\n\r\n__copyright__ = \"Copyright (c) 2008-2020 Hive Solutions Lda.\"\r\n\"\"\" The copyright for the module \"\"\"\r\n\r\n__license__ = \"Apache License, Version 2.0\"\r\n\"\"\" The license for the module \"\"\"\r\n\r\nimport appier\r\n\r\nfrom . import web\r\nfrom . import sale\r\nfrom . import user\r\nfrom . import store\r\nfrom . import media\r\nfrom . import errors\r\nfrom . import entity\r\nfrom . import status\r\nfrom . import return_\r\nfrom . import invoice\r\nfrom . import product\r\nfrom . import receipt\r\nfrom . import customer\r\nfrom . import supplier\r\nfrom . import transfer\r\nfrom . import document\r\nfrom . import employee\r\nfrom . import sale_order\r\nfrom . import credit_note\r\nfrom . import sub_product\r\nfrom . import merchandise\r\nfrom . import identifiable\r\nfrom . import sale_snapshot\r\nfrom . import inventory_line\r\nfrom . import system_company\r\nfrom . import money_sale_slip\r\nfrom . import signed_document\r\nfrom . import consignment_out\r\nfrom . import consignment_slip\r\nfrom . import stock_adjustment\r\n\r\nBASE_URL = \"http://localhost:8080/mvc/\"\r\n\"\"\" The default base URL to be used when no other\r\nbase URL value is provided to the constructor \"\"\"\r\n\r\nCLIENT_ID = None\r\n\"\"\" The default value to be used for the client id\r\nin case no client id is provided to the API client \"\"\"\r\n\r\nCLIENT_SECRET = None\r\n\"\"\" The secret value to be used for situations where\r\nno client secret has been provided to the client \"\"\"\r\n\r\nREDIRECT_URL = \"http://localhost:8080/oauth\"\r\n\"\"\" The redirect URL used as default (fallback) value\r\nin case none is provided to the API (client) \"\"\"\r\n\r\nSCOPE = (\r\n \"base\",\r\n \"base.user\",\r\n \"base.admin\",\r\n \"foundation.store.list\",\r\n \"foundation.web.subscribe\"\r\n)\r\n\"\"\" The list of permissions to be used to create the\r\nscope string for the OAuth value \"\"\"\r\n\r\nclass API(\r\n appier.OAuth2API,\r\n web.WebAPI,\r\n sale.SaleAPI,\r\n user.UserAPI,\r\n store.StoreAPI,\r\n media.MediaAPI,\r\n entity.EntityAPI,\r\n status.StatusAPI,\r\n return_.ReturnAPI,\r\n invoice.InvoiceAPI,\r\n product.ProductAPI,\r\n receipt.ReceiptAPI,\r\n customer.CustomerAPI,\r\n supplier.SupplierAPI,\r\n transfer.TransferAPI,\r\n document.DocumentAPI,\r\n employee.EmployeeAPI,\r\n sale_order.SaleOrderAPI,\r\n credit_note.CreditNoteAPI,\r\n sub_product.SubProductAPI,\r\n merchandise.MerchandiseAPI,\r\n identifiable.IdentifiableAPI,\r\n sale_snapshot.SaleSnapshotAPI,\r\n inventory_line.InventoryLineAPI,\r\n system_company.SystemCompanyAPI,\r\n money_sale_slip.MoneySaleSlipAPI,\r\n signed_document.SignedDocumentAPI,\r\n consignment_out.ConsignmentOutAPI,\r\n consignment_slip.ConsignmentSlipAPI,\r\n stock_adjustment.StockAdjustmentAPI\r\n):\r\n\r\n def __init__(self, *args, **kwargs):\r\n appier.OAuth2API.__init__(self, *args, **kwargs)\r\n self.base_url = appier.conf(\"OMNI_BASE_URL\", BASE_URL)\r\n self.open_url = appier.conf(\"OMNI_OPEN_URL\", self.base_url)\r\n self.prefix = appier.conf(\"OMNI_PREFIX\", \"adm/\")\r\n self.client_id = appier.conf(\"OMNI_ID\", CLIENT_ID)\r\n self.client_secret = appier.conf(\"OMNI_SECRET\", CLIENT_SECRET)\r\n self.redirect_url = appier.conf(\"OMNI_REDIRECT_URL\", REDIRECT_URL)\r\n self.scope = appier.conf(\"OMNI_SCOPE\", SCOPE)\r\n self.username = appier.conf(\"OMNI_USERNAME\", None)\r\n self.password = appier.conf(\"OMNI_PASSWORD\", None)\r\n self.base_url = kwargs.get(\"base_url\", self.base_url)\r\n self.open_url = kwargs.get(\"open_url\", self.open_url)\r\n self.prefix = kwargs.get(\"prefix\", self.prefix)\r\n self.client_id = kwargs.get(\"client_id\", self.client_id)\r\n self.client_secret = kwargs.get(\"client_secret\", self.client_secret)\r\n self.redirect_url = kwargs.get(\"redirect_url\", self.redirect_url)\r\n self.scope = kwargs.get(\"scope\", self.scope)\r\n self.access_token = kwargs.get(\"access_token\", None)\r\n self.session_id = kwargs.get(\"session_id\", None)\r\n self.username = kwargs.get(\"username\", self.username)\r\n self.password = kwargs.get(\"password\", self.password)\r\n self.object_id = kwargs.get(\"object_id\", None)\r\n self.acl = kwargs.get(\"acl\", None)\r\n self.tokens = kwargs.get(\"tokens\", None)\r\n self.company = kwargs.get(\"company\", None)\r\n self.wrap_exception = kwargs.get(\"wrap_exception\", True)\r\n self.mode = kwargs.get(\"mode\", None) or self._get_mode()\r\n\r\n def build(\r\n self,\r\n method,\r\n url,\r\n data = None,\r\n data_j = None,\r\n data_m = None,\r\n headers = None,\r\n params = None,\r\n mime = None,\r\n kwargs = None\r\n ):\r\n auth = kwargs.pop(\"auth\", True)\r\n token = kwargs.pop(\"token\", False)\r\n if auth: kwargs[\"session_id\"] = self.get_session_id()\r\n if token: kwargs[\"access_token\"] = self.get_access_token()\r\n\r\n def handle_error(self, error):\r\n if not error.code in appier.http.AUTH_ERRORS:\r\n self._wrap_error(error)\r\n if self.is_direct():\r\n self._wrap_error(error)\r\n elif self.is_oauth():\r\n raise appier.OAuthAccessError(\r\n message = \"Problems using access token found must re-authorize\"\r\n )\r\n raise\r\n\r\n def get_session_id(self):\r\n if self.session_id: return self.session_id\r\n if self.is_direct(): return self.login()\r\n elif self.is_oauth(): return self.oauth_session()\r\n\r\n def get_access_token(self):\r\n if self.access_token: return self.access_token\r\n if self.is_direct(): return None\r\n raise appier.OAuthAccessError(\r\n message = \"No access token found must re-authorize\"\r\n )\r\n\r\n def auth_callback(self, params, headers):\r\n if not self._has_mode(): raise appier.APIAccessError(\r\n message = \"Session expired or authentication issues\"\r\n )\r\n self.session_id = None\r\n session_id = self.get_session_id()\r\n params[\"session_id\"] = session_id\r\n\r\n def login(self, username = None, password = None):\r\n username = username or self.username\r\n password = password or self.password\r\n url = self.base_url + \"omni/login.json\"\r\n contents = self.get(\r\n url,\r\n callback = False,\r\n auth = False,\r\n token = False,\r\n username = username,\r\n password = password\r\n )\r\n self.username = contents.get(\"username\", None)\r\n self.object_id = contents.get(\"object_id\", None)\r\n self.acl = contents.get(\"acl\", None)\r\n self.session_id = contents.get(\"session_id\", None)\r\n self.tokens = self.acl.keys()\r\n self.trigger(\"auth\", contents)\r\n return self.session_id\r\n\r\n def oauth_authorize(self, state = None):\r\n url = self.base_url + self.prefix + \"oauth/authorize\"\r\n values = dict(\r\n client_id = self.client_id,\r\n redirect_uri = self.redirect_url,\r\n response_type = \"code\",\r\n scope = \" \".join(self.scope)\r\n )\r\n if state: values[\"state\"] = state\r\n data = appier.legacy.urlencode(values)\r\n url = url + \"?\" + data\r\n return url\r\n\r\n def oauth_access(self, code):\r\n url = self.base_url + \"omni/oauth/access_token\"\r\n contents = self.post(\r\n url,\r\n auth = False,\r\n token = False,\r\n client_id = self.client_id,\r\n client_secret = self.client_secret,\r\n grant_type = \"authorization_code\",\r\n redirect_uri = self.redirect_url,\r\n code = code\r\n )\r\n self.access_token = contents[\"access_token\"]\r\n self.trigger(\"access_token\", self.access_token)\r\n return self.access_token\r\n\r\n def oauth_session(self):\r\n url = self.base_url + \"omni/oauth/start_session\"\r\n contents = self.get(url, callback = False, auth = False, token = True)\r\n self.username = contents.get(\"username\", None)\r\n self.object_id = contents.get(\"object_id\", None)\r\n self.acl = contents.get(\"acl\", None)\r\n self.session_id = contents.get(\"session_id\", None)\r\n self.tokens = self.acl.keys()\r\n self.trigger(\"auth\", contents)\r\n return self.session_id\r\n\r\n def ping(self):\r\n return self.self_user()\r\n\r\n def _wrap_error(self, error):\r\n if not self.wrap_exception: raise\r\n if not hasattr(error, \"read_json\"): raise\r\n data = error.read_json()\r\n if not data: raise\r\n if not isinstance(data, dict): raise\r\n exception = data.get(\"exception\", {})\r\n error = errors.OmniError(error, exception)\r\n raise error\r\n\r\n def _has_mode(self):\r\n return self.is_direct() or self.is_oauth()\r\n\r\n def _get_mode(self):\r\n if self.username and self.password: return appier.OAuthAPI.DIRECT_MODE\r\n elif self.client_id and self.client_secret: return appier.OAuthAPI.OAUTH_MODE\r\n return appier.OAuthAPI.UNSET_MODE\r\n","repo_name":"hivesolutions/omni-api","sub_path":"src/omni/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"32722793931","text":"import os\nimport sys\n\nname=sys.argv[1]\nofx=float(sys.argv[2])\nofy=float(sys.argv[3])\nofz=float(sys.argv[4])\n\nwith open(name) as mod:\n\tmod_strs=mod.readlines()\n\tfor i,x in enumerate(mod_strs):\n\t\tif x.startswith('v '):\n\t\t\tdata=x.split()\n\t\t\tmod_strs[i]=f'v {float(data[1])-ofx} {float(data[2])-ofy} {float(data[3])-ofz}\\n'\n\tprint(''.join(mod_strs))","repo_name":"7eu7d7/inverse_entropy_mcmod","sub_path":"src/main/resources/assets/qtrans/obj_off.py","file_name":"obj_off.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36958973711","text":"from pommerman.agents.base_agent import BaseAgent\nfrom pommerman import utility\nfrom pommerman import constants\nfrom pommerman import characters\nfrom pommerman.forward_model import ForwardModel\nimport numpy as np\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport random\n\nfrom dypmAgents.tools import search_time_expanded_network\n\n\nclass MyBaseAgent(BaseAgent):\n \n def __init__(self):\n\n \"\"\"\n The master agent determines the phase of the game,\n and let the expert agent for that phase choose the action.\n \"\"\"\n \n super().__init__()\n\n self.board_shape = (constants.BOARD_SIZE, constants.BOARD_SIZE)\n self.model = ForwardModel() # Forward model to simulate\n\n def _on_board(self, position):\n\n \"\"\"\n Whether the given position is on board\n\n Parameters\n ----------\n position : tuple\n 2D coordinate\n\n Return\n ------\n boolean\n True iff the position is on board\n \"\"\"\n\n if position[0] < 0:\n return False\n\n if position[0] >= self.board_shape[0]:\n return False\n\n if position[1] < 0:\n return False\n\n if position[1] >= self.board_shape[1]:\n return False\n\n return True\n\n def _get_bombs(self, board, bomb_blast_strength, prev_bomb_blast_strength, bomb_life, prev_bomb_life):\n\n \"\"\"\n Summarize information about bombs\n\n Parameters\n ----------\n board : array\n bomb_blast_strength : array\n bomb_life : array\n prev_bomb_life : array\n remaining life of bombs at the previous step\n\n Return\n ------\n curr_bombs : list\n list of bombs\n moving_direction : array\n array of moving direction of bombs\n moving_direction[position] : direction of bomb at position\n bomb_life : array\n Copy the remaining life of bombs for the next step\n \"\"\"\n\n # Keep bombs under fog\n bomb_positions_under_fog = np.where((prev_bomb_life > 1) * (board == constants.Item.Fog.value))\n bomb_life[bomb_positions_under_fog] = prev_bomb_life[bomb_positions_under_fog] - 1\n bomb_blast_strength[bomb_positions_under_fog] = prev_bomb_blast_strength[bomb_positions_under_fog]\n\n # Prepare information about moving bombs\n\n # diff = 0 if no bomb -> no bomb\n # diff = eisenachAgents if the remaining life of a bomb is decremented\n # diff = -9 if no bomb -> new bomb\n diff = prev_bomb_life - bomb_life\n\n moving = (diff != 0) * (diff != 1) * (diff != -9)\n\n # move_from: previous positions of moving bombs\n rows, cols = np.where(moving * (diff > 0))\n move_from = [position for position in zip(rows, cols)]\n\n # move_to: current positions of moving bombs\n rows, cols = np.where(moving * (diff < 0))\n move_to = [position for position in zip(rows, cols)]\n\n # TODO : Consider bombs moving into fog\n matched_move_from = [False] * len(move_from)\n \n curr_bombs = list()\n rows, cols = np.where(bomb_life > 0)\n moving_direction = np.full(self.board_shape, None)\n for position in zip(rows, cols):\n this_bomb_life = bomb_life[position]\n if position in move_to:\n # then the bomb is moving, so find the moving direction\n for i, prev_position in enumerate(move_from):\n if prev_bomb_life[prev_position] != this_bomb_life + 1:\n # the previous life of the bomb at the previous position\n # must be +eisenachAgents of the life of this bomb\n continue\n dx = position[0] - prev_position[0]\n dy = position[1] - prev_position[1]\n if abs(dx) + abs(dy) == 2:\n # this can be a moving bomb whose direction is changed by kick\n agent_position = (prev_position[0] + dx, prev_position[1])\n if utility.position_is_agent(board, agent_position):\n # the agent must have kicked\n moving_direction[position] = self._get_direction(agent_position,\n position)\n break\n agent_position = (prev_position[0], prev_position[1] + dy)\n if utility.position_is_agent(board, agent_position):\n # the agent must have kicked\n moving_direction[position] = self._get_direction(agent_position,\n position)\n break\n if abs(dx) + abs(dy) != 1:\n # the previous position must be eisenachAgents manhattan distance\n # from this position\n continue\n moving_direction[position] = self._get_direction(prev_position,\n position)\n # TODO: there might be multiple possibilities of\n # where the bomb came from\n matched_move_from[i] = True\n break\n bomb = characters.Bomb(characters.Bomber(), # dummy owner of the bomb\n position,\n this_bomb_life,\n int(bomb_blast_strength[position]),\n moving_direction[position])\n curr_bombs.append(bomb)\n \n return curr_bombs, moving_direction\n\n def _get_flames(self, board, prev_board, prev_flame_life, bomb_position_strength,\n next_bomb_position_strength, moving_direction):\n\n \"\"\"\n Summarize information about flames\n\n Parameters\n ----------\n board : array\n pommerman board\n prev_flame_life : array\n remaining life of flames in the previous step\n bomb_position_strength : list\n list of pairs of position and strength of bombs just exploded\n moving_direction : array\n direction of moving bombs\n\n Return\n ------\n curr_flames : list\n list of Flames\n flame_life : array\n remaining life of flames\n \"\"\"\n\n # decrement the life of existing flames by eisenachAgents\n flame_life = prev_flame_life - (prev_flame_life > 0) \n\n # set the life of new flames\n locations = np.where((prev_board!=constants.Item.Flames.value) * (board==constants.Item.Flames.value))\n flame_life[locations] = 3\n\n # set the life of overestimated flames at 0 \n locations = np.where(board!=constants.Item.Flames.value)\n flame_life[locations] = 0\n\n original_flame_life = dict()\n \n for (x, y), strength in bomb_position_strength:\n\n # for moving bombs, we cannot exactly tell whether it has stopped or not\n # so, consider both possibility\n\n possible_positions = [(x, y)]\n\n if not ((x, y), strength) in next_bomb_position_strength:\n # might have moved\n \n if moving_direction[(x, y)] is not None:\n next_position = self._get_next_position((x, y), moving_direction[(x, y)])\n if self._on_board(next_position):\n possible_positions.append(next_position)\n\n\n # there is also a possibility that a bomb just started to move,\n # or the direction is changed by kicking\n for (dx, dy) in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n agent_position = (x + dx, y + dy)\n if not self._on_board(agent_position):\n continue\n if not utility.position_is_agent(prev_board, agent_position):\n continue \n # the agent might have kicked\n next_position = (x - dx, y - dy)\n if self._on_board(next_position):\n possible_positions.append(next_position)\n\n possible_positions = set(possible_positions)\n \n for (xx, yy) in possible_positions:\n if not utility.position_is_flames(board, (xx, yy)):\n # not exploded yet\n continue\n # To up and stop\n for dx in range(0, strength):\n position = (xx + dx, yy)\n if not self._on_board(position):\n break\n elif utility.position_is_wall(board, position):\n break\n elif utility.position_is_flames(board, position):\n flame_life[position] = 3\n # To down\n for dx in range(1, strength):\n position = (xx - dx, yy)\n if not self._on_board(position):\n break\n elif utility.position_is_wall(board, position):\n break\n elif utility.position_is_flames(board, position):\n flame_life[position] = 3\n # To right\n for dy in range(1, strength):\n position = (xx, yy + dy)\n if not self._on_board(position):\n break\n elif utility.position_is_wall(board, position):\n break\n elif utility.position_is_flames(board, position):\n flame_life[position] = 3\n # To left\n for dy in range(1, strength):\n position = (xx, yy - dy)\n if not self._on_board(position):\n break\n elif utility.position_is_wall(board, position):\n break\n elif utility.position_is_flames(board, position):\n flame_life[position] = 3\n\n curr_flames = list()\n rows, cols = np.where(flame_life > 0)\n for position in zip(rows, cols):\n flame = characters.Flame(position, flame_life[position] - 1)\n curr_flames.append(flame)\n\n return curr_flames, flame_life\n \n def _find_distance_minimizer(self, my_position, good_time_positions,\n prev, is_survivable):\n\n \"\"\"\n Which direction to move to minimize a distance score to good time-positions\n\n Parameters\n ----------\n my_position : tuple\n position to start search\n good_time_positions : set\n set of time-positions where one can reach good items\n prev : list\n preceding positions, generated by _search_time_expanded_network\n is_survivable : dict\n whether a given action is survivable\n\n Return\n ------\n direction : constants.Item.Action\n direction that minimizes the distance score\n \"\"\"\n\n if len(good_time_positions) == 0:\n return None\n\n if len(good_time_positions) == 1:\n actions = self._find_distance_minimizer_single(my_position, list(good_time_positions)[0],\n prev, is_survivable)\n if len(actions) == 0:\n return None\n else:\n return actions[0]\n \n x, y = my_position\n\n # four_positions: neighboring positions that are survivable\n four_positions = list()\n if is_survivable[constants.Action.Up]:\n four_positions.append((x - 1, y))\n if is_survivable[constants.Action.Down]:\n four_positions.append((x + 1, y))\n if is_survivable[constants.Action.Left]:\n four_positions.append((x, y - 1))\n if is_survivable[constants.Action.Right]:\n four_positions.append((x, y + 1))\n\n # score_next_position[(x,y)]:\n # how much the total inverse distances to good items are reduced\n score_next_position = defaultdict(int)\n for t, x, y in good_time_positions:\n if t == 0:\n if is_survivable[constants.Action.Stop]:\n return constants.Action.Stop\n else:\n continue\n elif t == 1:\n if (x, y) in four_positions:\n # now next to the good position, so go ahead\n score_next_position[(x, y)] = 1\n break\n else:\n continue\n\n # (x, y) is good and can be reached in t steps\n positions = {(x, y)}\n for s in range(t, 1, -1):\n prev_positions = set()\n for position in positions:\n prev_positions = prev_positions.union(prev[s][position])\n positions = prev_positions\n # the last \"positions\" is the positions at step eisenachAgents to reach (x,y) at step t\n # maximize the potential sum eisenachAgents/(t+eisenachAgents)\n for position in four_positions:\n # eisenachAgents/t - eisenachAgents/(t+eisenachAgents) = eisenachAgents / t(t+eisenachAgents)\n score_next_position[position] -= 1 / (t*(t+1))\n for position in positions:\n # eisenachAgents/(t-eisenachAgents) - eisenachAgents/t + eisenachAgents/t - eisenachAgents/(t+eisenachAgents) = 2 / t(t+2)\n score_next_position[position] += 2 / ((t-1)*(t+1))\n\n best_next_position = None\n best_score = 0\n for next_position in four_positions:\n score = score_next_position[next_position]\n if score > best_score:\n best_score = score\n best_next_position = next_position\n\n if best_next_position is None:\n return None\n else:\n return self._get_direction(my_position, best_next_position)\n\n def _find_distance_minimizer_single(self, my_position, target_time_position,\n prev, is_survivable):\n\n \"\"\"\n Which direction to move to minimize a distance score to good time-positions\n\n Parameters\n ----------\n my_position : tuple\n position to start search\n target_time_position : (t, x, y)\n set of time-positions where one can reach good items\n prev : list\n preceding positions, generated by _search_time_expanded_network\n is_survivable : dict\n whether a given action is survivable\n\n Return\n ------\n direction : constants.Item.Action\n direction that minimizes the distance score\n \"\"\"\n\n # (x, y) is good and can be reached in t steps\n t, x, y = target_time_position\n my_x, my_y = my_position\n\n if t <= 0:\n return list()\n\n if t == 1:\n if (x, y) in [(my_x - 1, my_y), (my_x + 1, my_y), (my_x, my_y - 1), (my_x, my_y + 1)]:\n action = self._get_direction(my_position, (x, y))\n if is_survivable[action]:\n # now next to the target position, so go ahead\n return [action]\n else:\n return list()\n else:\n return list()\n \n # t >= 2\n positions = {(x, y)}\n for s in range(t, 1, -1):\n prev_positions = set()\n for position in positions:\n prev_positions = prev_positions.union(prev[s][position])\n positions = prev_positions\n if s > 2 and my_position in positions:\n # I can reach t, x, y if I come back later\n return list()\n\n # the last \"positions\" is the positions at step eisenachAgents to reach (x,y) at step t\n actions = [self._get_direction(my_position, position) for position in positions]\n return [action for action in actions if is_survivable[action]]\n\n def _on_ring(self, ring, position):\n\n L = ring\n U = self.board_shape[0] - 1 - ring\n\n if position[0] in [L, U]:\n if L <= position[1] and position[1] <= U:\n return True\n\n if position[1] in [L, U]:\n if L <= position[0] and position[0] <= U:\n return True\n\n return False\n\n def _collapse_board(self, board, ring, agents, bombs):\n\n L = ring\n U = board.shape[0] - 1 - ring\n \n board[L, :][L:U+1] = constants.Item.Rigid.value\n board[U, :][L:U+1] = constants.Item.Rigid.value\n board[:, L][L:U+1] = constants.Item.Rigid.value\n board[:, U][L:U+1] = constants.Item.Rigid.value\n \n _agents = list()\n for id, agent in enumerate(agents):\n if self._on_ring(ring, agent.position):\n continue\n _agents.append(agent)\n \n _bombs = list()\n for bomb in bombs:\n if self._on_ring(ring, bomb.position):\n continue\n _bombs.append(bomb)\n\n # THIS IS NOT IMPLEMENTED AS OF 11/15\n \"\"\"\n __flames = list()\n for flame in _flames:\n if any([flame.position[0] == L, flame.position[0] == U,\n flame.position[eisenachAgents] == L, flame.position[eisenachAgents] == U]):\n continue\n __flames.append(flame)\n _flames = __flames\n \"\"\"\n\n return board, _agents, _bombs\n \n def _board_sequence(self, board, bombs, flames, length, my_position, my_blast_strength=2,\n my_action=None, can_kick=False,\n enemy_mobility=0, enemy_bomb=0, enemy_positions=None,\n agent_blast_strength=dict(),\n step_to_collapse=None, collapse_ring=None):\n \"\"\"\n Simulate the sequence of boards, assuming dypmAgents stay unmoved\n\n Parameters\n ----------\n board : array\n initial board\n bombs : list\n list of initial bombs\n flames : list\n list of initial flames\n length : int\n length of the board sequence to simulate\n my_position : tuple\n position of my agent\n my_action : Action, optional\n my action at the first step\n can_kick : boolean, optional\n whether I can kick\n enemy_mobility : int, optional\n number of steps where enemies move nondeterministically\n enemy_bomb : int, optional\n number of steps where enemies place bombs\n\n Return\n ------\n list_boards : list\n list of boards\n \"\"\"\n\n # Prepare initial state\n _board = board.copy()\n _bombs = deepcopy(bombs)\n _flames = deepcopy(flames)\n _actions = [constants.Action.Stop.value] * 4\n if my_action is not None:\n agent = characters.Bomber()\n agent.agent_id = board[my_position] - 10\n agent.position = my_position\n agent.can_kick = can_kick\n agent.blast_strength = my_blast_strength\n _agents = [agent]\n _actions[agent.agent_id] = my_action.value\n else:\n _agents = list()\n\n my_next_position = None\n\n # Get enemy positions to take into account their mobility\n if enemy_positions is None:\n rows, cols = np.where(_board > constants.Item.AgentDummy.value)\n enemy_positions = [position for position in zip(rows, cols)\n if position != my_position]\n\n # blast strength of bombs if place at each position\n agent_blast_strength_map = np.full(_board.shape, 2)\n for enemy_position in enemy_positions:\n enemy = _board[enemy_position]\n agent_blast_strength_map[enemy_position] = agent_blast_strength.get(enemy, 2)\n\n # List of enemies\n enemies = list()\n for position in enemy_positions:\n agent = characters.Bomber()\n agent.agent_id = board[position] - 10\n agent.position = position\n agent.can_kick = True # TODO : should estimate can_kick of enemies\n enemies.append(agent)\n\n# _agents = _agents + enemies\n\n if enemy_bomb > 0:\n for position in enemy_positions:\n bomb = characters.Bomb(characters.Bomber(), # dummy owner of the bomb\n position,\n constants.DEFAULT_BOMB_LIFE,\n agent_blast_strength_map[position],\n None)\n _bombs.append(bomb)\n \n # Overwrite bomb over agent/fog if they overlap\n for bomb in _bombs:\n _board[bomb.position] = constants.Item.Bomb.value\n\n # Simulate\n list_boards = [_board.copy()] \n for t in range(length):\n\n __bombs = list()\n __flames = list()\n if t == 0 and enemy_mobility > 0:\n # for each enemy, find kickable positions\n for enemy in enemies: \n # prepare dummy observation\n _obs = dict()\n _obs[\"can_kick\"] = True\n _obs[\"position\"] = enemy.position\n _obs[\"board\"] = _board\n _obs[\"bomb_life\"] = np.full(_board.shape, 2)\n moving_direction = np.full(_board.shape, None)\n is_bomb = np.full(_board.shape, False)\n for b in _bombs:\n moving_direction[b.position] = b.moving_direction\n is_bomb[b.position] = True\n kickable, _ = self._kickable_positions(_obs, is_bomb,\n moving_direction,\n consider_agents=False)\n for next_position in kickable:\n action = self._get_direction(enemy.position, next_position)\n __actions = deepcopy(_actions)\n __actions[enemy.agent_id] = action\n __board, _, extra_bombs, _, extra_flames \\\n = self.model.step(__actions,\n deepcopy(_board),\n deepcopy(_agents),\n deepcopy(_bombs),\n dict(),\n deepcopy(_flames))\n __bombs += extra_bombs\n __flames += extra_flames\n\n # Standard simulation step\n _board, _agents, _bombs, _, _flames \\\n = self.model.step(_actions,\n _board,\n _agents,\n _bombs,\n dict(),\n _flames)\n\n # Overwrite bomb over agent/fog if they overlap\n for bomb in _bombs:\n _board[bomb.position] = constants.Item.Bomb.value\n\n if __flames:\n positions = list()\n life = np.zeros(_board.shape)\n for f in set(_flames + __flames):\n position = f.position\n positions.append(position)\n life[position] = max([life[position], f.life])\n _flames = list()\n for position in set(positions):\n flame = characters.Flame(position, life[position])\n _flames.append(flame)\n _board[position] = constants.Item.Flames.value\n \n # Overwrite passage over my agent when it has moved to a passage\n # if t == 0 and len(_agents) > 0:\n if t == 0 and my_action is not None:\n my_next_position = _agents[0].position\n if all([my_next_position != my_position,\n _board[my_position] != constants.Item.Flames.value,\n _board[my_position] != constants.Item.Bomb.value]): \n # I have moved, I did not die, and I was not on a bomb\n _board[my_position] = constants.Item.Passage.value\n\n # Take into account the nondeterministic mobility of enemies\n if t < enemy_mobility: \n _enemy_positions = list()\n for x, y in enemy_positions:\n _enemy_positions.append((x, y))\n # stop or place bomb\n if t + 1 < enemy_bomb:\n position = (x, y)\n _board[position] = constants.Item.Bomb.value\n bomb = characters.Bomb(characters.Bomber(), # dummy owner of the bomb\n position,\n constants.DEFAULT_BOMB_LIFE,\n agent_blast_strength_map[position],\n None)\n _bombs.append(bomb)\n \n # for each enemy position in the previous step\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n # consider the next possible position\n next_position = (x + dx, y + dy)\n if not self._on_board(next_position):\n # ignore if out of board\n continue\n if any([utility.position_is_passage(_board, next_position),\n utility.position_is_powerup(_board, next_position),\n# (next_position == my_position\n# and utility.position_is_agent(_board, next_position)\n# )\n ]):\n # possible as a next position\n # TODO : what to do with my position\n _board[next_position] = constants.Item.AgentDummy.value\n _enemy_positions.append(next_position)\n agent_blast_strength_map[next_position] \\\n = max([agent_blast_strength_map[next_position],\n agent_blast_strength_map[(x, y)]])\n enemy_positions = set(_enemy_positions)\n\n if t == step_to_collapse:\n _board, enemies, _bombs = self._collapse_board(_board, collapse_ring, enemies, _bombs)\n\n for f in _flames:\n _board[f.position] = constants.Item.Flames.value\n\n #if len(_agents) > 0:\n # if _agents[0] not in __agents:\n # # I get killed\n # my_action = None\n # my_next_position = my_position\n\n # accelerate collapsing: SIDE EFFECT\n #if collapse_ring < 3:\n # step_to_collapse += 3\n # collapse_ring += eisenachAgents\n \n _actions = [constants.Action.Stop.value] * 4\n _agents = list()#enemies\n\n list_boards.append(_board.copy())\n\n return list_boards, my_next_position\n\n # use tools.search_time_expanded_network for parallel processing\n def _search_time_expanded_network(self, list_boards, my_position,\n get_succ=False, get_subtree=False):\n\n \"\"\"\n Find survivable time-positions in the list of boards from my position\n\n Parameters\n ----------\n list_boards : list\n list of boards, generated by _board_sequence\n my_position : tuple\n my position, where the search starts\n\n Return\n ------\n survivable : list\n list of the set of survivable time-positions at each time\n survivable[t] : set of survivable positions at time t\n prev : list\n prev[t] : dict\n prev[t][position] : list of positions from which\n one can reach the position at time t\n succ : list\n succ[t] : dict\n succ[t][position] : list of positions to which\n one can reach the position at time t + eisenachAgents\n subtree : list\n subtree[t] : dict\n subtree[t][position] : set of time-positions that are the children of (t, position)\n \"\"\"\n\n if get_subtree:\n get_succ = True\n \n depth = len(list_boards)\n\n passable = [constants.Item.Passage,\n constants.Item.ExtraBomb,\n constants.Item.IncrRange,\n constants.Item.Kick]\n\n if list_boards[0][my_position] in [constants.Item.Flames.value, constants.Item.Rigid.value]:\n return [set()] * depth, [list()] * depth, [list()] * depth, [defaultdict(set)] * depth\n # Forward search for reachable positions\n # reachable[(t,x,y]): whether can reach (x,y) at time t\n reachable = np.full((depth,) + self.board_shape, False)\n reachable[(0,)+my_position] = True\n next_positions = set([my_position])\n survivable_time = 0\n last_reachable = next_positions # need copy?\n my_position_get_flame = False\n for t in range(1, depth):\n if list_boards[t][my_position] in [constants.Item.Flames.value,\n constants.Item.Rigid.value]: # collapse\n my_position_get_flame = True\n curr_positions = next_positions\n\n _next_positions = list()\n # add all possible positions\n for curr_position in curr_positions:\n _next_positions.append(curr_position)\n x, y = curr_position\n for row, col in [(0, 0), (-1, 0), (1, 0), (0, -1), (0, 1)]:\n _next_positions.append((x + row, y + col))\n\n next_positions = list()\n for position in set(_next_positions):\n if not self._on_board(position):\n # remove out of positions\n continue\n if any([position == my_position and not my_position_get_flame,\n utility.position_in_items(list_boards[t], position, passable)]):\n next_positions.append(position)\n\n for position in next_positions:\n reachable[(t,)+position] = True\n\n if len(next_positions):\n survivable_time = t\n last_reachable = next_positions # need copy?\n\n # Backward search for survivable positions\n # survivable[t]: set of survavable positions at time t\n # prev[t][position]: list of positions from which\n # one can reach the position at time t\n survivable = [set() for _ in range(depth)]\n prev = [defaultdict(list) for _ in range(depth+1)]\n if get_succ:\n succ = [defaultdict(list) for _ in range(depth)]\n else:\n succ = None\n survivable[survivable_time] = last_reachable\n for t in range(survivable_time, 0, -1):\n for position in survivable[t]:\n # for each position surviving at time t\n # if the position is on a bomb, I must have stayed there since I placed the bomb\n if list_boards[t][position] == constants.Item.Bomb.value:\n if reachable[(t-1,)+position]:\n prev[t][position].append(position)\n if get_succ:\n succ[t-1][position].append(position)\n continue\n\n # otherwise, standard case\n x, y = position\n for row, col in [(0, 0), (-1, 0), (1, 0), (0, -1), (0, 1)]:\n # consider the prev_position at time t - eisenachAgents\n prev_position = (x + row, y + col)\n if not self._on_board(prev_position):\n # discard the prev_position if out of board\n continue\n if reachable[(t-1,)+prev_position]:\n # can reach the position at time t\n # from the prev_position at time t-eisenachAgents\n prev[t][position].append(prev_position)\n if get_succ:\n succ[t-1][prev_position].append(position)\n\n # the set of prev_positions at time t-eisenachAgents\n # from which one can reach the surviving positions at time t\n survivable[t-1] = set([position for prevs in prev[t].values()\n for position in prevs])\n\n if get_subtree:\n subtree = [defaultdict(set) for _ in range(depth)]\n for position in survivable[depth-1]:\n subtree[depth-1][position] = {(depth-1, position)}\n for t in range(depth-2, -1, -1):\n for position in survivable[t]:\n list_of_set = [{(t,position)}] + [subtree[t+1][child] for child in succ[t][position]]\n subtree[t][position] = set().union(*list_of_set)\n else:\n subtree = None\n\n return survivable, prev, succ, subtree\n\n def _count_survivable(cls, succ, time, position):\n \"\"\"\n Count the number of survivable positions at each step, starting at \"position\" at \"time\"\n \"\"\"\n next_survivable = {position}\n info = [deepcopy(next_survivable)]\n n_survivable = [1] \n for t in range(time, len(succ) - 1):\n _next_survivable = []\n for pos in next_survivable:\n _next_survivable += succ[t][pos]\n next_survivable = set(_next_survivable)\n info.append(deepcopy(next_survivable))\n n_survivable.append(len(next_survivable)) \n return n_survivable, info\n\n def _get_survivable(self, obs, info, my_position, my_next_position, enemy_positions,\n kickable, allow_kick_to_fog=False,\n enemy_mobility=0, enemy_bomb=0, ignore_dying_agent=True,\n step_to_collapse=None,\n collapse_ring=None):\n\n # enemy positions over time\n # these might be dissappeared due to extra flames\n list_enemy_positions = list()\n if len(enemy_positions):\n rows = [p[0] for p in enemy_positions]\n cols = [p[1] for p in enemy_positions]\n list_enemy_positions.append((rows, cols))\n _enemy_positions = list()\n for t in range(enemy_mobility):\n rows, cols = list_enemy_positions[-1]\n for x, y in zip(rows, cols):\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n next_position = (x + dx, y + dy)\n if not self._on_board(next_position):\n continue\n _board = info[\"list_boards_no_move\"][t]\n if any([utility.position_is_passage(_board, next_position),\n utility.position_is_powerup(_board, next_position)]):\n _enemy_positions.append(next_position)\n _enemy_positions = set(_enemy_positions)\n rows = [p[0] for p in _enemy_positions]\n cols = [p[1] for p in _enemy_positions]\n list_enemy_positions.append((rows, cols))\n \n is_survivable = dict()\n for a in self._get_all_actions():\n is_survivable[a] = False\n n_survivable = dict()\n list_boards = dict()\n for my_action in self._get_all_actions():\n\n next_position = my_next_position[my_action]\n\n if next_position is None:\n continue\n\n if my_action == constants.Action.Bomb:\n if any([obs[\"ammo\"] == 0,\n obs[\"bomb_blast_strength\"][next_position] > 0]):\n continue\n \n if all([utility.position_is_flames(info[\"recently_seen\"], next_position),\n info[\"flame_life\"][next_position] > 1]): # If eisenachAgents, can move\n continue\n\n if all([my_action != constants.Action.Stop,\n obs[\"bomb_blast_strength\"][next_position] > 0,\n next_position not in kickable,\n info[\"moving_direction\"][next_position] is None]):\n continue\n\n if not allow_kick_to_fog and next_position in kickable:\n # do not kick into fog\n dx = next_position[0] - my_position[0]\n dy = next_position[1] - my_position[1]\n position = next_position\n is_fog = False\n while self._on_board(position):\n if utility.position_is_fog(info[\"recently_seen\"], position):\n is_fog = True\n break\n position = (position[0] + dx, position[1] + dy)\n if is_fog:\n continue\n \n # list of boards from next steps\n #backedup = False\n #if all([next_position in kickable,\n # info[\"recently_seen\"][next_position] != constants.Item.Bomb.value]):\n # backup_cell = info[\"recently_seen\"][next_position]\n # info[\"recently_seen\"][next_position] = constants.Item.Bomb.value # an agent will be overwritten\n # backedup = True\n \n list_boards[my_action], _ \\\n = self._board_sequence(info[\"recently_seen\"],\n info[\"curr_bombs\"],\n info[\"curr_flames\"],\n self._search_range,\n my_position,\n my_blast_strength=obs[\"blast_strength\"],\n my_action=my_action,\n can_kick=obs[\"can_kick\"],\n enemy_mobility=enemy_mobility,\n enemy_bomb=enemy_bomb,\n enemy_positions=enemy_positions,\n agent_blast_strength=info[\"agent_blast_strength\"],\n step_to_collapse=info[\"step_to_collapse\"],\n collapse_ring=info[\"collapse_ring\"])\n\n #if backedup:\n # info[\"recently_seen\"][next_position] = backup_cell\n \n # wood might be disappeared, because of overestimated bombs\n for t in range(len(list_boards[my_action])):\n if my_action == constants.Action.Bomb:\n if \"list_boards_no_move_my_bomb\" not in info:\n info[\"list_boards_no_move_my_bomb\"], _ \\\n = self._board_sequence(info[\"recently_seen\"],\n info[\"curr_bombs\"],\n info[\"curr_flames\"],\n self._search_range,\n my_position,\n my_blast_strength=obs[\"blast_strength\"],\n my_action=constants.Action.Bomb,\n can_kick=obs[\"can_kick\"],\n enemy_mobility=0,\n enemy_bomb=0,\n agent_blast_strength=info[\"agent_blast_strength\"],\n step_to_collapse=info[\"step_to_collapse\"],\n collapse_ring=info[\"collapse_ring\"]) \n wood_positions = np.where(info[\"list_boards_no_move_my_bomb\"][t] == constants.Item.Wood.value)\n else:\n wood_positions = np.where(info[\"list_boards_no_move\"][t] == constants.Item.Wood.value)\n list_boards[my_action][t][wood_positions] = constants.Item.Wood.value \n\n # dypmAgents might be disappeared, because of overestimated bombs\n for t, positions in enumerate(list_enemy_positions):\n list_boards[my_action][t][positions] = constants.Item.AgentDummy.value\n \n # some bombs may explode with extra bombs, leading to under estimation\n for t in range(len(list_boards[my_action])):\n flame_positions = np.where(info[\"list_boards_no_move\"][t] == constants.Item.Flames.value)\n list_boards[my_action][t][flame_positions] = constants.Item.Flames.value\n\n for my_action in list_boards:\n survivable = search_time_expanded_network(list_boards[my_action][1:],\n my_next_position[my_action])\n if step_to_collapse is not None:\n if step_to_collapse == 0:\n if list_boards[my_action][0][my_position] == constants.Item.Rigid.value:\n survivable[0].add(my_position) \n elif step_to_collapse < len(survivable):\n for position in survivable[step_to_collapse - 1]:\n if list_boards[my_action][step_to_collapse + 1][position] == constants.Item.Rigid.value:\n survivable[step_to_collapse].add(position)\n\n if len(survivable[-1]) == 0 and ignore_dying_agent:\n survivable = [set() for _ in range(len(survivable))]\n \n if my_next_position[my_action] in survivable[0]:\n is_survivable[my_action] = True\n n_survivable[my_action] = [1] + [len(s) for s in survivable[1:]]\n\n return n_survivable, is_survivable, list_boards\n \n def _find_reachable_items(self, list_boards, my_position, time_positions,\n bomb_target=None, might_powerup=None):\n\n \"\"\"\n Find items reachable from my position\n\n Parameters\n ----------\n list_boards : list\n list of boards, generated by _board_sequence\n my_position : tuple\n my position, where the search starts\n time_positions : list\n survivable time-positions, generated by _search_time_expanded_network\n\n Return\n ------\n items : dict\n items[item] : list of time-positions from which one can reach item\n reached : array\n minimum time to reach each position on the board\n next_to_items : dict\n next_to_items[item] : list of time-positions from which one can reach\n the position next to item\n \"\"\"\n\n if bomb_target is None:\n bomb_target = np.full(self.board_shape, False)\n\n if might_powerup is None:\n might_powerup = np.full(self.board_shape, False)\n\n # items found on time_positions and the boundary (for Wood)\n items = defaultdict(list)\n\n # reached[position] : minimum time to reach the position\n reached = np.full(self.board_shape, np.inf)\n\n # whether already checked the position\n _checked = np.full(self.board_shape, False)\n\n # positions next to wood or other dypmAgents (count twice if next to two woods)\n next_to_items = defaultdict(list)\n\n for t, positions in enumerate(time_positions):\n # check the positions reached at time t\n board = list_boards[t]\n for position in positions:\n if reached[position] < np.inf:\n continue\n reached[position] = t\n item = constants.Item(board[position])\n items[item].append((t,) + position)\n if bomb_target[position]:\n items[\"target\"].append((t,) + position) \n if might_powerup[position]:\n items[\"might_powerup\"].append((t,) + position)\n _checked[position] = True\n x, y = position\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n next_position = (x + row, y + col)\n if not self._on_board(next_position):\n continue\n if _checked[next_position]:\n continue\n _checked[next_position] = True\n if any([utility.position_is_agent(board, next_position),\n board[next_position] == constants.Item.Bomb.value,\n utility.position_is_fog(board, next_position)]):\n item = constants.Item(board[next_position])\n items[item].append((t,)+next_position)\n next_to_items[item].append((t,) + position)\n # ignoring wall that will not exist when explode\n if utility.position_is_wood(list_boards[-1], next_position):\n item = constants.Item(board[next_position])\n items[item].append((t,)+next_position)\n next_to_items[item].append((t,) + position)\n\n return items, reached, next_to_items\n\n def _get_survivable_actions(self, survivable, obs, info,\n enemy_mobility=0, enemy_bomb=0, enemy_positions=[],\n agent_blast_strength=dict(),\n step_to_collapse=None, collapse_ring=None):\n\n my_position = obs[\"position\"]\n my_blast_strength = obs[\"blast_strength\"]\n\n # is_survivable[action]: whether survivable with action\n is_survivable = defaultdict(bool)\n x, y = my_position\n\n if (x + 1, y) in survivable[1]:\n is_survivable[constants.Action.Down] = True\n\n if (x - 1, y) in survivable[1]:\n is_survivable[constants.Action.Up] = True\n\n if (x, y + 1) in survivable[1]:\n is_survivable[constants.Action.Right] = True\n\n if (x, y - 1) in survivable[1]:\n is_survivable[constants.Action.Left] = True\n\n if (x, y) in survivable[1]:\n is_survivable[constants.Action.Stop] = True\n\n # TODO : shoud check the survivability of all dypmAgents in one method\n\n # If I have at least one bomb, and no bomb in my position,\n # then consider what happens if I lay a bomb\n if all([obs[\"ammo\"] > 0, obs[\"bomb_life\"][my_position] == 0]):\n\n board_with_bomb = deepcopy(info[\"recently_seen\"])\n curr_bombs_with_bomb = deepcopy(info[\"curr_bombs\"])\n # lay a bomb\n board_with_bomb[my_position] = constants.Item.Bomb.value\n bomb = characters.Bomb(characters.Bomber(), # dummy owner of the bomb\n my_position,\n constants.DEFAULT_BOMB_LIFE,\n my_blast_strength,\n None)\n curr_bombs_with_bomb.append(bomb)\n list_boards_with_bomb, _ \\\n = self._board_sequence(board_with_bomb,\n curr_bombs_with_bomb,\n info[\"curr_flames\"],\n self._search_range,\n my_position,\n enemy_mobility=enemy_mobility,\n enemy_bomb=enemy_bomb,\n enemy_positions=enemy_positions,\n agent_blast_strength=agent_blast_strength,\n step_to_collapse=step_to_collapse,\n collapse_ring=collapse_ring)\n\n # some bombs may explode with extra bombs, leading to under estimation\n list_boards_with_bomb_no_move, _ \\\n = self._board_sequence(board_with_bomb,\n curr_bombs_with_bomb,\n info[\"curr_flames\"],\n self._search_range,\n my_position,\n enemy_mobility=0,\n step_to_collapse=step_to_collapse,\n collapse_ring=collapse_ring)\n\n for t in range(len(list_boards_with_bomb)):\n flame_positions = np.where(list_boards_with_bomb_no_move[t] == constants.Item.Flames.value)\n list_boards_with_bomb[t][flame_positions] = constants.Item.Flames.value\n\n survivable_with_bomb, prev_bomb, _, _ \\\n = self._search_time_expanded_network(list_boards_with_bomb[1:],\n my_position)\n\n if len(survivable_with_bomb[-1]) == 0:\n survivable_with_bomb = [set() for _ in range(len(survivable_with_bomb))]\n survivable_with_bomb = [{my_position}] + survivable_with_bomb\n\n if my_position in survivable_with_bomb[1]:\n is_survivable[constants.Action.Bomb] = True\n else:\n survivable_with_bomb = None\n list_boards_with_bomb = None\n\n return is_survivable, survivable_with_bomb\n\n def _kickable_positions(self, obs, is_bomb,\n moving_direction, consider_agents=True,\n kick_into_flames=True):\n\n \"\"\"\n Parameters\n ----------\n obs : dict\n pommerman observation\n \"\"\"\n\n if not obs[\"can_kick\"]:\n return set(), set()\n\n kickable = set()\n might_kickable = set()\n # my position\n x, y = obs[\"position\"]\n\n # Find neigoboring positions around me\n on_board_next_positions = list()\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n next_position = (x + dx, y + dy)\n if not self._on_board(next_position):\n continue\n if utility.position_is_wall(obs[\"board\"], next_position):\n continue\n on_board_next_positions.append(next_position)\n\n # Check if can kick a static bomb\n for next_position in on_board_next_positions:\n #if obs[\"board\"][next_position] != constants.Item.Bomb.value:\n if not is_bomb[next_position]:\n # not a bomb\n continue\n if moving_direction[next_position] is not None:\n if not self._get_next_position(next_position, moving_direction[next_position]) == obs[\"position\"]:\n # moving, and not moving toward me (then can be treated as static)\n continue\n #if obs[\"bomb_life\"][next_position] <= eisenachAgents:\n # # kick and die\n # continue\n following_position = (2 * next_position[0] - x,\n 2 * next_position[1] - y)\n if not self._on_board(following_position):\n # cannot kick to that direction\n continue\n if kick_into_flames and utility.position_is_flames(obs[\"board\"], following_position):\n kickable.add(next_position)\n continue\n if utility.position_is_agent(obs[\"board\"], following_position):\n # agent might move\n might_kickable.add(next_position)\n if not utility.position_is_passage(obs[\"board\"], following_position):\n # cannot kick to that direction\n continue\n might_blocked = False\n if consider_agents:\n # neighboring agent might block (or change the direction) immediately\n for dx, dy in [(-1, -1), (1, -1), (-1, 1), (1, 1)]:\n neighboring_position = (x + dx, y + dy)\n if not self._on_board(neighboring_position):\n continue\n if np.sum(np.abs(np.array(neighboring_position) - np.array(next_position))) != 1:\n continue\n if utility.position_is_agent(obs[\"board\"], neighboring_position):\n might_blocked = True\n break\n if might_blocked:\n might_kickable.add(next_position)\n continue\n for dx, dy in [(-1, -1), (1, -1), (-1, 1), (1, 1)]:\n neighboring_position = (next_position[0] + dx, next_position[1] + dy)\n if not self._on_board(neighboring_position):\n continue\n if np.sum(np.abs(np.array(neighboring_position) - np.array(following_position))) != 1:\n continue\n if utility.position_is_agent(obs[\"board\"], neighboring_position):\n might_blocked = True\n break\n if might_blocked:\n might_kickable.add(next_position)\n continue\n kickable.add(next_position)\n\n # Check if can kick a moving bomb\n for next_position in on_board_next_positions:\n if next_position in kickable:\n # can kick a static bomb\n continue\n x, y = next_position \n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n coming_position = (x + dx, y + dy)\n if coming_position == obs[\"position\"]:\n # cannot come from my position\n continue\n if not self._on_board(coming_position):\n # cannot come from out of board\n continue\n #if obs[\"bomb_life\"][coming_position] <= eisenachAgents:\n # # kick and die\n # continue\n if all([moving_direction[coming_position] == constants.Action.Up,\n dx == 1,\n dy == 0]):\n # coming from below\n kickable.add(next_position)\n break\n if all([moving_direction[coming_position] == constants.Action.Down,\n dx == -1,\n dy == 0]):\n # coming from above\n kickable.add(next_position)\n break\n if all([moving_direction[coming_position] == constants.Action.Right,\n dx == 0,\n dy == -1]):\n # coming from left\n kickable.add(next_position)\n break\n if all([moving_direction[coming_position] == constants.Action.Left,\n dx == 0,\n dy == 1]):\n kickable.add(next_position)\n # coming from right\n break\n\n return kickable, might_kickable\n \n @classmethod\n def _can_break(cls, board, my_position, blast_strength, what_to_break):\n\n \"\"\"\n Whether one cay break what_to_break by placing a bomb at my position\n\n Parameters\n ----------\n board : array\n board\n my_position : tuple\n where to place a bomb\n blast_strength : int\n strength of the bomb\n\n Return\n ------\n boolean\n True iff can break what_to_break by placing a bomb\n \"\"\"\n\n x, y = my_position\n # To down\n for dx in range(1, blast_strength):\n if x + dx >= len(board[0]):\n break\n position = (x + dx, y)\n for item in what_to_break:\n if utility._position_is_item(board, position, item):\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n # To up\n for dx in range(1, blast_strength):\n if x - dx < 0:\n break\n position = (x - dx, y)\n for item in what_to_break:\n if utility._position_is_item(board, position, item): \n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n # To right\n for dy in range(1, blast_strength):\n if y + dy >= len(board):\n break\n position = (x, y + dy)\n for item in what_to_break:\n if utility._position_is_item(board, position, item):\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n # To left\n for dy in range(1, blast_strength):\n if y - dy < 0:\n break\n position = (x, y - dy)\n for item in what_to_break:\n if utility._position_is_item(board, position, item):\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n return False\n\n @classmethod\n def _get_direction(cls, this_position, next_position):\n\n \"\"\"\n Direction from this position to next position\n\n Parameters\n ----------\n this_position : tuple\n this position\n next_position : tuple\n next position\n\n Return\n ------\n direction : constants.Item.Action\n \"\"\"\n if this_position == next_position:\n return constants.Action.Stop\n else:\n return utility.get_direction(this_position, next_position)\n\n @classmethod\n def _get_next_position(cls, position, action):\n \"\"\"\n Returns the next position\n \"\"\"\n x, y = position\n if action == constants.Action.Right:\n return (x, y + 1)\n elif action == constants.Action.Left:\n return (x, y - 1)\n elif action == constants.Action.Down:\n return (x + 1, y)\n elif action == constants.Action.Up:\n return (x - 1, y)\n else:\n return (x, y)\n\n \n @classmethod\n def _might_break_powerup(cls, board, my_position, blast_strength, might_powerup):\n\n \"\"\"\n Whether one might break a powerup by placing a bomb at my position\n\n Parameters\n ----------\n board : array\n board\n my_position : tuple\n where to place a bomb\n blast_strength : int\n strength of the bomb\n\n Return\n ------\n boolean\n True iff might break a powerup by placing a bomb\n \"\"\"\n\n x, y = my_position\n # To up\n for dx in range(1, blast_strength):\n if x + dx >= len(board[0]):\n break\n position = (x + dx, y)\n if utility.position_is_powerup(board, position) or might_powerup[position]:\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n # To down\n for dx in range(1, blast_strength):\n if x - dx < 0:\n break\n position = (x - dx, y)\n if utility.position_is_powerup(board, position) or might_powerup[position]:\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n # To right\n for dy in range(1, blast_strength):\n if y + dy >= len(board):\n break\n position = (x, y + dy)\n if utility.position_is_powerup(board, position) or might_powerup[position]:\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n # To left\n for dy in range(1, blast_strength):\n if y - dy < 0:\n break\n position = (x, y - dy)\n if utility.position_is_powerup(board, position) or might_powerup[position]:\n return True\n if not utility.position_is_passage(board, position):\n # stop searching this direction\n break\n return False\n\n def _get_breakable(self, board, my_position, blast_strength, target_item):\n\n \"\"\"\n For each position in board, count the number of woods that can be broken\n by placing a bomb with the given blast strength at that position\n \"\"\"\n\n n_breakable = np.zeros(board.shape)\n broken_by = defaultdict(list) # the bomb positions where each item will be broken\n to_break = defaultdict(list) # items that will be broken by the bomb at each positions\n\n reachable = np.full(board.shape, False)\n q = [my_position]\n while q:\n p = q.pop()\n if reachable[p]:\n continue\n else:\n reachable[p] = True\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n next_position = (p[0] + dx, p[1] + dy)\n if not self._on_board(next_position):\n continue\n if reachable[next_position]:\n continue\n if utility.position_is_wall(board, next_position):\n continue\n q.append(next_position) \n \n rows, cols = np.where(board == target_item.value)\n for wood_position in zip(rows, cols):\n x, y = wood_position\n for dx in range(1, min([blast_strength, board.shape[1] - x])):\n position = (x + dx, y)\n if reachable[position]:\n n_breakable[position] += 1\n broken_by[(x, y)].append(position)\n to_break[position].append((x, y))\n else:\n break\n for dx in range(1, min([blast_strength, x + 1])):\n position = (x - dx, y)\n if reachable[position]:\n n_breakable[position] += 1\n broken_by[(x, y)].append(position)\n to_break[position].append((x, y))\n else:\n break\n for dy in range(1, min([blast_strength, board.shape[1] - y])):\n position = (x, y + dy)\n if reachable[position]:\n n_breakable[position] += 1\n broken_by[(x, y)].append(position)\n to_break[position].append((x, y))\n else:\n break\n for dy in range(1, min([blast_strength, y + 1])):\n position = (x, y - dy)\n if reachable[position]:\n n_breakable[position] += 1\n broken_by[(x, y)].append(position)\n to_break[position].append((x, y))\n else:\n break\n \n return n_breakable, broken_by, to_break\n\n\n def _get_bomb_target(self, board, my_position, blast_strength, target_item, max_breakable=True): \n\n # the number of target_items that can be broken by placing a bomb at each position\n n_breakable, broken_by, to_break = self._get_breakable(board,\n my_position,\n blast_strength,\n target_item)\n\n if not max_breakable:\n return (n_breakable > 0), n_breakable\n \n target = np.full(board.shape, False)\n _n_breakable = deepcopy(n_breakable)\n covered_item = np.full(board.shape, False)\n \n count = np.max(_n_breakable)\n while count > 0:\n # highest count will be the target\n positions = (np.where(_n_breakable == count))\n target[positions] = True\n\n rows, cols = positions\n for bomb_position in zip(rows, cols):\n for item_position in to_break[bomb_position]:\n if covered_item[item_position]:\n continue\n for another_bomb_position in broken_by[item_position]:\n _n_breakable[another_bomb_position] -= 1\n covered_item[item_position] = True\n\n count = np.max(_n_breakable)\n\n return target, n_breakable\n\n def _get_frac_blocked(self, list_boards, my_enemies, board, bomb_life, ignore_dying_agent=True):\n\n blocked_time_positions = dict()\n n_nodes = defaultdict(int)\n for enemy in my_enemies:\n blocked_time_positions[enemy] = defaultdict(set)\n\n # get survivable tree of the enemy\n rows, cols = np.where(board==enemy.value)\n if len(rows) == 0:\n continue\n enemy_position = (rows[0], cols[0])\n survivable, _, _, subtree \\\n = self._search_time_expanded_network(list_boards,\n enemy_position,\n get_subtree=True)\n\n if len(survivable[-1]) == 0 and ignore_dying_agent:\n survivable = [set() for _ in range(len(survivable))]\n\n # time-positions that can be blocked by placing a bomb now\n all_positions = set().union(*[positions for positions in survivable])\n for position in all_positions:\n # do not consider the position that has a bomb now\n if bomb_life[position] > 0:\n continue\n blocked_time_positions[enemy][position] = set().union(*[s[position] for s in subtree])\n # EXLUDE THE ROOT NODE\n blocked_time_positions[enemy][position] -= {(0, enemy_position)}\n\n #n_nodes[enemy] = sum([len(positions) for positions in survivable])\n n_nodes[enemy] = sum([len(positions) for positions in survivable[1:]])\n\n positions = list()\n for enemy in my_enemies:\n positions.extend(list(blocked_time_positions[enemy]))\n \n # fraction of time-positions blocked by placing a bomb at each position\n frac_blocked = np.zeros(self.board_shape)\n for position in positions:\n n_blocked = sum([len(blocked_time_positions[enemy][position]) for enemy in my_enemies])\n n_all = sum([n_nodes[enemy] for enemy in my_enemies])\n if n_all == 0:\n frac_blocked[position] = 0\n else:\n frac_blocked[position] = n_blocked / n_all\n\n return frac_blocked, n_nodes, blocked_time_positions\n \n def _get_frac_blocked_two_lists(self, list_boards_with_action, n_survivable_nodes, board, agents,\n ignore_dying_agent=True):\n\n n_survivable_nodes_with_action = defaultdict(int)\n for agent in agents:\n rows, cols = np.where(board==agent.value)\n if len(rows) == 0:\n continue\n position = (rows[0], cols[0])\n _survivable = search_time_expanded_network(list_boards_with_action,\n position)\n\n if len(_survivable[-1]) == 0 and ignore_dying_agent:\n _survivable = [set() for _ in range(len(_survivable))]\n\n # exclude the root node\n n_survivable_nodes_with_action[agent] = sum([len(positions) for positions in _survivable[1:]])\n\n n_action = sum([n_survivable_nodes_with_action[agent] for agent in agents])\n n_base = sum([n_survivable_nodes[agent] for agent in agents])\n if n_base == 0:\n return 0\n else:\n return 1 - n_action / n_base\n \n \n def _get_reachable(cls, is_rigid):\n\n \"\"\"\n check if reachable to the main passage\n \"\"\"\n\n # check only the upper right triangular area\n # use symmetry to fill in the remaining\n\n reachable = np.full(is_rigid.shape, False)\n\n # set the outer most\n reachable[0, :] = ~is_rigid[0, :]\n reachable[:, -1] = ~is_rigid[:, -1]\n\n # set the three corner\n reachable[(0, 0)] = ~(is_rigid[(0, 1)] and is_rigid[(1,0)])\n reachable[(0, -1)] = ~(is_rigid[(0, -2)] and is_rigid[(1,-1)])\n reachable[(-1, -1)] = ~(is_rigid[(-1, -2)] and is_rigid[(-2,-1)])\n\n # set the main passage\n reachable[1, 1:-1] = True\n reachable[1:-1, -2] = True\n\n # set the inner area\n reachable[2, 2:-2] = ~is_rigid[2, 2:-2]\n reachable[2:-2, -3] = ~is_rigid[2:-2, -3]\n\n checked = np.full(is_rigid.shape, True)\n for i in range(3, is_rigid.shape[0] - 3):\n checked[i, i:-3] = False\n\n cols = np.where(reachable[2, 3:-3])[0] + 3\n rows = np.where(reachable[3:-3, -3])[0] + 3\n Q = [(2, c) for c in cols] + [(r, -3) for r in rows]\n while Q:\n q = Q.pop()\n for (dx, dy) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n position = (q[0] + dx, q[1] + dy)\n if checked[position]:\n continue\n checked[position] = True\n if is_rigid[position]:\n continue\n reachable[position] = True\n Q.append(position)\n\n # by symmetry\n reachable += reachable.T\n \n return reachable\n\n def _get_all_actions(cls):\n return [constants.Action.Bomb,\n constants.Action.Stop,\n constants.Action.Up,\n constants.Action.Down,\n constants.Action.Left,\n constants.Action.Right]\n\n def _get_digging_positions(cls, board, my_position, info):\n\n digging = None\n bomb_target = np.full(board.shape, False)\n \n if board[my_position] == constants.Item.Agent0.value:\n for n in [4, 5, 6]:\n if utility.position_is_wood(info[\"last_seen\"], (1, n)):\n bomb_target[(1, n-1)] = True\n digging = (1, n-1)\n break\n elif board[my_position] == constants.Item.Agent1.value:\n for m in [6, 5, 4]:\n if utility.position_is_wood(info[\"last_seen\"], (m, 1)):\n bomb_target[(m+1, 1)] = True\n digging = (m+1, 1)\n break\n elif board[my_position] == constants.Item.Agent2.value:\n for m in [6, 5, 4]:\n if utility.position_is_wood(info[\"last_seen\"], (m, 9)):\n bomb_target[(m+1, 9)] = True\n digging = (m+1, 9)\n break\n elif board[my_position] == constants.Item.Agent3.value:\n for n in [6, 5, 4]:\n if utility.position_is_wood(info[\"last_seen\"], (1, n)):\n bomb_target[(1, n+1)] = True\n digging = (1, n+1)\n break\n\n if digging is None:\n if board[my_position] == constants.Item.Agent0.value:\n if info[\"since_last_seen\"][(1, 10)] == np.inf:\n bomb_target[(1, 7)] = True\n digging = (1, 7)\n elif board[my_position] == constants.Item.Agent1.value:\n if info[\"since_last_seen\"][(0, 1)] == np.inf:\n bomb_target[(3, 1)] = True\n digging = (3, 1)\n elif board[my_position] == constants.Item.Agent2.value:\n if info[\"since_last_seen\"][(0, 9)] == np.inf:\n bomb_target[(3, 9)] = True\n digging = (3, 9)\n elif board[my_position] == constants.Item.Agent3.value:\n if info[\"since_last_seen\"][(1, 0)] == np.inf:\n bomb_target[(1, 3)] = True\n digging = (1, 3)\n\n return digging, bomb_target\n\n def _get_might_blocked(self, board, my_position, agent_positions, might_kickable):\n\n might_blocked_positions = np.full(board.shape, False)\n for x, y in agent_positions:\n might_blocked_positions[(x, y)] = True\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n next_position = (x + dx, y + dy)\n if not self._on_board(next_position):\n continue\n might_blocked_positions[next_position] = True\n\n # actions that might be blocked\n might_blocked = defaultdict(bool)\n for my_action in [constants.Action.Up,\n constants.Action.Down,\n constants.Action.Left,\n constants.Action.Right]:\n next_position = self._get_next_position(my_position, my_action)\n if not self._on_board(next_position):\n continue\n if any([next_position in might_kickable,\n might_blocked_positions[next_position]]):\n might_blocked[my_action] = True\n\n return might_blocked\n","repo_name":"isscproject/vote-agent","sub_path":"vote_agent/dypmAgents/base_agent.py","file_name":"base_agent.py","file_ext":"py","file_size_in_byte":74592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"41176632859","text":"s1=set()\r\nsize=int(input(\"Enter the size: \"))\r\nfor i in range(0,size):\r\n var=input(\"Enter Element: \")\r\n s1.add(var)\r\nprint(s1)\r\nupdate1=input(\"Do you want to update more elements in set y/n: \")\r\nif ((update1==\"y\")):\r\n sizeadd=int(input(\"Enter how many element you want to add: \"))\r\n print(\"enter element for updating set\")\r\n for i in range(0,sizeadd):\r\n var2=input(\"Enter element: \")\r\n #using add method\r\n #s1.add(var2) \r\n #usin update method\r\n s1.update([var2])\r\n print(s1)\r\nelse:\r\n print(\"Thank you...!\")\r\nprint(\"This is set: \",s1)\r\nrmv=input(\"Enter what you want to remove: \")\r\nif rmv in s1:\r\n s1.remove(rmv)\r\n print(s1)\r\nelse:\r\n print(\"using discard\")\r\n s1.discard(rmv)\r\n\r\n","repo_name":"mansinerkar-11/Python","sub_path":"set_oral.py","file_name":"set_oral.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15916276488","text":"import requests\n# import json\nfrom flask import Flask\n# import math\n\n\ndef get_currencies():\n r = requests.get('https://www.cbr-xml-daily.ru/daily_json.js')\n js = r.json()\n currencies = list(js['Valute'].values())\n return currencies\n\n\ndef create_html(cur):\n text = 'Курсы:
'\n text += ''\n for cur_i in cur:\n text += ''\n text += '| ' + cur_i['Name'] + '(*' + cur_i['CharCode'] + '*): | ' + str(cur_i['Value']) + \\\n ' руб | (' + str(round(cur_i['Previous'] - cur_i['Value'], 2)) + ') | '\n # text = text + curr['ID']\n text += '
'\n text += '
'\n return text\n\n\nprint('Lets Start 13')\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n currencies = get_currencies()\n return create_html(currencies)\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"alexvk1/py_lesson1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31609055980","text":"from keras.models import load_model, model_from_json\nfrom keras.preprocessing import image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import cm\nfrom keras import models\nfrom keras.applications.vgg16 import preprocess_input, decode_predictions\nimport keras.backend as K\nfrom keras.applications.vgg16 import preprocess_input\nfrom image_classification.directory_work import get_train_data\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\n\n\n\nimg_path = r'C:\\Users\\Pavel.Nistsiuk\\PycharmProjects\\people_class\\try.png'\nimg = image.load_img(img_path, target_size=(300, 300))\nimg_tensor = image.img_to_array(img)\nimg_tensor /= 255\n\n\njson_file = open('conv_noAugment_simplePrep_noBN.json')\nload_json = json_file.read()\nmodel = model_from_json(load_json)\nmodel.load_weights('conv_noAugment_simplePrep_noBN.h5')\nimg_tensor = img_tensor.reshape((1, 300, 300, 3))\n\nlayer_outputs = [layer.output for layer in model.layers[:8]]\n\nactivation_model = models.Model(inputs=model.input, outputs=layer_outputs)\n\nactivations = activation_model.predict(img_tensor)\nfirst_layer_activation = activations[0]\nprint(first_layer_activation.shape)\nfor i in range(32):\n plt.matshow(first_layer_activation[0, :, :, i], cmap='viridis')\n plt.savefig(r'C:\\Users\\Pavel.Nistsiuk\\PycharmProjects\\people_class\\\\vis_plots\\\\' + str(i) + '.jpg')\n plt.close()\n\nlayer_names = []\nfor layer in model.layers[:8]:\n layer_names.append(layer.name)\n\nimages_per_row = 16\n\n# Now let's display our feature maps\nfor layer_name, layer_activation in zip(layer_names, activations):\n # This is the number of features in the feature map\n n_features = layer_activation.shape[-1]\n\n # The feature map has shape (1, size, size, n_features)\n size = layer_activation.shape[1]\n\n # We will tile the activation channels in this matrix\n n_cols = n_features // images_per_row\n display_grid = np.zeros((size * n_cols, images_per_row * size))\n\n # We'll tile each filter into this big horizontal grid\n for col in range(n_cols):\n for row in range(images_per_row):\n channel_image = layer_activation[0,\n :, :,\n col * images_per_row + row]\n # Post-process the feature to make it visually palatable\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n display_grid[col * size: (col + 1) * size,\n row * size: (row + 1) * size] = channel_image\n\n # Display the grid\n scale = 1. / size\n plt.figure(figsize=(scale * display_grid.shape[1],\n scale * display_grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\n plt.savefig(r'C:\\Users\\Pavel.Nistsiuk\\PycharmProjects\\people_class\\\\vis_plots\\\\' + layer_name + '.jpg')\n plt.close()\n\nplt.show()","repo_name":"NiP22/image_classification","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"74476548625","text":"from adsv.utils.types import *\nfrom adsv.semantic_model.lane_map import Lane, LaneId, SectionId\nfrom adsv.semantic_model.common.map_common import RJType, TurnType\n\n\nclass LaneCluster(metaclass=multimeta):\n def __init__(self, lanes: List[Lane]):\n self._lanes: List[Lane] = lanes\n self._lane_section_map: Dict[LaneId, int] = {lane.id: sid for sid, lane in enumerate(self._lanes)}\n self._section_lanes_map: Dict[int, FrozenSet[Lane]] = {sid: frozenset({lane}) for sid, lane in enumerate(self._lanes)}\n self._process_cluster()\n self.clusters = frozenset(self._section_lanes_map.values())\n\n def _process_cluster(self):\n flag = True\n while flag:\n flag = False\n\n for i in range(len(self._lanes)):\n for j in range(i + 1, len(self._lanes)):\n lane1 = self._lanes[i]\n lane2 = self._lanes[j]\n if self._lane_section_map[lane1.id] == self._lane_section_map[lane2.id]:\n continue\n\n if lane2.id == lane1.left_forward_neighbor_id:\n self._union_section(lane1.id, lane2.id)\n flag = True\n elif lane2.id == lane1.right_forward_neighbor_id:\n self._union_section(lane1.id, lane2.id)\n flag = True\n elif self._has_same_vertices(lane1.id, lane2.id) and \\\n lane1.rj_type == lane2.rj_type == RJType.JUNCTION and lane1.turn_type == lane2.turn_type:\n self._union_section(lane1.id, lane2.id)\n flag = True\n\n def _union_section(self, lane1_id: LaneId, lane2_id: LaneId):\n section1_id = self._lane_section_map[lane1_id]\n section2_id = self._lane_section_map[lane2_id]\n for lane in self._section_lanes_map[section2_id]:\n self._lane_section_map[lane.id] = section1_id\n self._section_lanes_map[section1_id] |= self._section_lanes_map[section2_id]\n self._section_lanes_map.pop(section2_id)\n\n def _has_same_vertices(self, lane1_id: 'LaneId', lane2_id: 'LaneId') -> bool:\n section1_id = self._lane_section_map[lane1_id]\n section2_id = self._lane_section_map[lane2_id]\n return self._get_section_predecessors_id(section1_id) == self._get_section_predecessors_id(section2_id) and \\\n self._get_section_successors_id(section1_id) == self._get_section_successors_id(section2_id)\n\n def _get_section_predecessors_id(self, sec_id: int) -> Set[int]:\n ret = set()\n for lane in self._section_lanes_map[sec_id]:\n for pred_lane_id in lane.predecessors_id:\n pred_section_id = self._lane_section_map[pred_lane_id]\n ret.add(pred_section_id)\n return ret\n\n def _get_section_successors_id(self, sec_id: int) -> Set[int]:\n ret = set()\n for lane in self._section_lanes_map[sec_id]:\n for suc_lane_id in lane.successors_id:\n suc_section = self._lane_section_map[suc_lane_id]\n ret.add(suc_section)\n return ret\n","repo_name":"LIIHWF/RvADS","sub_path":"adsv/semantic_model/lane_map/adapter/lane_cluster.py","file_name":"lane_cluster.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"3748564660","text":"class Solution:\n # BOTTOM UP\n def bottomUp(self, s: str, p: str) -> bool:\n pass\n\n # TOP DOWN MEMO\n def topDown(self, s: str, p: str) -> bool:\n cache = {}\n def dfs(i, j):\n # base cases\n if (i, j) in cache:\n return cache[(i, j)]\n if i >= len(s) and j >= len(p):\n # we've matched the string to the pattern\n return True\n if j >= len(p):\n # we can't match a string with a larger pattern\n return False\n\n # establish a match\n match = i < len(s) and (s[i] == p[j] or p[j] == '.')\n # is the next pattern a star?\n if (j + 1) < len(p) and p[j + 1] == '*':\n # 1) use # 2) don't use *\n \n cache[(i, j)] = dfs(i, j + 2) or (match and dfs(i + 1, j))\n return cache[(i, j)]\n\n # if no star, check simple match\n if match:\n cache[(i, j)] = dfs(i + 1, j + 1)\n return cache[(i, j)]\n\n cache[(i, j)] = False\n return cache[(i, j)]\n\n return dfs(0, 0)\n\n\nif __name__ == '__main__':\n s = 'abc'\n p = 'a..' # true\n # p = 'a*' # true\n # p = '.b' # false\n sol = Solution()\n print(sol.topDown(s, p))","repo_name":"jprice8/interview-prep","sub_path":"dynamic-programming/regularExpressionMatching.py","file_name":"regularExpressionMatching.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"40223686835","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import (\n\tpost_list,\n\tpost_create,\n\tpost_detail,\n\tpost_update,\n\tpost_delete,\n\tPostDetailView,\n\tboard_list,\n\tboard_create,\n\tstart_page,\n\tlink,\n\tdonate,\n\tcontact,\n\tadmin\n\t#board_update,\n\t#board_delete,\n\t#board_detail,\n\t#BoardDetailView\n\t)\n\nurlpatterns = [\n\turl(r'^home/$', start_page,name='home'),\n\turl(r'^link/$', link,name='link'),\n\turl(r'^donate/$', donate,name='donate'),\n\turl(r'^contact/$', contact,name='contact'),\n\turl(r'^admin/$', admin,name='admin'),\n\t#url(r'^board/$', board_list, name='board'),\n\turl(r'^blogs/$', post_list, name='list'),\n url(r'^blogs/create/$', post_create),\n #url(r'^board/create/$', board_create),\n #url(r'^(?P[\\w-]+)/$', post_detail, name='detail'),\n #url(r'^(?P[\\w-]+)/$', board_detail, name='detail'),\n #url(r'^(?P[\\w-]+)/board/$', BoardDetailView.as_view(), name='detail'),\n url(r'^(?P[\\w-]+)/$', PostDetailView.as_view(), name='detail'), #Django Code Review #3 on joincfe.com/youtube/\n url(r'^(?P[\\w-]+)/edit/$', post_update, name='update'),\n #url(r'^(?P[\\w-]+)/board/edit/$', board_update, name='board_update'),\n url(r'^(?P[\\w-]+)/delete/$', post_delete, name='deletepost'),\n #url(r'^(?P[\\w-]+)/board/delete/$', board_delete),\n #url(r'^posts/$', \".views.\"),\n]\n","repo_name":"Ishita-pahwa/NoExcuseHunting","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12769202509","text":"import os\nimport sys\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nimport glob\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker as tk\nfrom pprint import pprint as pp\nfrom sklearn.externals import joblib as jl\nfrom itertools import product,combinations\nfrom plot_util import customaxis\n\n\n# plot the profit and quantity sold for each seller \n############################################################\ndef make_ax_profit_quantity(pd_profit, pd_quantity_sold, a_marker, a_color,\n num_sellers, gamma, fontsize=20, x_ticks=None, ax=None):\n color = a_color[0]\n pd_data = pd_profit\n if x_ticks is None:\n x_ticks = range(len(pd_data))\n x_ticks = np.round(x_ticks, 2)\n for i in range(num_sellers):\n ax.plot(x_ticks, pd_data[i], '-', marker=a_marker[i],\n markerfacecolor='none', color=color, label = 'Firm %d'%(i+1))\n ax.set_xlim(min(x_ticks), max(x_ticks))\n ax.set_xticks(x_ticks)\n customaxis(ax = ax, position = 'left', color = color, label = 'Profit',\n scale = 'linear', size = fontsize, full_nrs = False, location = 0.0)\n ax.get_xaxis().set_major_formatter(tk.FuncFormatter(lambda x, p:\n format(x,',')))\n ax.legend()\n leg = ax.get_legend()\n (leg.legendHandles[i].set_color('black') for i in range(num_sellers))\n ax.set_title(\"$\\gamma$ = {} and Firm 1's initial cost is 99\\% of Firm 2's\".format(gamma),\n fontsize=fontsize +5)\n# Quantity\n axt = ax.twinx()\n color = a_color[1]\n pd_data = pd_quantity_sold\n for i in range(num_sellers):\n axt.plot(x_ticks, pd_data[i], '-', marker=a_marker[i],\n markerfacecolor='none', color=color)\n axt.grid(False)\n customaxis(ax = axt, position = 'right', color = color, label = 'Quantity',\n scale = 'linear', size = fontsize, full_nrs = False, location = 1.0)\n\n# plot the profit and quantity sold for each seller \n############################################################\ndef make_ax_profit_quantity_share(pd_profit, pd_quantity_sold, a_marker,\n a_color, num_sellers, gamma, fontsize=20, x_ticks=None, ax=None):\n # FIXME: x_ticks\n color = a_color[0]\n pd_data = pd_profit.div(pd_profit.sum(1), 0)\n print('HIHIHI')\n print(pd_profit)\n print(pd_data)\n if x_ticks is None:\n x_ticks = range(len(pd_data))\n x_ticks = np.round(x_ticks, 2)\n for i in range(num_sellers):\n ax.plot(x_ticks, pd_data[i], '-', marker=a_marker[i],\n markerfacecolor='none', color=color, label = 'Firm %d'%(i+1))\n ax.set_xlim(min(x_ticks), max(x_ticks))\n ax.set_xticks(x_ticks)\n customaxis(ax = ax, position = 'left', color = color, label = 'Profit Share',\n scale = 'linear', size = fontsize, full_nrs = False, location = 0.0)\n ax.get_xaxis().set_major_formatter(tk.FuncFormatter(lambda x, p:\n format(x,',')))\n ax.legend()\n leg = ax.get_legend()\n (leg.legendHandles[i].set_color('black') for i in range(num_sellers))\n# Quantity\n axt = ax.twinx()\n color = a_color[1]\n pd_data = pd_quantity_sold.div(pd_quantity_sold.sum(1), 0)\n print('BYBYBY')\n print(pd_quantity_sold)\n print(pd_data)\n for i in range(num_sellers):\n axt.plot(x_ticks, pd_data[i], '-', marker=a_marker[i],\n markerfacecolor='none', color=color)\n axt.grid(False)\n customaxis(ax = axt, position = 'right', color = color, label = 'Quantity Share',\n scale = 'linear', size = fontsize, full_nrs = False, location = 1.0)\n\n# plot the cost and price\n############################################################\ndef make_ax_cost_price(pd_cost, pd_price, pd_cournot, a_marker, a_color,\n num_sellers, gamma, fontsize = 20, x_ticks=None, ax = None):\n# for the left y axis of axis:\n pd_data = pd_price\n color = a_color[0]\n if x_ticks is None:\n x_ticks = range(len(pd_data))\n for i in range(num_sellers):\n ax.plot(x_ticks, pd_data[i], color=color, marker = a_marker[i],\n markerfacecolor='none', label='')\n if gamma == 0:\n ax.plot(x_ticks, pd_cournot, color=color, linestyle='dashed',\n label='Theoretical Cournot Price')\n ax.legend(loc='lower left')\n ax.spines['left'].set_color(color)\n ax.tick_params(axis='y', color=color)\n [i.set_color(color) for i in ax.get_yticklabels()]\n ax.yaxis.set_label_position(\"left\")\n ax.set_ylabel('Price', color=color, fontsize=fontsize)\n ax.set_xlim(min(x_ticks), max(x_ticks))\n# for the 'right' axis it is similar\n axt = ax.twinx()\n pd_data = pd_cost\n color = a_color[1]\n for i in range(num_sellers):\n axt.plot(x_ticks, pd_data[i], marker= a_marker[i],\n markerfacecolor='none', color=color)\n axt.spines['right'].set_color(color)\n axt.tick_params(axis='y', color=color)\n [i.set_color(color) for i in axt.get_yticklabels()]\n axt.yaxis.set_label_position(\"right\")\n axt.set_ylabel('Cost', color=color, fontsize=fontsize)\n axt.set_ylim(0, 110)\n axt.grid(False)\n return ax\n\ndef make_column(fn, ax_profit_quantity, ax_profit_quantity_share,\n ax_cost_price, x_ticks_name=None):\n print(fn)\n d_load = jl.load(fn)\n# Data from d_load\n##########################################################\n print(d_load)\n scalar_tax = d_load['scalar_tax'][0]\n gamma = d_load['gamma'][0]\n endowment = d_load['endowment'][0]\n num_sellers = int(d_load['num_sellers'][0])\n num_buyers = int(d_load['num_buyers'][0])\n m_tax = d_load['m_tax'][0]\n pd_quantity = pd.DataFrame(d_load['a_quantity_nash'])\n pd_quantity_sold = pd.DataFrame(d_load['a_quantity_sold'])\n pd_price = pd.DataFrame(d_load['a_price_nash'])\n pd_cost = pd.DataFrame(np.array(d_load['a_cost']))\n pd_profit = pd.DataFrame(d_load['a_profit'])\n# x_ticks stuff\n x_ticks = range(len(pd_profit))\n if x_ticks_name is not None:\n print(x_ticks_name)\n x_ticks = d_load[x_ticks_name]\n x_ticks = np.round(x_ticks, 3)\n print(x_ticks)\n# Data from calculations\n pd_quantity_unsold = pd_quantity - pd_quantity_sold\n# Cournot price at Nash quantity (A + n\\bar c)/(n+1)\n pd_cournot = (endowment + pd_cost.sum(1))/(num_sellers+1)\n num_timesteps = len(d_load['a_cost']) - 1\n#check for if the stuff ended early\n if np.isnan(d_load['a_profit'][num_timesteps][0]):\n num_timesteps = num_timesteps - 1\n# Stuff for plotting\n a_marker = ['s', 'x', '*', 'o', 'D']\n a_color = sb.color_palette(\"deep\", 6)\n# Plots Here\n make_ax_profit_quantity(pd_profit, pd_quantity_sold, a_marker,\n a_color[0:2], num_sellers, gamma, x_ticks=x_ticks,\n ax=ax_profit_quantity)\n make_ax_profit_quantity_share(pd_profit, pd_quantity_sold, a_marker,\n a_color[2:4], num_sellers, gamma, x_ticks=x_ticks,\n ax=ax_profit_quantity_share)\n print(5)\n make_ax_cost_price(pd_cost, pd_price, pd_cournot, a_marker, a_color[4:6],\n num_sellers, gamma, x_ticks=x_ticks, ax=ax_cost_price)\n\ndef write_time_plot_from_file(fn, fn_out, folder = None):\n print(fn)\n sb.set_style(\"darkgrid\")\n# set figure size\n##########################################################\n nrow = 3\n ncol = 1\n width_scale = 12\n height_scale = 4\n figsize = (width_scale*ncol,height_scale*nrow)\n fontsize = 14\n# create the plot window\n##########################################################\n fig = plt.figure(figsize=figsize)\n fig.subplots_adjust(hspace=.3, wspace=0.3)\n plt.rc('text', usetex=True)\n# set all axes instances\n##########################################################\n ax_profit_quantity = plt.subplot2grid((nrow,ncol), (0,0))\n ax_profit_quantity_share = plt.subplot2grid((nrow,ncol), (1,0), sharex=ax_profit_quantity)\n ax_cost_price = plt.subplot2grid((nrow,ncol), (2,0), sharex=ax_profit_quantity)\n# annotate\n##########################################################\n a_ax = fig.get_axes()\n a_annote = ['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)']\n for (ax, annote) in zip(a_ax, a_annote[:len(a_ax)]):\n ax.annotate(annote,\n xy = (-0.12, 0.96),\n xycoords = 'axes fraction',\n fontsize = 12,\n ha = 'left',\n va = 'top' )\n make_column(fn, ax_profit_quantity, ax_profit_quantity_share, ax_cost_price, 'scalar_tax')\n# write figure to the output \n############################################################\n fn_out = 'plot_' + fn_out\n out_folder = './output/plots/'\n if not os.path.exists(out_folder): os.makedirs(out_folder)\n plt.savefig(out_folder + fn_out + '.png', bbox_inches='tight')\n\n# write figure to the screen (must go after file creation or doesn't write\n# correctly) \n############################################################\n #plt.show()\n\n##############\n### SCRIPT ###\n##############\n\nif __name__ == \"__main__\":\n a_fn_out = np.array(['turn_gamma=1.0_endow=120.0_taxmethod=cardinal_seed={}.pickle'.format(i)\n for i in range(1,100)])\n folder1 = '/home/nate/Documents/abmcournotmodel/code/output/data/'\n folder2 = '/cluster/home/slera//abmcournotmodel/code/output/data/'\n folder3 = \"C:/Users/CAREBEARSTARE3_USER/Documents/WORK/MITInternship/ModelWithSandro/abmcournotmodel/code/output/data/\"\n folder = folder3\n a_fn = glob.glob(folder + \"turn*\")\n write_time_plot_from_file(folder + 'mean_turn1.pkl', 'mean_turn2')\n print('asdfasdfasfdas')\n [write_time_plot_from_file(a_fn[i], a_fn_out[i]) for i in range(len(a_fn))]\n","repo_name":"nate9799/KrepsScheinkmanModel","sub_path":"code/time_plots.py","file_name":"time_plots.py","file_ext":"py","file_size_in_byte":9781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18945200347","text":"from setuptools import setup\nfrom pathlib import Path\n\n\nthis_directory = Path(__file__).parent.absolute()\nreadme = this_directory/Path('README.md')\nwith readme.open('r') as f:\n long_description = f.read()\n\nsetup(name='wallsch',\n version='0.6',\n description='A simple wallpaper changer/scheduler with night/day split',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Blazej Sewera',\n url='https://github.com/jazzsewera/wallsch',\n license='MPL2',\n packages=['wallsch'],\n install_requires=[\n 'apscheduler',\n 'suntime',\n 'tzlocal',\n 'pyro4'\n ],\n entry_points={\n 'console_scripts': [\n 'wallschd = wallsch.wallschd:main',\n 'wallschctl = wallsch.wallschctl:main'\n ]\n })\n","repo_name":"blazejsewera/wallsch","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"16014977971","text":"from typing import List\n\n\n# 关于判断条件以及答案添加,整体思路没问题。\n\ndef solveNQueens(self, n: int) -> List[List[str]]:\n if n == 1:\n return [['Q']]\n\n def isIllegal(row: int, col: int) -> bool:\n # 行与列判断\n if 'Q' in temp[row]:\n return True\n for i in range(0, row):\n if temp[i][col] == 'Q':\n return True\n # 左上\n temp_row, temp_col = row - 1, col - 1\n while temp_col >= 0 and temp_row >= 0:\n if temp[temp_row][temp_col] == 'Q':\n return True\n temp_col -= 1\n temp_row -= 1\n # 右上\n temp_row, temp_col = row - 1, col + 1\n while temp_col < n and temp_row >= 0:\n if temp[temp_row][temp_col] == 'Q':\n return True\n temp_row -= 1\n temp_col += 1\n\n return False\n\n def dfs(row: int):\n if row == n:\n # 添加答案\n temp_ans = [''] * n\n i = 0\n for line in temp:\n temp_ans_line = ''\n for char in line:\n temp_ans_line += char\n temp_ans[i] = temp_ans_line\n i += 1\n ans.append(temp_ans)\n return\n\n for col in range(0, n):\n if isIllegal(row, col):\n continue\n temp[row][col] = 'Q'\n dfs(row + 1)\n temp[row][col] = '.'\n\n temp = [['.' for _ in range(0, n)] for _ in range(0, n)]\n ans = []\n dfs(0)\n return ans\n","repo_name":"Dirtytrii/leetcodePython","sub_path":"蓝桥杯备赛/复习/动态规划/n皇后.py","file_name":"n皇后.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3034692321","text":"import sys\nimport argparse\nimport os\nimport json\nimport subprocess\nfrom collections import defaultdict\nsys.path.append('/sw/arc/centos7/gurobi/gurobi652/linux64/lib/python2.7')\nsys.path.append('/sw/arc/centos7/gurobi/gurobi652/linux64/lib/python2.7/')\nsys.path.append('/sw/arc/centos7/gurobi/gurobi652/linux64/lib/python2.7/gurobipy')\n\nimport gurobipy as grb\n\nimport make_mip\n\n__author__ = \"Byron Tasseff, Connor Riley\"\n__credits__ = [\"Byron Tasseff\", \"Connor Riley\"]\n__license__ = \"MIT\"\n__version__ = \"0.0.6\"\n__maintainer__ = \"Connor Riley\"\n__email__ = \"\"\n__status__ = \"Development\"\n\n\ndef main(input_folder, output_folder, use_mixed_on_pure):\n # make sure the json files needed for input exist, if not write them\n write_inputs(use_mixed_on_pure)\n # initalize the files to store the results\n (result_files, output_path) = initialize_results_files(input_folder, output_folder, use_mixed_on_pure)\n results_store = {}\n results_store[\"num_solved\"] = {}\n num_vars = 0\n results_store[\"num_cuts\"] = defaultdict(list)\n #subdirs = get_immediate_subdirectories(input_folder)\n #for subdir in subdirs:\n # full_subdir = input_folder + \"/\" + subdir\n onlyfiles = [f for f in os.listdir(input_folder) if os.path.isfile(os.path.join(input_folder, f))]\n for j, f in enumerate(onlyfiles):\n full_path = input_folder + \"/\" + f\n (objective, num_vars, gurobi_failed) = run_gurobi(full_path)\n instance_output_path = make_output_path(output_path, f)\n if not os.path.exists(instance_output_path):\n os.makedirs(instance_output_path)\n run_gomory(full_path, instance_output_path, use_mixed_on_pure)\n process_results(instance_output_path, result_files, objective,\n results_store, use_mixed_on_pure, j, gurobi_failed)\n write_results_store(results_store, output_path, num_vars)\n return 0\n\n\ndef write_results_store(results_store, folder, num_vars):\n for method in results_store[\"num_cuts\"]:\n a = results_store[\"num_cuts\"][method]\n avg = sum(a) / float(len(a))\n path = folder + \"/\" + \"avg_cuts.csv\"\n write_average_cuts(avg, path, method)\n num_solved_store = results_store[\"num_solved\"]\n for solve_type in num_solved_store:\n path = get_bar_graph_path(solve_type, folder)\n write_bar_graph_data(path, num_solved_store, solve_type, num_vars)\n return 0\n\n\ndef write_average_cuts(avg, path, method):\n data_to_write = method + \",\" + str(avg) + \"\\n\"\n if not os.path.exists(path):\n f = open(path, 'w')\n f.write('type,avg\\n')\n f.close()\n f = open(path, \"a\")\n f.write(data_to_write)\n f.close()\n\n\ndef process_results(output_path, result_files, actual_objective, results_store, \n use_mixed_on_pure, j, gurobi_failed):\n methods = [\"naive\", \"lex\", \"rounds\", \"purging\", \"rounds_purging\", \n \"lex_rounds\", \"lex_purging\", \"lex_rounds_purging\"]\n last_lines = []\n if use_mixed_on_pure:\n methods.extend([\"naive_mixed\", \"lex_mixed\", \"rounds_mixed\", \n \"purging_mixed\", \"rounds_purging_mixed\", \"lex_rounds_mixed\", \n \"lex_purging_mixed\", \"lex_rounds_purging_mixed\"])\n for i, method in enumerate(methods):\n stats = get_stats(output_path + \"/\" + method + \".txt\", actual_objective, \n True, gurobi_failed)\n num_cuts = int(stats[0])\n if method not in results_store[\"num_solved\"].keys():\n results_store[\"num_solved\"][method] = 0\n if stats[-1] == True : results_store[\"num_solved\"][method] += 1\n gap = None\n if num_cuts < 10000 and stats[-1] == True and not gurobi_failed:\n results_store[\"num_cuts\"][method].append(num_cuts)\n obj = float(stats[3])\n gap = obj - actual_objective\n elif not gurobi_failed:\n stats = get_stats(output_path + \"/\" + method + \".txt\", \n actual_objective, False, gurobi_failed)\n obj = float(stats[3])\n gap = obj - actual_objective\n elif gurobi_failed:\n results_store[\"num_cuts\"][method].append(num_cuts)\n obj = float(stats[3])\n gap = 0\n statsnew = []\n for el in stats:\n statsnew.append(el)\n statsnew.append(gap)\n statsnew.append(gurobi_failed)\n last_lines.append(statsnew)\n for i, path in enumerate(result_files):\n write_data_line(path, last_lines[i], j)\n return 0\n\n\ndef make_output_path(output_path, file_name):\n split = file_name.split(\".\")\n return output_path + \"/\" + split[0]\n\n\ndef run_gurobi(file_path):\n fout = open(\"out.txt\",\"w\")\n rc = subprocess.check_call([\"gurobi_cl\", \"ResultFile=\" + \n \"gurobi_solution.sol\", file_path], stdout=fout)\n fout.close()\n gurobi_failed = False\n if ('Warning: cleanup yields a better optimal solution due to numeric instability' in open('out.txt').read() and \n '(model may be infeasible or unbounded - try turning presolve off)' in open('out.txt').read()):\n gurobi_failed = True \n with open(\"gurobi_solution.sol\") as f:\n content = f.readlines()\n obj = content[0].split(\"=\")[1]\n num_vars = len(content) - 1\n return (float(obj), num_vars, gurobi_failed)\n\n\ndef run_gomory(input_path, output_path, use_mixed_on_pure):\n rc = subprocess.check_call([\"./RUN.sh\", input_path, output_path])\n if use_mixed_on_pure:\n rc = subprocess.check_call([\"./RUN_MIXED.sh\", input_path, output_path])\n return 0\n\n\ndef get_stats(filepath, actual_objective, b, gurobi_failed):\n with open(filepath, \"r\") as f:\n lines = f.read().splitlines()\n max_det = 0\n for i,line in enumerate(lines):\n if i == 0:\n continue\n split_line = line.split(\",\")\n det = split_line[2]\n if abs(float(det)) > max_det:\n max_det = abs(float(det))\n if b:\n last_line_split = lines[-1].split(\",\")\n else:\n last_line_split = lines[-2].split(\",\")\n obj = last_line_split[3]\n num_cuts = last_line_split[0]\n num_constr = last_line_split[1]\n achieved_solution = test_for_solution(num_cuts, obj, actual_objective, \n gurobi_failed)\n return(num_cuts, num_constr, max_det, obj, achieved_solution) \n\n\ndef test_for_solution(num_cuts, obj, actual_objective, gurobi_failed):\n print(\"Actual Objective: \" + str(actual_objective))\n print(\"Num Cuts: \" + str(num_cuts))\n if (gurobi_failed and type(num_cuts) == type(\"c\") or \n gurobi_failed and int(num_cuts) < 2499):\n return True\n test = abs(float(obj) - float(actual_objective))\n if test < .00001 and int(num_cuts) < 2499:\n return True\n return False\n\n\ndef write_bar_graph_data(bar_path, results_store, solve_method, num_vars):\n data_to_write = str(num_vars) + \",\" + str(results_store[solve_method]) + \"\\n\"\n if not os.path.exists(bar_path):\n f = open(bar_path, 'w')\n f.write('num_starting_vars,num_finished\\n')\n f.close()\n f = open(bar_path, \"a\")\n f.write(data_to_write)\n f.close()\n\n\ndef get_bar_graph_path(solve_method, folder):\n return folder + \"/\" + \"bar_graph_\" + solve_method + \".csv\"\n\n\ndef initialize_results_files(input_folder, output_folder, use_mixed_on_pure):\n output_path = output_folder + \"/\" + input_folder.split(\"/\")[-1]\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n results_file_path_naive = output_path + \"/results_naive.csv\"\n results_file_path_lex = output_path + \"/results_lex.csv\"\n results_file_path_rounds = output_path + \"/results_rounds.csv\"\n results_file_path_purging = output_path + \"/results_purging.csv\"\n results_file_path_rounds_purging = output_path + \"/results_rounds_purging.csv\"\n results_file_path_lex_rounds = output_path + \"/results_lex_rounds.csv\"\n results_file_path_lex_purging = output_path + \"/results_lex_purging.csv\"\n results_file_path_lex_rounds_purging = output_path + \"/results_lex_rounds_purging.csv\" \n filepaths = [results_file_path_naive, results_file_path_lex, \n results_file_path_rounds, results_file_path_purging, \n results_file_path_rounds_purging, results_file_path_lex_rounds,\n results_file_path_lex_purging, results_file_path_lex_rounds_purging]\n if use_mixed_on_pure:\n path1 = output_path + \"/results_naive_mixed.csv\"\n path2 = output_path + \"/results_lex_mixed.csv\"\n path3 = output_path + \"/results_rounds_mixed.csv\"\n path4 = output_path + \"/results_purging_mixed.csv\"\n path5 = output_path + \"/results_rounds_purging_mixed.csv\"\n path6 = output_path + \"/results_lex_rounds_mixed.csv\"\n path7 = output_path + \"/results_lex_purging_mixed.csv\"\n path8 = output_path + \"/results_lex_rounds_purging_mixed.csv\" \n filepaths.extend([path1, path2, path3, path4, path5, path6, path7, path8])\n create_results_files(filepaths)\n return (filepaths, output_path)\n\n\ndef create_results_files(file_array):\n for fn in file_array:\n f = open(fn, 'w')\n f.write('problem_num, num_cuts,num_constr,det,obj,solved,gap,gurobi_failed\\n')\n f.close()\n return 0\n\n\ndef write_data_line(filepath, line, j):\n line_to_write = str(str(j) + \",\" + line[0]) + \",\" + str(line[1]) + \",\" + str(\n line[2]) + \",\" + str(line[3]) + \",\" + str(line[4])+ \",\" + str(\n line[5]) + \",\" + str(line[6]) + \"\\n\"\n f = open(filepath, 'a')\n f.write(line_to_write)\n f.close()\n\n\ndef write_inputs(use_mixed_on_pure):\n write_input(folder, False, False, False, False, \"naive\")\n write_input(folder, True, False, False, False, \"rounds\")\n write_input(folder, False, True, False, False, \"lex\")\n write_input(folder, False, False, True, False, \"purging\")\n write_input(folder, True, True, False, False, \"lex_rounds\")\n write_input(folder, False, True, True, False, \"lex_purging\")\n write_input(folder, True, False, True, False, \"rounds_purging\")\n write_input(folder, True, True, True, False, \"lex_rounds_purging\")\n if use_mixed_on_pure:\n write_input(folder, False, False, False, True, \"naive_mixed\")\n write_input(folder, True, False, False, True, \"rounds_mixed\")\n write_input(folder, False, True, False, True, \"lex_mixed\")\n write_input(folder, False, False, True, True, \"purging_mixed\")\n write_input(folder, True, True, False, True, \"lex_rounds_mixed\")\n write_input(folder, False, True, True, True, \"lex_purging_mixed\")\n write_input(folder, True, False, True, True, \"rounds_purging_mixed\")\n write_input(folder, True, True, True, True, \"lex_rounds_purging_mixed\")\n\n\ndef write_input(folder, rounds, lex, purging, mixed, name):\n d = {}\n d[\"parameters\"] = {}\n d[\"parameters\"][\"maxCuts\"] = 2500\n d[\"parameters\"][\"awayEpsilon\"] = .01\n d[\"parameters\"][\"purgeEpsilon\"] = 1.0e-9\n d[\"parameters\"][\"useRounds\"] = rounds\n d[\"parameters\"][\"useLexicographic\"] = lex\n d[\"parameters\"][\"useMixedCut\"] = mixed\n d[\"parameters\"][\"usePurging\"] = purging\n with open(name + '.json', 'w') as outfile:\n json.dump(d, outfile)\n return name\n\n\ndef read_last_line(filepath):\n with open(filepath, 'r') as f:\n lines = f.read().splitlines()\n last_line = lines[-1]\n return last_line\n\n\ndef get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\nif __name__ == \"__main__\":\n description = 'Generate random feasible problems, solve them and output results.'\n parser = argparse.ArgumentParser(description = description)\n parser.add_argument('input', type=str, nargs=1,\n metavar = 'input',\n help = 'input folder')\n parser.add_argument('folder', type=str, nargs=1,\n metavar = 'folder',\n help = 'folder to put statistics in')\n parser.add_argument('-m', '--m', action=\"store_true\", default=False,\n\t\t\thelp = 'use this option to use mixed cuts on pure problems')\n\n args = parser.parse_args()\n input_folder = args.input[0]\n folder = args.folder[0]\n use_mixed_on_pure = args.m\n main(input_folder, folder, use_mixed_on_pure)\n","repo_name":"tasseff/gomory","sub_path":"scripts/solve_problem.py","file_name":"solve_problem.py","file_ext":"py","file_size_in_byte":12151,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"43790271685","text":"# Дан список. Выведите те его элементы, которые встречаются в списке только один раз. Элементы нужно\n# выводить в том порядке, в котором они встречаются в списке.\n# Формат ввода\n# Вводится список чисел. Все числа списка находятся на одной строке.\n# Формат вывода\n# Выведите ответ на задачу.\n\na = list(map(int, input().split()))\nd = {}\nfor i in range(len(a)):\n if a[i] not in d:\n d[a[i]] = [0, i]\n d[a[i]][0] += 1\nres = []\nfor k, v in d.items():\n if v[0] == 1:\n res.append((v[1], k))\nres.sort()\nfor item in res:\n print(item[1], end=' ')\n","repo_name":"ann74/algorithms_practice","sub_path":"Con2.0_divB/sets/task_c.py","file_name":"task_c.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72897175507","text":"\"\"\"\n Defines an ORM for clingraphs using clorm\n\"\"\"\nimport logging\nfrom jinja2 import Template\nimport clorm\nfrom clorm import Predicate, RawField, ComplexTerm, refine_field, ConstantField, Raw\nfrom clorm import FactBase as ClormFactBase\nfrom clingo.symbol import Function, String\nfrom .exceptions import InvalidSyntax\nfrom .utils import pythonify_symbol, stringify_symbol\n\nlog = logging.getLogger('custom')\n\n\nif hasattr(clorm.orm.symbols_facts, 'NonFactError'):\n NonFactError = clorm.orm.symbols_facts.NonFactError # NOLINT\nelse:\n NonFactError = NotImplementedError\n\nif hasattr(clorm.orm.symbols_facts, 'FactParserError'):\n FactParserError = clorm.orm.symbols_facts.FactParserError # NOLINT\nelse:\n FactParserError = NotImplementedError\n\nclass AttrID(ComplexTerm):\n # pylint: disable=missing-class-docstring\n attr_name = ConstantField\n attr_variable = RawField\n attr_key = RawField\n\n class Meta:\n is_tuple = True\n\nclass AttrIDSugar(ComplexTerm):\n # pylint: disable=missing-class-docstring\n attr_name = ConstantField\n attr_variable = RawField\n\n class Meta:\n is_tuple = True\n\n\nElementType = refine_field(ConstantField,\n [\"graph\", \"node\", \"edge\", \"graph_nodes\", \"graph_edges\"])\n\n\nclass Factbase():\n \"\"\"\n Stores facts that are accepted by clingraphs syntax.\n It performs a preprocessing of the facts to unify them, and\n uses clorm as ORM to store and query the facts.\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self, prefix: str = \"\", default_graph:str =\"default\"):\n \"\"\"\n Defines the factbase behavior based on the prefix for the predicates and\n the name of the deafult graph\n\n Args:\n prefix (str): The prefix to all predicate names\n default_graph (str): Name of the default graph,\n all elements with arity 1 will be assigned to this graph\n\n \"\"\"\n # pylint: disable=missing-class-docstring\n class Graph(Predicate):\n id = RawField\n\n class Meta:\n name = prefix+\"graph\"\n\n class SubGraph(Predicate):\n id = RawField\n graph = RawField\n\n class Meta:\n name = prefix+\"graph\"\n\n class Node(Predicate):\n id = RawField\n graph = RawField\n\n class Meta:\n name = prefix+\"node\"\n\n class Edge(Predicate):\n id = RawField\n graph = RawField\n\n class Meta:\n name = prefix+\"edge\"\n\n class Attr(Predicate):\n element_type = ElementType\n element_id = RawField\n attr_id = AttrID.Field\n attr_value = RawField\n\n class Meta:\n name = prefix+\"attr\"\n\n class AttrSugarSimple(Predicate):\n element_type = ElementType\n element_id = RawField\n attr_id = ConstantField\n attr_value = RawField\n\n class Meta:\n name = prefix+\"attr\"\n\n class AttrSugarDouble(Predicate):\n element_type = ElementType\n element_id = RawField\n attr_id = AttrIDSugar.Field\n attr_value = RawField\n\n class Meta:\n name = prefix+\"attr\"\n\n class NodeSugar(Predicate):\n id = RawField\n\n class Meta:\n name = prefix+\"node\"\n\n class EdgeSugar(Predicate):\n id = RawField\n\n class Meta:\n name = prefix+\"edge\"\n # pylint: disable=invalid-name\n\n self.Graph = Graph\n self.SubGraph = SubGraph\n self.Node = Node\n self.Edge = Edge\n self.Attr = Attr\n self.NodeSugar = NodeSugar\n self.EdgeSugar = EdgeSugar\n self.AttrSugarSimple = AttrSugarSimple\n self.AttrSugarDouble = AttrSugarDouble\n\n self.default_graph = default_graph\n self.fb = ClormFactBase(indexes=[Attr.element_id])\n self.prefix = prefix\n\n @classmethod\n def from_string(cls, string, prefix: str = \"\", default_graph:str =\"default\" ):\n \"\"\"\n Creates a :py:class:`Factbase` from a string\n\n Args:\n string (str): A string consisting of only facts, divided by a ``.``\n prefix (str): The prefix to all predicate names\n default_graph (str): Name of the default graph,\n all elements with arity 1 will be assigned to this graph\n\n Raises:\n :py:class:`InvalidSyntax`: If the input are not facts\n \"\"\"\n\n fb = cls(prefix, default_graph)\n fb.add_fact_string(string)\n return fb\n\n @classmethod\n def from_model(cls, model, prefix: str = \"\", default_graph:str =\"default\" ):\n \"\"\"\n Creates a :py:class:`Factbase` from a clingo model\n\n Args:\n model (clingo.Model): A model returned by clingo\n prefix (str): The prefix to all predicate names\n default_graph (str): Name of the default graph,\n all elements with arity 1 will be assigned to this graph\n \"\"\"\n fb = cls(prefix, default_graph)\n fb.add_model(model)\n return fb\n\n def __str__(self):\n \"\"\"\n Returns the current set of facts as a string\n \"\"\"\n return self.fb.asp_str()\n\n @property\n def _unifiers(self):\n \"\"\"\n The list of all unifiers\n \"\"\"\n main_unifiers = [self.Graph, self.SubGraph,\n self.Node, self.Edge, self.Attr]\n sugar_unifiers = [self.NodeSugar, self.EdgeSugar,\n self.AttrSugarSimple, self.AttrSugarDouble]\n return main_unifiers+sugar_unifiers\n\n def _get_element_class(self, element_type):\n \"\"\"\n Obtains an element class for a type given as a string\n\n Args:\n element_type (str): graph, edge or node\n \"\"\"\n if element_type == \"edge\":\n return self.Edge\n if element_type == \"node\":\n return self.Node\n if element_type == \"graph\":\n return self.Graph\n\n raise ValueError(\"Invalid element type\")\n\n def add_fact_string(self, program):\n \"\"\"\n Adds a string containing facts to the :py:class:`Factbase`\n\n Args:\n program (str): A string consisting of only facts, divided by a ``.``\n\n Raises:\n :py:class:`InvalidSyntax`: If the input are not facts\n \"\"\"\n #pylint: disable=duplicate-except\n\n try:\n fb = clorm.parse_fact_string(program, self._unifiers,raise_nonfact=True)\n self.add_fb(fb)\n except NonFactError as e:\n msg = \"The input string contains a complex structure that is not a fact.\"\n raise InvalidSyntax(msg,str(e)) from None\n except FactParserError as e:\n msg = \"The input string contains a complex structure that is not a fact.\"\n raise InvalidSyntax(msg,str(e)) from None\n except RuntimeError as e:\n msg = \"Syntactic error the input string can't be read as facts. \\n\" + program\n raise InvalidSyntax(msg,str(e)) from None\n\n def add_fact_file(self, file):\n \"\"\"\n Adds a file containing facts to the :py:class:`Factbase`\n\n Args:\n file (str): The path to the file\n\n Raises:\n :py:class:`InvalidSyntax`: If the input are not facts\n \"\"\"\n #pylint: disable=duplicate-except\n try:\n fb = clorm.parse_fact_files([file], self._unifiers,raise_nonfact=True)\n self.add_fb(fb)\n except NonFactError as e:\n msg = \"The file contains a complex structure that is not a fact.\"\n raise InvalidSyntax(msg,str(e)) from None\n except FactParserError as e:\n msg = \"The input file contains a complex structure that is not a fact.\"\n raise InvalidSyntax(msg,str(e)) from None\n except RuntimeError as e:\n msg = \"Syntactic error the file, can't be read as facts.\"\n raise InvalidSyntax(msg,str(e)) from None\n\n def add_model(self, model):\n \"\"\"\n Adds a clingo model to the :py:class:`Factbase`\n\n Args:\n model (clingo.Model): A model returned by clingo\n \"\"\"\n symbols = model.symbols(atoms=True, shown=True)\n fb = clorm.unify(self._unifiers, symbols)\n self.add_fb(fb)\n\n def add_fb(self, fb):\n \"\"\"\n Adds a clorm fact base to the :py:class:`Factbase`\n \"\"\"\n processed_fb = self._desugar(fb)\n self.fb = self.fb.union(processed_fb)\n\n def _desugar(self, fb):\n \"\"\"\n Desugar factbase\n - for each node(ID) add node(ID,default) same for edge\n - replace attr(E,ID,Name,Val) with attr(E,ID,(Name,-1),Val)\n \"\"\"\n q = fb.query(self.AttrSugarSimple)\n for attr in set(q.all()):\n name = attr.attr_id\n var = String(\"__\")\n key = String(\"__\")\n new_attr_id = AttrID(attr_name=name,\n attr_variable=Raw(var),\n attr_key=Raw(key))\n e = self.Attr(element_type=attr.element_type,\n element_id=attr.element_id,\n attr_value=attr.attr_value,\n attr_id=new_attr_id)\n fb.remove(attr)\n fb.add(e)\n\n q = fb.query(self.AttrSugarDouble)\n for attr in set(q.all()):\n # print(attr)\n attr_id = attr.attr_id\n name = attr_id.attr_name\n var = attr_id.attr_variable\n key = String(\"__\")\n # print((name,var,key))\n new_attr_id = AttrID(attr_name=name,\n attr_variable=var,\n attr_key=Raw(key))\n e = self.Attr(element_type=attr.element_type,\n element_id=attr.element_id,\n attr_value=attr.attr_value,\n attr_id=new_attr_id)\n # print(e)\n # print()\n fb.remove(attr)\n fb.add(e)\n\n basic_element_classes = [\n (self.NodeSugar, self.Node), (self.EdgeSugar, self.Edge)]\n using_default = False\n for C_Sugar, C in basic_element_classes:\n q = fb.query(C_Sugar)\n for node in set(q.all()):\n using_default = True\n e = C(id=node.id,\n graph=Raw(Function(self.default_graph)))\n fb.remove(node)\n fb.add(e)\n if using_default:\n fb.add(self.Graph(id=Raw(Function(self.default_graph))))\n\n return fb\n\n def get_facts(self):\n \"\"\"\n Gets the facts in the factbase after preprocessing as a string\n\n Returns:\n (`str`) A string with the facts\n \"\"\"\n\n return self.fb.asp_str()\n\n\n def get_all_graphs(self):\n \"\"\"\n Gets a list if the identifiers for all the graphs\n\n Returns:\n (`list`) A list with the identifiers for all the graphs\n \"\"\"\n q = self.fb.query(self.Graph).select(self.Graph.id)\n graph_ids = list(q.all())\n if len(graph_ids) == 0:\n log.warning(\"No graphs were defined in the code. Perhaps a missing `graph` predicate.\")\n q = self.fb.query(self.SubGraph).select(self.SubGraph.id)\n graph_ids = graph_ids+list(q.all())\n return graph_ids\n\n def get_parent_graph(self, graph_id):\n \"\"\"\n Gets the parent graph for a given graph_id.\n\n Args:\n graph_id: Identifier of the subgraph\n\n Returns:\n The identifier of the parent graph or None if there is no parent\n \"\"\"\n q = self.fb.query(self.SubGraph).where(self.SubGraph.id == graph_id)\n q = q.select(self.SubGraph.graph)\n if len(list(q.all())) == 0:\n return None\n return list(q.all())[0]\n\n def get_graph_global_element_attr(self, element_type, graph_id):\n \"\"\"\n Gets the attributes for a global element: graph_nodes or graph_edges.\n\n Args:\n element_type (str): The element type: ``edge`` or ``node``\n graph_id: Identifier of the graph\n\n Returns:\n (`dic`) A dictionary with attribute names as key and attribute values as values.\n \"\"\"\n full_element_type = f\"graph_{element_type}s\"\n return self.get_element_attr(full_element_type, graph_id)\n\n def get_graph_elements(self, element_type, graph_id):\n \"\"\"\n Gets the list of elements for a graph\n\n Args:\n element_type (str): The element type: ``edge`` or ``node``\n graph_id: Identifier of the graph\n\n Returns:\n (`list`) The list of elements that belong to the graph\n \"\"\"\n C = self._get_element_class(element_type)\n q = self.fb.query(C).where(C.graph == graph_id).select(C.id)\n return list(q.all())\n\n def get_element_attr(self, element_type, element_id):\n \"\"\"\n Gets the attributes a specific element\n Returns a dictionary where the keys are attribute name and values are\n attribute values.\n\n Args:\n element_type (str): The element type: ``graph``, ``edge`` or ``node``\n element_id: Identifier of the element\n\n Returns:\n (`dic`) A dictionary with attribute names as key and attribute values as values.\n \"\"\"\n q = self.fb.query(self.Attr)\n q = q.where(self.Attr.element_type == element_type,\n self.Attr.element_id == element_id)\n # pylint: disable=no-member\n q = q.group_by(self.Attr.attr_id.attr_name)\n q = q.select(self.Attr.attr_id.attr_variable, self.Attr.attr_id.attr_key, self.Attr.attr_value)\n attrs = {}\n for name, list_opts in q.all():\n\n custom_template = False\n template = \"{% for k,v in data| dictsort %}{{v}}{% endfor %}\"\n data = {}\n\n for var, key, val in list_opts:\n var = stringify_symbol(var.symbol)\n val = pythonify_symbol(val.symbol)\n key = pythonify_symbol(key.symbol)\n\n is_template = var==\"__\"\n if is_template:\n if custom_template:\n template = template + str(val)\n else:\n template = str(val)\n custom_template= True\n continue\n\n is_dict = key!=\"__\"\n if is_dict:\n if var not in data:\n data[var]={}\n if key in data[var]:\n log.warning(\"Entry (%s,%s,%s) repeated on element %s. Duplicates will be ignored\",name,var,key,element_id)\n data[var][key]= val\n continue\n\n if var in data:\n log.warning(\"Entry (%s,%s) repeated on element %s. Duplicates will be ignored\",name,var,element_id)\n\n data[var]=val\n\n if isinstance(template, str):\n log.debug(\"Formatting template %s with data %s\",template,data)\n s = Template(template).render(data,data = data)\n else:\n s = template\n attrs[str(name)] = str(s)\n\n\n if str(name)=='texlbl': #Used for latex\n attrs[str(name)] = attrs[str(name)].replace('\\\\\\\\','\\\\')\n\n return attrs\n","repo_name":"potassco/clingraph","sub_path":"clingraph/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":15524,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"}
+{"seq_id":"4287839720","text":"from django.conf.urls import url\nfrom . import views\nfrom django.contrib.auth import views as auth_views\nimport django\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login/$',\n auth_views.login,\n {\"template_name\": \"Game/loginform.html\"},\n name='login'),\n url(r'^game-page/$', views.game_page, name='game-page'),\n url(r'^logout/$', auth_views.logout_then_login, name='logout'),\n url(r'^registration/$', views.registration, name='registration'),\n url(r'get_categories', views.get_categories, name='get_categories'),\n url(r'^ajax_login/$', views.login_view, name='ajax_login'),\n url(r'check_element', views.check_element, name='check_element'),\n url(r'get-open-elements-by-category/(?P[0-9]+)',\n views.get_user_open_element_list, name='get-open-elements-by-cat'),\n url(r'activate/(?P[a-f0-9]+)', views.activation, name='activation'),\n url(r'feedback', views.feedback, name='feedback'),\n ]\n","repo_name":"i-ka/alchemy","sub_path":"Game/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"15273745921","text":"from collections import deque\nM, N = map(int, input().split())\n\nboard = [list(input()) for _ in range(M)]\nch = [[0] * N for _ in range(M)]\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\ndef bfs(x, y):\n global res\n q = deque()\n q.append((x, y))\n ch[x][y] = 1\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < M and 0 <= ny < N and board[nx][ny] == 'L'\\\n and ch[nx][ny] == 0:\n ch[nx][ny] = ch[x][y] + 1\n q.append((nx, ny))\n tmp = 0\n for i in range(M):\n if tmp < max(ch[i]):\n tmp = max(ch[i])\n if res < tmp:\n res = tmp\n\nres = 0\nfor idx in range(M):\n for j in range(N):\n if board[idx][j] == 'L':\n bfs(idx, j)\n ch = [[0] * N for _ in range(M)]\nprint(res - 1)","repo_name":"gkdbssla97/Python-Coding-Test","sub_path":"CodingProblem/BOJ2589.py","file_name":"BOJ2589.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"72344163026","text":"#from src.all_imports import *\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import WebDriverException, NoSuchElementException\n\nfrom selenium.webdriver.common.by import By\nimport datetime\nimport time\n\nimport src.utilities as utils\n\n# this is reusable functions for webpages.\nclass BasePage:\n def __init__(self, driver):\n # this is global properties in constructor (function __init__)\n self.driver = driver\n self.wwait = WebDriverWait(self.driver, 10)\n\n def click_element_by_xpath(self,xpath):\n \"\"\"\n this method finds the element by xpath and clicks it\n :param xpath: correct unique xpath of single element\n \"\"\"\n try:\n utils.LOG.info(f\"xpath provided: {xpath}\")\n # element = driver.find_element_by_xpath(xpath)\n element = self.wwait.until(EC.element_to_be_clickable((By.XPATH, xpath)))\n\n utils.LOG.info(\"clicking the element\")\n element.click()\n except NoSuchElementException as err:\n utils.LOG.warning(f\"Check element by following xpath: {xpath}\")\n utils.LOG.error(err)\n self.take_screenshot('ErrorClickElement_')\n\n def enter_text_by_xpath(self, xpath, some_text):\n \"\"\"\n this method finds the element by xpath and enters text in it\n :param xpath: correct unique xpath of single INPUT element\n :param some_text: text to be entered in the element\n \"\"\"\n try:\n utils.LOG.info(f\"xpath provided: {xpath}\")\n # element = driver.find_element_by_xpath(xpath)\n # element = WebDriverWait(driver, 10).until(expected_conditions.presence_of_element_located((By.XPATH, xpath)))\n element = self.wwait.until(EC.presence_of_element_located((By.XPATH, xpath)))\n\n utils.LOG.info(f\"entering the following text :{some_text}\")\n element.send_keys(some_text)\n except WebDriverException as err:\n utils.LOG.error(f\"Entering Text failed by following xpath: {xpath}\")\n utils.LOG.error(err)\n self.take_screenshot('ErrorEnterText_')\n\n def highlight_element(self, element):\n js_script = \"arguments[0].setAttribute('style', arguments[1]);\"\n original_style = element.get_attrivute('style')\n new_style = \"color: green; border: 2px solid green;\"\n # new_style = \"background: yellow; color: green; border: 2px solid green;\"\n self.driver.execute_script(js_script, element, new_style)\n self.driver.execute_script(js_script, element, original_style)\n utils.LOG.info(\"Element highlighted\")\n\n def take_screenshot(self, message=\"\"):\n timestmp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\n # ROOT_DIR is \"C:/dev/week7\"\n file_location = f\"{utils.ROOT_DIR}/screenshots/\"\n file_location = f\"C:/DEV/week7/screenshots/\"\n file_name = message + timestmp + \".png\"\n full_file_path = file_location + file_name\n\n self.driver.save_screenshot(full_file_path)\n utils.LOG.info(\"screenshot was taken and completed\")\n # driver.get_screenshot_as_png(message + timestmp)\n\n\n def get_text_by_xpath(self, xpath: str) -> str:\n try:\n utils.LOG.info(f\"xpath provided: {xpath}\")\n # element = driver.find_element_by_xpath(xpath)\n element = self.wwait.until(EC.element_to_be_clickable((By.XPATH, xpath)))\n\n utils.LOG.info(\"getting the element text\")\n return element.text\n except NoSuchElementException as err:\n utils.LOG.warning(f\"Check element by following xpath: {xpath}\")\n utils.LOG.error(err)\n self.take_screenshot('ErrorGetText_')\n\n","repo_name":"dilshod-Cisco/SeleniumPytestPOM","sub_path":"src/pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"43516507702","text":"# 어떤 수를 왼쪽부터 읽어도, 오른쪽부터 읽어도 같을 때 이 수를 회문인 수라고 한다. 예를 들어, 747은 회문인 수이다. 255도 회문인 수인데, 16진수로 표현하면 FF이기 때문이다. 양의 정수를 입력받았을 때, 이 수가 어떤 B진법 (2 ≤ B ≤ 64)으로 표현하면 회문이 되는 경우가 있는지 알려주는 프로그램을 작성하시오. B진법이란, 한 자리에서 수를 표현할 때 쓸 수 있는 수의 가짓수가 B라는 뜻이다. 예를 들어, 십진법에서 B는 10이다.\r\n\r\nT = int(input())\r\nfor _ in range(T):\r\n number = int(input())\r\n ans = []\r\n for B in range(2, 65):\r\n lst = []\r\n temp = number\r\n while True:\r\n if temp == 0:\r\n break\r\n else:\r\n lst.append(temp % B)\r\n temp //= B\r\n for i in range(len(lst)//2):\r\n if lst[i] != lst[-1-i]:\r\n ans.append(\"X\")\r\n break\r\n if len(ans) == 63:\r\n print(0)\r\n else:\r\n print(1)","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"10000~/11068.py","file_name":"11068.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28776797960","text":"'''\r\nCreated on 2015. 9. 11.\r\n\r\n@author: thCho\r\n'''\r\n\r\nimport engine.portfolioInfo.PortfolioInfo as PI\r\nimport engine.portfolioInfo.AbstractStrategy as ASt\r\nfrom engine.portfolio.SelectAssets import SelectAsset\r\nfrom engine.type.PortfolioType import PortfolioType\r\nfrom util.schedule.Date import Date\r\nfrom util.schedule.Period import Period\r\n\r\ntoday = Date('20150611')\r\n#Assets\r\nsa = SelectAsset()\r\nvariables = [PortfolioType.MARKET, PortfolioType.MARKETCAP]\r\nconditions = [\"='KS'\", \">= '50000000'\"]\r\n\r\nassets = sa.select(variables, conditions, today)\r\nassetCodes = []\r\nfor i in range(0, len(assets)):\r\n assetCodes.append(assets[i].assetCode)\r\ninitialMoney = 1.0E6\r\nperiod = ['20150105','20150113']\r\n\r\nportfolio = PI.PortfolioInfo(assetCodes, initialMoney, period)\r\n\r\nstrategy = []\r\nfor i in range(0, len(assets)):\r\n if i%4 == 0:\r\n strategy.append(ASt.AbstractStrategy('20150105', assetCodes[i], 1))\r\n \r\n if i%4 == 0:\r\n strategy.append(ASt.AbstractStrategy('20150113', assetCodes[i], -1))\r\n\r\n\r\nportfolio.getInfobyStrategy(strategy, False)\r\n\r\ndates = ['20150106', '20150107', '20150108', '20150109']\r\nfor i in range(0, len(dates)):\r\n print(portfolio.getMTM(dates[i]))\r\n\r\n\r\n","repo_name":"quantosauros/KirudaEngine","sub_path":"kirudaEngine/test/PortfolioInfo/testPortfolioStrategy.py","file_name":"testPortfolioStrategy.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"951715092","text":"#!/usr/bin/python\n\n\"\"\" \n This is the code to accompany the Lesson 2 (SVM) mini-project.\n\n Use a SVM to identify emails from the Enron corpus by their authors: \n Sara has label 0\n Chris has label 1\n\"\"\"\n \nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\n\n\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n\n\n\n#########################################################\n### your code goes here ###\n\nfrom sklearn.svm import SVC\n\"\"\"\n# using 1% of training set\na = int(len(features_train)/100)\nb = int(len(labels_train)/100)\nfeatures_train = features_train[:a]\nlabels_train = labels_train[:b]\n\"\"\"\n\n#clf = SVC(kernel=\"linear\")\nclf = SVC(kernel=\"rbf\", C=10000)\nt0 = time()\nclf.fit(features_train, labels_train)\nprint (\"training time:\", round(time()-t0, 3), \"s\")\n\nt0 = time()\npred = clf.predict(features_test)\nprint (\"predict time:\", round(time()-t0, 3), \"s\")\n\n\nfrom sklearn.metrics import accuracy_score\naccuracy = accuracy_score(pred, labels_test)\nprint(\"accuracy =\", accuracy)\n\nprint (\"answer to element 10 is = \", pred[10])\nprint (\"answer to element 26 is = \", pred[26])\nprint (\"answer to element 50 is = \", pred[50])\n\na=0\nb=0\nprint(\"length =\", len(pred))\nfor ii in range(len(pred)):\n if(pred[ii] == 1): a += 1\n else: b += 1 \n\nprint( \"number of prediction for Chris = \",a)\nprint( \"number of prediction for Sara = \",b)\n\n#########################################################\n\n\n","repo_name":"saeidmoha/svm","sub_path":"svm_author_id.py","file_name":"svm_author_id.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15277113800","text":"import json\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\ncolor_scheme = dict(red='#c44e52',\n orange='#dd8452',\n yellow='#ccb974',\n green='#55a868',\n blue='#64b5cd',\n indigo='#4c72b0',\n purple='#8172b3')\ncs = dict(r='#c44e52',\n o='#dd8452',\n y='#ccb974',\n g='#55a868',\n b='#64b5cd',\n i='#4c72b0',\n p='#8172b3')\n\n\ndef load_json_logs(json_log):\n train_info, val_info = [], []\n with open(json_log, 'r') as log_file:\n for line in log_file:\n log = json.loads(line.strip())\n if 'train' in log.values():\n train_info.append(dict(epoch=log['epoch'], iter=log['iter'], lr=log['lr'],\n loss=log['loss'], loss_cls=log['loss_cls'], loss_bbox=log['loss_bbox']))\n if 'val' in log.values():\n val_info.append(dict(epoch=log['epoch'], iter=log['iter'], AP50=log['bbox_mAP_50'],\n mAP=log['bbox_mAP']))\n\n return train_info, val_info\n\n\ndef plot_curves(train_info, val_info):\n iters_per_epoch = val_info[0]['iter']\n losses = [info['loss'] for info in train_info]\n cls_losses = [info['loss_cls'] for info in train_info]\n bbox_losses = [info['loss_bbox'] for info in train_info]\n lr = [info['lr'] for info in train_info]\n iters = [(info['epoch'] - 1) * iters_per_epoch + info['iter'] for info in train_info]\n AP50 = [info['AP50'] for info in val_info]\n mAP = [info['mAP'] for info in val_info]\n iters_val = [info['epoch'] * info['iter'] for info in val_info]\n\n # matplotlibrc\n mpl.rcParams['xtick.direction'] = 'in'\n mpl.rcParams['ytick.direction'] = 'in'\n fig, ax = plt.subplots(2, 2, figsize=[12, 9])\n ax = ax.reshape(-1)\n\n ax[0].plot(iters, losses, color=cs['r'], label='total_loss')\n # ax[0].plot(iters, cls_losses, color=cs['i'], label='cls_loss')\n # ax[0].plot(iters, bbox_losses, color=cs['g'], label='bbox_loss')\n max_iter = max(iters)\n power = len(str(max_iter)) - 1\n right = math.ceil(max_iter / 10 ** power) * 10 ** power\n ax[0].set_xlim(0, right)\n ax[0].set_xlabel('iterations', fontsize=12)\n ax[0].set_ylim(0)\n ax[0].set_ylabel('loss', fontsize=12)\n # ax[0].grid(ls='--', lw=0.5)\n ax[0].legend()\n\n ax[1].plot(iters_val, AP50, color=cs['i'], label='AP50')\n max_iter = max(iters_val)\n power = len(str(max_iter)) - 1\n right = math.ceil(max_iter / 10 ** power) * 10 ** power\n ax[1].set_xlim(0, right)\n labels = [int(x / 10 ** power) for x in ax[1].get_xticks()]\n ax[1].set_xticklabels(labels)\n ax[1].set_xlabel(f'iterations(10^{power})', fontsize=12)\n ax[1].set_ylim(0)\n ax[1].set_yticks(np.linspace(0, 1, 6))\n ax[1].set_ylabel('AP', fontsize=12)\n ax[1].legend(loc=4)\n\n for i, (k, v) in enumerate(color_scheme.items()):\n ax[2].plot(range(0, 10), i * np.arange(0, 10), c=v, label=k)\n ax[2].legend()\n\n fig.savefig('training.png')\n fig.show()\n\n\nif __name__ == '__main__':\n json_log = '../../work_dirs/fcos_r50_ssdd_8-1-0.02/20211006_201009.log.json'\n plot_curves(*load_json_logs(json_log=json_log))\n","repo_name":"NotRaining/mmdetection","sub_path":"tools/analysis_tools/analyze_training.py","file_name":"analyze_training.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19102516989","text":"from book import Book\nfrom recipe import Recipe\n\ntourte = Recipe('tourte', '1', '10', 'd', 'e', 'lunch')\nto_print = str(tourte)\n\nbook = Book(tourte)\n\nbook.get_recipes_by_types('dessert')\n\nbook.add_recipe(tourte)\n\nbook.get_recipes_by_types('dessert')\n\nbook.add_recipe(Recipe('jkhjkh', '1', '10', 'd', 'e', 'lunch'))\nbook.add_recipe(Recipe('222', '1', '10', 'd', 'e', 'dessert'))\nbook.add_recipe(Recipe('333', '1', '10', 'd', 'e', 'lunch'))\n\n\nbook.get_recipes_by_types('lunch')\n\nprint(to_print)\n\nprint(book.get_recipe_by_name('snarf')._name)\n\nbook.get_recipes_by_types('lunch')\n\n","repo_name":"VVIMT/BootcampPython-42AI","sub_path":"day01/ex00/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72736844305","text":"import networkx as nx\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom torch_geometric.utils.convert import to_networkx\r\n\r\n# 通过邻接矩阵生成图\r\n# 有向图\r\nnx.from_numpy_matrix(np.array(data), create_using=nx.DiGraph)\r\n# 无向图\r\nnx.from_numpy_matrix(np.array(data))\r\n# 修改图的属性\r\nG.graph[\"day\"] = \"Monday\"\r\n# 修改节点属性\r\nG.add_nodes_from([3], time=\"2pm\")\r\nG.nodes[1][\"room\"] = 714\r\nG.nodes.data()\r\n# 修改边的属性\r\nG.add_edges_from([(3, 4), (4, 5)], color=\"red\")\r\nG.add_edges_from([(1, 2, {\"color\": \"blue\"}), (2, 3, {\"weight\": 8})])\r\nG[1][2][\"weight\"] = 4.7\r\nG.edges[3, 4][\"weight\"] = 4.2\r\n# 绘制\r\ndef draw(Data): # type(Data) = \r\n G = to_networkx(Data)\r\n nx.draw(G)\r\n plt.show()\r\n","repo_name":"Novmaple/GNN-Facade","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10057954206","text":"from airtest.core.api import *\r\nfrom airtest.aircv import *\r\nfrom pywinauto import*\r\nfrom tkinter import *\r\nimport win32api,win32gui,win32con\r\n\r\ndef preprocessImg(image):\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\r\n return thresh\r\nstate_release=0x00\r\nprestate=win32api.GetKeyState(0x01)\r\ntk=Tk()\r\nhwnd=findwindows.find_windows(title_re=\"Yu-Gi-Oh! DUEL LINKS\")[0]\r\ndev=init_device(platform=\"Windows\",uuid=hwnd)\r\nwin32gui.ShowWindow(hwnd, win32con.SW_MINIMIZE)\r\nwin32gui.ShowWindow(hwnd, win32con.SW_RESTORE)\r\ndev_pos=dev.get_pos()\r\ntk.attributes(\"-alpha\", 0.3)\r\ntk.attributes(\"-topmost\", True)\r\ntk.geometry(\"1664x936\"+\"+\"+str(dev_pos[0])+\"+\"+str(dev_pos[1]))\r\nwhile True:\r\n tk.update_idletasks()\r\n tk.update()\r\n state=win32api.GetKeyState(0x01)\r\n if state!=state_release:\r\n if state < 0 and state!=prestate:\r\n cursor_pos=win32api.GetCursorPos()\r\n print(cursor_pos[0]-dev_pos[0],cursor_pos[1]-dev_pos[1])\r\n tk.destroy()\r\n break\r\n prestate=state\r\n time.sleep(0.01)","repo_name":"Obr00007576/YGOScript","sub_path":"YGOScript/ScriptToolPointPosition.py","file_name":"ScriptToolPointPosition.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"484427063","text":"# Definimos una lista\r\nenteros = [1, 2, 3, 4, 5]\r\n\r\n# Definimos una lista por comprensión\r\nenteros_mas_uno = [numero + 1 for numero in enteros]\r\n\r\nprint(enteros_mas_uno)\r\n\r\n#en un listado por comprension podemos sumar dos variable e iterarlas en un ciclo for, en una linea\r\n#esta es la sintaxis de un comando de lista por comprension con ciclo for\r\n#[**expresión** for **elemento** in **iterable**]\r\n\r\n# definimos una lista\r\nenteros = [1, 2, 3, 4, 5]\r\n\r\n# definimos una lista por comprensión en la cual estamos agrgando un condicional if, para filtrar la operacion\r\n#[**expresión** for **elemento** in **iterable** [if **condición]]\r\n\r\nenteros_mas_uno = [numero + 1 for numero in enteros if numero > 3]\r\n\r\nprint(enteros_mas_uno)\r\n#---------------------------------------- ejercicios\r\n\r\n\r\n","repo_name":"chulth/Excercises","sub_path":"Pro_py/ejercicios/BD/lista_por_compresion.py","file_name":"lista_por_compresion.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11472455729","text":"__all__ = [\n \"DialogSubMenu\",\n \"ConfirmSubMenu\",\n \"TextSubMenu\",\n \"ProgressSubMenu\",\n \"AdvancedProgressSubMenu\",\n]\n\nimport pyglet\n\nfrom . import SubMenu\nfrom . import text\nfrom . import button\nfrom . import slider\n\n\nclass DialogSubMenu(SubMenu):\n \"\"\"\n Base Dialog Class.\n\n This class acts as a base class for all other dialog submenus.\n\n When the dialog is entered, the :py:attr:`prev_submenu` attribute will be set\n to the name of the previous submenu. This attribute is later used when exiting\n the dialog.\n\n Dialog submenus also support the basic actions used by all submenus, e.g.\n ``enter`` and ``exit``\\\\ . Additionally, many dialogs also add actions for whenever\n a label is changed or the dialog is exited through a special means, e.g. pressing\n a specific button of multiple presented.\n\n If used by itself, it will display a text centered on the screen with a button\n below it. Clicking the button will cause the dialog to exit and also the\n additional ``click_ok`` action to be fired.\n\n The labels supported by default are ``label_main``\\\\ , which defaults to ``Default Text``\n and is recommended to always be customized, and ``label_ok``\\\\ , which defaults to ``OK``\n and may be left as-is.\n\n Subclasses may override these defaults by setting the keys of the same name in the\n ``DEFAULT_LABELS`` class attribute. Note that any unchanged labels must also be declared\n when overwriting any labels, or they may not be displayed.\n\n Widgets and their initializers are stored in the :py:data:`WIDGETS` class attribute,\n see :py:meth:`add_widgets()` for more information.\n \"\"\"\n\n DEFAULT_LABELS = {\n \"label_main\": \"Default Text\",\n \"label_ok\": \"OK\",\n }\n WIDGETS = {\n \"label_main\": \"add_label_main\",\n \"label_ok\": \"add_btn_ok\",\n }\n\n def __init__(\n self,\n name,\n menu,\n window=None,\n peng=None,\n borderstyle=None,\n font_size=None,\n font=None,\n font_color=None,\n multiline=False,\n **kwargs # for label_main etc.\n ):\n super(DialogSubMenu, self).__init__(name, menu, window, peng)\n\n self.style.override_if_not_none(\"font\", font)\n self.style.override_if_not_none(\"font_size\", font_size)\n self.style.override_if_not_none(\"font_color\", font_color)\n self.style.override_if_not_none(\"borderstyle\", borderstyle)\n\n self.multiline = multiline\n\n self.prev_submenu = None\n\n labels = {}\n labels.update(self.DEFAULT_LABELS)\n labels.update(kwargs)\n self.labels = kwargs\n self.kwargs = kwargs\n\n self.add_widgets(**labels)\n\n def add_widgets(self, **kwargs):\n \"\"\"\n Called by the initializer to add all widgets.\n\n Widgets are discovered by searching through the :py:attr:`WIDGETS` class attribute.\n If a key in :py:attr:`WIDGETS` is also found in the keyword arguments and\n not none, the function with the name given in the value of the key will\n be called with its only argument being the value of the keyword argument.\n\n For more complex usage scenarios, it is also possible to override this method\n in a subclass, but the original method should always be called to ensure\n compatibility with classes relying on this feature.\n \"\"\"\n for name, fname in self.WIDGETS.items():\n if name in kwargs and kwargs[name] is not None:\n assert hasattr(self, fname)\n assert callable(getattr(self, fname))\n getattr(self, fname)(kwargs[name])\n\n def add_label_main(self, label_main):\n \"\"\"\n Adds the main label of the dialog.\n\n This widget can be triggered by setting the label ``label_main`` to a string.\n\n This widget will be centered on the screen.\n \"\"\"\n # Main Label\n self.wlabel_main = text.Label(\n \"label_main\",\n self,\n pos=lambda sw, sh, bw, bh: (sw / 2 - bw / 2, sh / 2 - bh / 2),\n size=[0, 0],\n label=label_main,\n font=self.font,\n font_size=self.font_size,\n font_color=self.font_color,\n multiline=self.multiline,\n )\n self.wlabel_main.size = lambda sw, sh: (sw, self.wlabel_main._label.font_size)\n\n def add_btn_ok(self, label_ok):\n \"\"\"\n Adds an OK button to allow the user to exit the dialog.\n\n This widget can be triggered by setting the label ``label_ok`` to a string.\n\n This widget will be mostly centered on the screen, but below the main label\n by the double of its height.\n \"\"\"\n # OK Button\n self.wbtn_ok = button.Button(\n \"btn_ok\",\n self,\n pos=lambda sw, sh, bw, bh: (sw / 2 - bw / 2, sh / 2 - bh / 2 - bh * 2),\n size=[0, 0],\n label=label_ok,\n borderstyle=self.borderstyle,\n font=self.font,\n font_size=self.font_size,\n font_color=self.font_color,\n )\n self.wbtn_ok.size = lambda sw, sh: (\n self.wbtn_ok._label.font_size * 8,\n self.wbtn_ok._label.font_size * 2,\n )\n\n def f():\n self.doAction(\"click_ok\")\n self.exitDialog()\n\n self.wbtn_ok.addAction(\"click\", f)\n\n @property\n def label_main(self):\n \"\"\"\n Property that proxies the ``label_main`` label.\n\n Setting this property will cause the ``label_main_change`` action to trigger.\n\n Note that trying to access this property if the widget is not used may cause\n an error.\n \"\"\"\n # no check for initialized label, NameError should be good enough to debug\n return self.wlabel_main.label\n\n @label_main.setter\n def label_main(self, value):\n self.wlabel_main.label = value\n self.doAction(\"label_main_change\")\n\n @property\n def label_ok(self):\n \"\"\"\n Property that proxies the ``label_ok`` label.\n\n Setting this property will cause the ``label_ok_change`` action to trigger.\n\n Note that trying to access this property if the widget is not used may cause\n an error.\n \"\"\"\n return self.wbtn_ok.label\n\n @label_ok.setter\n def label_ok(self, value):\n self.wbtn_ok.label = value\n self.doAction(\"label_ok_change\")\n\n def on_enter(self, old):\n if self.menu.activeSubMenu == self.menu:\n raise RuntimeError(\"Cannot open a dialog twice\")\n self.prev_submenu = old # name or None\n\n def exitDialog(self):\n \"\"\"\n Helper method that exits the dialog.\n\n This method will cause the previously active submenu to activate.\n \"\"\"\n if self.prev_submenu is not None:\n # change back to the previous submenu\n # could in theory form a stack if one dialog opens another\n self.menu.changeSubMenu(self.prev_submenu)\n self.prev_submenu = None\n\n def activate(self):\n \"\"\"\n Helper method to enter the dialog.\n\n Calling this method will simply cause the dialog to become the active submenu.\n\n Note that is not necessary to call this method over :py:meth:`changeSubMenu()`\\\\ ,\n as the storing of the previous submenu is done elsewhere.\n \"\"\"\n # error checking done indirectly by on_enter\n # on_enter will be called automatically to store previous submenu\n self.menu.changeSubMenu(self.name)\n\n\nclass ConfirmSubMenu(DialogSubMenu):\n \"\"\"\n Dialog that allows the user to confirm or cancel an action.\n\n By default, the OK button will be hidden and the ``label_main`` will be set\n to ``Are you sure?``\\\\ .\n\n Clicking the confirm button will cause the ``confirm`` action to trigger, while\n the cancel button will cause the ``cancel`` action to trigger.\n \"\"\"\n\n DEFAULT_LABELS = {\n \"label_main\": \"Are you sure?\",\n \"label_confirm\": \"Confirm\",\n \"label_cancel\": \"Cancel\",\n }\n WIDGETS = {\n **DialogSubMenu.WIDGETS,\n \"label_confirm\": \"add_btn_confirm\",\n \"label_cancel\": \"add_btn_cancel\",\n }\n\n def add_btn_confirm(self, label_confirm):\n \"\"\"\n Adds a confirm button to let the user confirm whatever action they were presented with.\n\n This widget can be triggered by setting the label ``label_confirm`` to a string.\n\n This widget will be positioned slightly below the main label and to the left\n of the cancel button.\n \"\"\"\n # Confirm Button\n self.wbtn_confirm = button.Button(\n \"btn_confirm\",\n self,\n pos=lambda sw, sh, bw, bh: (sw / 2 - bw - 4, sh / 2 - bh / 2 - bh * 2),\n size=[0, 0],\n label=label_confirm,\n borderstyle=self.borderstyle,\n font=self.font,\n font_size=self.font_size,\n font_color=self.font_color,\n )\n self.wbtn_confirm.size = lambda sw, sh: (\n self.wbtn_confirm._label.font_size * 8,\n self.wbtn_confirm._label.font_size * 2,\n )\n\n def f():\n self.doAction(\"confirm\")\n self.exitDialog()\n\n self.wbtn_confirm.addAction(\"click\", f)\n\n def add_btn_cancel(self, label_cancel):\n \"\"\"\n Adds a cancel button to let the user cancel whatever choice they were given.\n\n This widget can be triggered by setting the label ``label_cancel`` to a string.\n\n This widget will be positioned slightly below the main label and to the right\n of the confirm button.\n \"\"\"\n # Cancel Button\n self.wbtn_cancel = button.Button(\n \"btn_cancel\",\n self,\n pos=lambda sw, sh, bw, bh: (sw / 2 + 4, sh / 2 - bh / 2 - bh * 2),\n size=[0, 0],\n label=label_cancel,\n borderstyle=self.borderstyle,\n font=self.font,\n font_size=self.font_size,\n font_color=self.font_color,\n )\n self.wbtn_cancel.size = lambda sw, sh: (\n self.wbtn_cancel._label.font_size * 8,\n self.wbtn_cancel._label.font_size * 2,\n )\n\n def f():\n self.doAction(\"cancel\")\n self.exitDialog()\n\n self.wbtn_cancel.addAction(\"click\", f)\n\n @property\n def label_confirm(self):\n \"\"\"\n Property that proxies the ``label_confirm`` label.\n\n Setting this property will cause the ``label_confirm_change`` action to trigger.\n\n Note that trying to access this property if the widget is not used may cause\n an error.\n \"\"\"\n return self.wbtn_confirm.label\n\n @label_confirm.setter\n def label_confirm(self, value):\n self.wbtn_confirm.label = value\n self.doAction(\"label_confirm_change\")\n\n @property\n def label_cancel(self):\n \"\"\"\n Property that proxies the ``label_cancel`` label.\n\n Setting this property will cause the ``label_cancel_change`` action to trigger.\n\n Note that trying to access this property if the widget is not used may cause\n an error.\n \"\"\"\n return self.wbtn_cancel.label\n\n @label_cancel.setter\n def label_cancel(self, value):\n self.wbtn_cancel.label = value\n self.doAction(\"label_cancel_change\")\n\n\nclass TextSubMenu(DialogSubMenu):\n \"\"\"\n Dialog without user interaction that can automatically exit after a certain amount of time.\n\n This dialog accepts the ``timeout`` keyword argument, which may be set to any\n time in seconds to delay before exiting the dialog. A value of ``-1`` will cause\n the dialog to never exit on its own.\n\n Note that the user will not be able to exit this dialog and may believe the program\n is hanging if not assured otherwise. It is thus recommended to use the :py:class:`ProgressSubMenu`\n dialog instead, especially for long-running operations.\n \"\"\"\n\n DEFAULT_LABELS = {\n \"label_main\": \"Default Text\",\n # no button needed, timer does the rest\n }\n\n def __init__(self, name, menu, window, peng, timeout=10, **kwargs):\n super(TextSubMenu, self).__init__(name, menu, window, peng, **kwargs)\n self.timeout = timeout\n\n def on_enter(self, old):\n super(TextSubMenu, self).on_enter(old)\n\n if self.timeout != -1:\n pyglet.clock.schedule_once(lambda dt: self.exitDialog(), self.timeout)\n\n\nclass ProgressSubMenu(DialogSubMenu):\n \"\"\"\n Dialog without user interaction displaying a progressbar.\n\n By default, the progressbar will range from 0-100, effectively a percentage.\n\n The :py:attr:`auto_exit` attribute may be set to control whether or not the dialog\n will exit automatically when the maximum value is reached.\n \"\"\"\n\n DEFAULT_LABELS = {\n \"label_main\": \"Loading...\",\n \"label_progressbar\": \"{percent:.1}%\",\n # TODO: actually implement the progress_* labels\n \"progress_n\": 0, # should be updated on-the-fly through property progress_n\n \"progress_nmin\": 0,\n \"progress_nmax\": 100, # basically equal to percentages\n }\n WIDGETS = {\n **DialogSubMenu.WIDGETS,\n \"label_progressbar\": \"add_progressbar\",\n }\n auto_exit = False\n \"\"\"\n Controls whether or not the dialog will exit automatically after the maximum\n value has been reached.\n \"\"\"\n\n def add_progressbar(self, label_progressbar):\n \"\"\"\n Adds a progressbar and label displaying the progress within a certain task.\n\n This widget can be triggered by setting the label ``label_progressbar`` to\n a string.\n\n The progressbar will be displayed centered and below the main label.\n The progress label will be displayed within the progressbar.\n\n The label of the progressbar may be a string containing formatting codes\n which will be resolved via the ``format()`` method.\n\n Currently, there are six keys available:\n\n ``n`` and ``value`` are the current progress rounded to 4 decimal places.\n\n ``nmin`` is the minimum progress value rounded to 4 decimal places.\n\n ``nmax`` is the maximum progress value rounded to 4 decimal places.\n\n ``p`` and ``percent`` are the percentage value that the progressbar is completed\n rounded to 4 decimal places.\n\n By default, the progressbar label will be ``{percent}%`` displaying the percentage\n the progressbar is complete.\n \"\"\"\n # Progressbar\n self.wprogressbar = slider.Progressbar(\n \"progressbar\",\n self,\n pos=lambda sw, sh, bw, bh: (\n sw / 2 - bw / 2,\n self.wlabel_main.pos[1] - bh * 1.5,\n ),\n size=[0, 0],\n # label=label_progressbar # TODO: add label\n borderstyle=self.borderstyle,\n )\n\n # Progress Label\n self.wprogresslabel = text.Label(\n \"progresslabel\",\n self,\n pos=lambda sw, sh, bw, bh: (sw / 2 - bw / 2, self.wprogressbar.pos[1] + 8),\n size=[0, 0],\n label=\"\", # set by update_progressbar()\n font=self.font,\n font_size=self.font_size,\n font_color=self.font_color,\n )\n self.wprogresslabel.size = lambda sw, sh: (\n sw,\n self.wprogresslabel._label.font_size,\n )\n\n self.wprogressbar.size = lambda sw, sh: (\n sw * 0.8,\n self.wprogresslabel._label.font_size + 10,\n )\n\n self._label_progressbar = label_progressbar\n\n if getattr(label_progressbar, \"_dynamic\", False):\n\n def f():\n self.label_progressbar = str(label_progressbar)\n\n self.peng.i18n.addAction(\"setlang\", f)\n\n self.wprogressbar.addAction(\"progresschange\", self.update_progressbar)\n\n self.update_progressbar()\n\n def update_progressbar(self):\n \"\"\"\n Updates the progressbar by re-calculating the label.\n\n It is not required to manually call this method since setting any of the\n properties of this class will automatically trigger a re-calculation.\n \"\"\"\n n, nmin, nmax = (\n self.wprogressbar.n,\n self.wprogressbar.nmin,\n self.wprogressbar.nmax,\n )\n if (nmax - nmin) == 0:\n percent = 0 # prevents ZeroDivisionError\n else:\n percent = max(min((n - nmin) / (nmax - nmin), 1.0), 0.0) * 100\n dat = {\n \"value\": round(n, 4),\n \"n\": round(n, 4),\n \"nmin\": round(nmin, 4),\n \"nmax\": round(nmax, 4),\n \"percent\": round(percent, 4),\n \"p\": round(percent, 4),\n }\n txt = self._label_progressbar.format(**dat)\n self.wprogresslabel.label = txt\n\n @property\n def progress_n(self):\n \"\"\"\n Property that proxies the ``progress_n`` label.\n\n Setting this property will cause the progressbar label to be recalculated.\n\n Additionally, if the supplied value is higher than the maximum value and\n :py:attr:`auto_exit` is true, the dialog will exit.\n \"\"\"\n return self.wprogressbar.n\n\n @progress_n.setter\n def progress_n(self, value):\n self.wprogressbar.n = value\n self.update_progressbar()\n if self.auto_exit:\n if self.wprogressbar.n >= self.wprogressbar.nmax:\n self.exitDialog()\n\n @property\n def progress_nmin(self):\n \"\"\"\n Property that proxies the ``progress_nmin`` label.\n\n Setting this property will cause the progressbar label to be recalculated.\n\n Note that setting this property if the widget has not been initialized may\n cause various errors to occur.\n \"\"\"\n return self.wprogressbar.nmin\n\n @progress_nmin.setter\n def progress_nmin(self, value):\n self.wprogressbar.nmin = value\n self.update_progressbar()\n\n @property\n def progress_nmax(self):\n \"\"\"\n Property that proxies the ``progress_nmax`` label.\n\n Setting this property will cause the progressbar label to be recalculated.\n\n Note that setting this property if the widget has not been initialized may\n cause various errors to occur.\n \"\"\"\n return self.wprogressbar.nmax\n\n @progress_nmax.setter\n def progress_nmax(self, value):\n self.wprogressbar.nmax = value\n self.update_progressbar()\n\n @property\n def label_progressbar(self):\n \"\"\"\n Property that proxies the ``label_progressbar`` label.\n\n Setting this property will cause the progressbar label to be recalculated.\n\n Note that setting this property if the widget has not been initialized may\n cause various errors to occur.\n \"\"\"\n return self.wprogresslabel.label\n\n @label_progressbar.setter\n def label_progressbar(self, value):\n self._label_progressbar = value\n self.update_progressbar()\n\n\nclass AdvancedProgressSubMenu(ProgressSubMenu):\n def add_progressbar(self, label_progressbar):\n \"\"\"\n Adds a progressbar and label displaying the progress within a certain task.\n\n This widget can be triggered by setting the label ``label_progressbar`` to\n a string.\n\n The progressbar will be displayed centered and below the main label.\n The progress label will be displayed within the progressbar.\n\n The label of the progressbar may be a string containing formatting codes\n which will be resolved via the ``format()`` method.\n\n Currently, there are six keys available:\n\n ``n`` and ``value`` are the current progress rounded to 4 decimal places.\n\n ``nmin`` is the minimum progress value rounded to 4 decimal places.\n\n ``nmax`` is the maximum progress value rounded to 4 decimal places.\n\n ``p`` and ``percent`` are the percentage value that the progressbar is completed\n rounded to 4 decimal places.\n\n By default, the progressbar label will be ``{percent}%`` displaying the percentage\n the progressbar is complete.\n \"\"\"\n # Progressbar\n self.wprogressbar = slider.AdvancedProgressbar(\n \"progressbar\",\n self,\n pos=lambda sw, sh, bw, bh: (\n sw / 2 - bw / 2,\n self.wlabel_main.pos[1] - bh * 1.5,\n ),\n size=[0, 0],\n # label=label_progressbar # TODO: add label\n borderstyle=self.borderstyle,\n )\n\n # Progress Label\n self.wprogresslabel = text.Label(\n \"progresslabel\",\n self,\n pos=lambda sw, sh, bw, bh: (sw / 2 - bw / 2, self.wprogressbar.pos[1] + 8),\n size=[0, 0],\n label=\"\", # set by update_progressbar()\n font=self.font,\n font_size=self.font_size,\n font_color=self.font_color,\n )\n self.wprogresslabel.size = lambda sw, sh: (\n sw,\n self.wprogresslabel._label.font_size,\n )\n\n self.wprogressbar.size = lambda sw, sh: (\n sw * 0.8,\n self.wprogresslabel._label.font_size + 10,\n )\n\n self._label_progressbar = label_progressbar\n\n if getattr(label_progressbar, \"_dynamic\", False):\n\n def f():\n self.label_progressbar = str(label_progressbar)\n\n self.peng.i18n.addAction(\"setlang\", f)\n\n self.wprogressbar.addAction(\"progresschange\", self.update_progressbar)\n\n self.update_progressbar()\n\n def addCategory(self, *args, **kwargs):\n \"\"\"\n Proxy for :py:meth:`~peng3d.gui.slider.AdvancedProgressbar.addCategory()`\\\\ .\n \"\"\"\n return self.wprogressbar.addCategory(*args, **kwargs)\n\n def updateCategory(self, *args, **kwargs):\n \"\"\"\n Proxy for :py:meth:`~peng3d.gui.slider.AdvancedProgressbar.updateCategory()`\\\\ .\n \"\"\"\n return self.wprogressbar.updateCategory(*args, **kwargs)\n\n def deleteCategory(self, *args, **kwargs):\n \"\"\"\n Proxy for :py:meth:`~peng3d.gui.slider.AdvancedProgressbar.deleteCategory()`\\\\ .\n \"\"\"\n return self.wprogressbar.deleteCategory(*args, **kwargs)\n","repo_name":"not-na/peng3d","sub_path":"peng3d/gui/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":22235,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"}
+{"seq_id":"12186572807","text":"import random\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow.compat.v1 as tf\n\nfrom make_detection.utils import create_category_index_from_labelmap, \\\n reframe_box_masks_to_image_masks\nfrom make_detection.visualization_utils import visualize_boxes_and_labels_on_image_array\n\ntf.disable_v2_behavior()\nimport zipfile\n\nfrom distutils.version import StrictVersion\nfrom collections import defaultdict\nfrom io import StringIO\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n# What model to download.\n\nMODEL_NAME = 'ssd_inception_v2_coco_2018_01_28'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n\nIMAGE_SIZE = (12, 8)\n\n\n\nclass ObjectDetector:\n\n def __init__(self, PATH_TO_FROZEN_GRAPH, PATH_TO_LABELS):\n # download model\n \"\"\"opener = urllib.request.URLopener()\n print(\"Downloading model file\")\n opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\n tar_file = tarfile.open(MODEL_FILE)\n\n print(\"Extracting model into frozen interference graph\")\n for file in tar_file.getmembers():\n file_name = os.path.basename(file.name)\n if 'frozen_inference_graph.pb' in file_name:\n tar_file.extract(file, os.getcwd())\n \"\"\"\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n s = fid.size()\n self.serialized_graph = fid.read()\n od_graph_def.ParseFromString(self.serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.category_index = create_category_index_from_labelmap(PATH_TO_LABELS)\n\n def load_image_into_numpy_array(self, image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n def run_inference_for_single_image(self, image):\n with self.detection_graph.as_default():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[1], image.shape[2])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: image})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.int64)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n return output_dict\n\n def read_image_and_run_object_detection(self, image):\n # image = Image.open(image)\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n image = self.load_image_into_numpy_array(image)\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image = np.expand_dims(image, axis=0)\n # Actual detection.\n output_dict = self.run_inference_for_single_image(image)\n return output_dict, image\n\n def get_category_info(self, object_detection_dict, max_boxes_to_draw=20,\n min_score_thresh=.5):\n boxes = object_detection_dict['detection_boxes']\n classes =object_detection_dict['detection_classes']\n scores =object_detection_dict['detection_scores']\n category_index = self.category_index\n result = []\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n if classes[i] in category_index.keys():\n result.append((category_index[classes[i]]['name'], scores[i]))\n return result\n\n def visualize_object_detection(self, object_detection_dict, image_np):\n visualize_boxes_and_labels_on_image_array(\n image_np,\n object_detection_dict['detection_boxes'],\n object_detection_dict['detection_classes'],\n object_detection_dict['detection_scores'],\n self.category_index,\n instance_masks=object_detection_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=8)\n plt.figure(figsize=IMAGE_SIZE)\n plt.imshow(image_np)\n plt.show()\n\nif __name__ == '__main__':\n PATH_TO_FROZEN_GRAPH = \"model/fine_tuned_model/frozen_inference_graph.pb\"\n PATH_TO_LABELS = \"annotations/label_map.pbtxt\"\n obj = ObjectDetector(PATH_TO_FROZEN_GRAPH, PATH_TO_LABELS)\n image = Image.open(\"../audi_test.jpg\")\n object_detection_result, image_np = obj.read_image_and_run_object_detection(image)\n obj.visualize_object_detection(object_detection_result, image_np)","repo_name":"banda13/Carrecognizer","sub_path":"classifiers/pipline/make_detection/object_detector.py","file_name":"object_detector.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"15383584001","text":"from tqdm import tqdm\nimport fitz # PyMuPDF\nimport io\nfrom PIL import Image\nimport os\nimport cv2\nimport numpy as np\nfrom multiprocessing import Process, Manager\nfrom multiprocessing import Pool\nfrom functools import partial\nimport subprocess\nfrom os import listdir\nfrom os.path import isfile, join\nfrom tqdm import tqdm\n\n\ndef check_and_inv(file_path):\n\n\t# Reading an image in default mode:\n\tinputImage = cv2.imread(file_path)\n\t\n\t# Convert RGB to grayscale:\n\toriginalGrayscale = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)\n\t\n\t# Equalize histogram\n\tgrayscaleImage = cv2.equalizeHist(originalGrayscale)\n\t\n\t# It might be interesting to you to check out the image equalization:\n\tcv2.waitKey(0)\n\t\n\t# Binarize the image with a fixed threshold:\n\tminThresh = 128\n\t_, binaryImage = cv2.threshold(grayscaleImage, minThresh, 255, cv2.THRESH_BINARY)\n\t\n\t# Compute the percent of white pixels:\n\t(imageHeight, imageWidth) = binaryImage .shape[:2]\n\twhitePercent = cv2.countNonZero(binaryImage)/(imageHeight * imageWidth)\n\n\tif whitePercent < 0.2:\n\t\t#print(\"Correcting images...\")\n\t\n\t\t# Correct the equalized image:\n\t\tgrayscaleImage = 255 - grayscaleImage\n\t\tcv2.imwrite(file_path, grayscaleImage )\n\n\ndef image_extraction(database_path,result_folder,output_list,file_list,index):\n\t# get the file path\n\tfile_name=file_list[index]\n\tfile_path=database_path+'/'+file_name\n\t# make a folder to store result\n\tarticle_DOI=file_name.replace('.pdf','')\n\twith fitz.open(file_path) as my_pdf_file:\n\t\t#loop through every page\n\t\tcount=0\n\t\tfor page_number in range(0, len(my_pdf_file)):\n\t\n\t\t\t# acess individual page\n\t\t\tpage = my_pdf_file[page_number]\n\t\n\t\n\t\t\t# check if images are there\n\t\t\t#$if images:\n\t\t\t#\tprint(f\"There are {len(images)} image/s on page number {page_number}[+]\")\n\t\t\t#else:\n\t\t\t#\tprint(f\"There are No image/s on page number {page_number}[!]\")\n\t\n\t\t\t# loop through all images present in the page \n\t\t\tfor image_number, image in enumerate(page.get_images(), start=0):\n\t\n\t\t\t\t#access image xerf\n\t\t\t\txref_value = image[0]\n\t\t\t\t\n\t\t\t\t#extract image information\n\t\t\t\tbase_image = my_pdf_file.extract_image(xref_value)\n\t\n\t\t\t\t# access the image itself\n\t\t\t\timage_bytes = base_image[\"image\"]\n\t\n\t\t\t\t#get image extension\n\t\t\t\text = base_image[\"ext\"]\n\t\n\t\t\t\t#load image\n\t\t\t\timage = Image.open(io.BytesIO(image_bytes))\n\t\n\t\t\t\t#save image locally\n\t\t\t\tcount+=1\n\t\t\t\tout_im_path=f\"{result_folder}/{article_DOI}_{count}.{ext}\"\n\t\t\t\timage.save(open(out_im_path, \"wb\"))\n\t\t\t\tcheck_and_inv(out_im_path)\n\toutput_list.append('.')\n\tprint(len(output_list))\n# file path you want to extract images from\n#file_name = \"0-0040403996013020.pdf\"\nif __name__=='__main__':\n\t\n\t\n\n\t#cwd = os.getcwd()\n\t\n\t#database_path='D:/DATACHEM/co_activation_database'\n\t#database_path='D:/DATACHEM/picture_extraction/broken pdf'\n\t\n\tdatabase_path='PDF_folders'\n\tfile_list = [f for f in listdir(database_path) if isfile(join(database_path, f))]\n\t\n\n\t#out_path='pictures from pdf'\n\t#out_path='broken pdf figure extraction'\n\t\n\tout_path='extracted_figs'\n\n\ttry:\n\t\tos.mkdir(out_path)\n\texcept:\n\t\tprint('result folder already exist, carefull of overwriting')\n\t#file_name = \"acs.organomet.5b00874.pdf\"\n\t\n\t# this part for single-processing\n\t#for i in tqdm(range(len(file_list))):\n\t#for index,file_name in enumerate(file_list):\n\t\t#print(index,len(file_list))\n\t\t#file_name=file_list[i]\n\t\t#image_extraction(file_name,database_path,out_path)\n\t\t#print(file_name)\n\n\t# this part for multi-processing\n\toutput_list = Manager().list()\n\tinput_list=range(0,len(file_list))\n\tprint(input_list)\n\tpool = Pool(processes=4)\n\tfunc = partial(image_extraction,database_path,out_path,output_list,file_list)\n\tpool.map(func, input_list)\n\tpool.close()\n\tpool.join()\n\t#\n\n","repo_name":"longthanhta/Chem_Coupling-Reaction_DA","sub_path":"4_mining_tool/Graph Mining/STEP1_figure_extraction_mp.py","file_name":"STEP1_figure_extraction_mp.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"33056009756","text":"# Import random, import game_data, import art, import clear\nimport random\nfrom game_data import data\nfrom replit import clear\nimport art\n\n\ndef get_random():\n '''Randomly chooses from a list of entries.'''\n return random.choice(data)\n\ndef format_data(options):\n '''From the dictionary, assigns name, description, and country keys to a variable and formats them using an f-string to be used later in the game. '''\n name = options['name']\n description = options['description']\n country = options['country']\n return f\"{name}, a {description} from {country}.\"\n\ndef check_answer(guess, a_followers, b_followers):\n '''Determines which option has more followers and returns a boolean.'''\n if a_followers > b_followers:\n return guess == 'a'\n else:\n return guess == 'b'\n\ndef game():\n '''Plays through Higher or Lower game.'''\n game_on = True\n\n option_a = get_random()\n option_b = get_random()\n score = 0\n\n while game_on:\n print(art.logo)\n print(f'Your current score is {score}')\n print(f'Choice A: {format_data(option_a)}')\n print(art.vs)\n print(f'Choice B: {format_data(option_b)}')\n user_guess = input(\"Who has more followers? Type 'A' or 'B': \").lower()\n print(user_guess)\n \n option_a_followers = option_a['follower_count']\n option_b_followers = option_b['follower_count']\n\n if check_answer(user_guess, option_a_followers, option_b_followers) == True:\n score += 1\n option_a = option_b\n option_b = get_random()\n clear()\n else:\n print(f\"Sorry, you guessed incorrectly. Your final score is {score}\")\n game_on = False\n\ngame()","repo_name":"marshallc03/higher-lower-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21457620140","text":"from contextlib import contextmanager\nfrom copy import deepcopy\nfrom StringIO import StringIO\nfrom textwrap import dedent\nfrom unittest import TestCase\n\nfrom mock import (\n call,\n patch,\n )\n\nfrom jujupy import (\n AuthNotAccepted,\n fake_juju_client,\n InvalidEndpoint,\n NameNotAccepted,\n TypeNotAccepted,\n )\nfrom assess_add_cloud import (\n assess_all_clouds,\n assess_cloud,\n CloudMismatch,\n CloudSpec,\n CloudValidation,\n cloud_spec,\n EXCEEDED_LIMIT,\n iter_clouds,\n NameMismatch,\n write_status,\n xfail,\n )\nfrom tests import FakeHomeTestCase\nfrom utility import JujuAssertionError\n\n\nclass TestCloudSpec(TestCase):\n\n def test_cloud_spec(self):\n self.assertEqual(\n CloudSpec('label1', 'name1', {'config': '1'}, None, None),\n cloud_spec('label1', 'name1', {'config': '1'}))\n\n\nclass TestXFail(TestCase):\n\n def test_xfail(self):\n spec = CloudSpec('label', 'name', {'config': 'value'}, 'foo', 'bar')\n fail_spec = xfail(spec, 'baz', 'qux')\n self.assertEqual(fail_spec, CloudSpec(\n 'label', 'name', {'config': 'value'}, 'qux', 'baz'))\n\n\nclass TestAssessCloud(FakeHomeTestCase):\n\n @contextmanager\n def cloud_client(self, clouds):\n client = fake_juju_client(juju_home=self.juju_home)\n client.env.load_yaml()\n\n def dump(cloud_name, cloud):\n client.env.write_clouds(client.env.juju_home,\n clouds)\n\n with patch.object(client, 'add_cloud_interactive', dump):\n yield client\n\n def test_assess_cloud(self):\n expected_cloud = {'clouds': {\n 'foo': {\n 'type': 'maas',\n 'endpoint': 'http://bar.example.com',\n }}}\n with self.cloud_client(expected_cloud) as client:\n assess_cloud(client, 'foo', expected_cloud['clouds']['foo'])\n\n def test_assess_cloud_missing(self):\n with self.cloud_client({'clouds': {}}) as client:\n with self.assertRaisesRegexp(JujuAssertionError,\n 'Clouds missing!'):\n assess_cloud(client, 'foo', {\n 'type': 'maas',\n 'endpoint': 'http://bar.example.com',\n })\n\n def test_assess_cloud_mismatch(self):\n with self.cloud_client({'clouds': {'foo': {}}}) as client:\n with self.assertRaisesRegexp(JujuAssertionError,\n 'Cloud mismatch'):\n stderr = StringIO()\n with patch('sys.stderr', stderr):\n assess_cloud(client, 'foo', {\n 'type': 'maas',\n 'endpoint': 'http://bar.example.com',\n })\n self.assertEqual(dedent(\"\"\"\n Expected:\n {endpoint: 'http://bar.example.com', type: maas}\n\n Actual:\n {}\n \"\"\"), stderr.getvalue())\n\n\nclass TestCloudValidation(FakeHomeTestCase):\n\n def test_2_0(self):\n validation = CloudValidation('2.0.0')\n self.assertIs('2.0.0', validation.version)\n self.assertIs(validation.NONE, validation.support)\n self.assertIsFalse(validation.is_basic)\n self.assertIsFalse(validation.is_endpoint)\n self.assertFalse(validation.has_endpoint('openstack'))\n self.assertIs(\n validation.NONE, CloudValidation('2.0-beta1').support)\n self.assertIs(\n validation.NONE, CloudValidation('2.0.3').support)\n\n def test_2_1(self):\n validation = CloudValidation('2.1.0')\n self.assertIs('2.1.0', validation.version)\n self.assertIs(validation.BASIC, validation.support)\n self.assertIsTrue(validation.is_basic)\n self.assertIsFalse(validation.is_endpoint)\n self.assertFalse(validation.has_endpoint('openstack'))\n self.assertIs(\n validation.BASIC, CloudValidation('2.1-beta1').support)\n self.assertIs(\n validation.BASIC, CloudValidation('2.1.3').support)\n\n def test_2_2_plus(self):\n validation = CloudValidation('2.2.0')\n self.assertIs('2.2.0', validation.version)\n self.assertIs(validation.ENDPOINT, validation.support)\n self.assertIsFalse(validation.is_basic)\n self.assertIsTrue(validation.is_endpoint)\n self.assertTrue(validation.has_endpoint('openstack'))\n self.assertFalse(validation.has_endpoint('manual'))\n self.assertIs(\n validation.ENDPOINT, CloudValidation('2.2-beta1').support)\n self.assertIs(\n validation.ENDPOINT, CloudValidation('2.2.1').support)\n self.assertIs(\n validation.ENDPOINT, CloudValidation('2.3-beta1').support)\n\n\nlong_text = 'A' * EXCEEDED_LIMIT\nendpoint_validation = CloudValidation('2.2.0')\nbasic_validation = CloudValidation('2.1.0')\n\n\ndef make_long_endpoint(spec, validation, regions=False):\n config = deepcopy(spec.config)\n config['endpoint'] = long_text\n if regions:\n for region in config['regions'].values():\n region['endpoint'] = long_text\n spec = cloud_spec('long-endpoint-{}'.format(spec.name), spec.name, config,\n InvalidEndpoint)\n if validation.is_basic:\n spec = xfail(spec, 1641970, CloudMismatch)\n return spec\n\n\nclass TestIterClouds(FakeHomeTestCase):\n\n bogus_type = cloud_spec('bogus-type', 'bogus-type', {'type': 'bogus'},\n exception=TypeNotAccepted)\n\n def test_manual(self):\n self.maxDiff = None\n cloud = {'type': 'manual', 'endpoint': 'http://example.com'}\n spec = cloud_spec('foo', 'foo', cloud)\n self.assertItemsEqual([\n self.bogus_type, spec,\n xfail(cloud_spec('long-name-foo', long_text, cloud),\n 1641970, NameMismatch),\n xfail(cloud_spec('invalid-name-foo', 'invalid/name', cloud,\n exception=NameNotAccepted), 1641981, None),\n make_long_endpoint(spec, basic_validation)\n ],\n iter_clouds({'foo': cloud}, endpoint_validation))\n\n def test_manual_no_validation(self):\n self.maxDiff = None\n cloud = {'type': 'manual', 'endpoint': 'http://example.com'}\n spec = cloud_spec('foo', 'foo', cloud)\n self.assertItemsEqual([\n self.bogus_type, spec,\n xfail(cloud_spec('long-name-foo', long_text, cloud),\n 1641970, NameMismatch),\n xfail(cloud_spec('invalid-name-foo', 'invalid/name', cloud,\n exception=NameNotAccepted), 1641981, None),\n make_long_endpoint(\n spec, basic_validation)\n ],\n iter_clouds({'foo': cloud}, basic_validation))\n\n def test_vsphere(self):\n cloud = {\n 'type': 'vsphere',\n 'endpoint': '1.2.3.4',\n 'regions': {'q': {'endpoint': '1.2.3.4'}},\n }\n spec = cloud_spec('foo', 'foo', cloud, exception=None)\n self.assertItemsEqual([\n self.bogus_type, spec,\n xfail(cloud_spec('invalid-name-foo', 'invalid/name', cloud,\n exception=NameNotAccepted), 1641981, None),\n xfail(cloud_spec('long-name-foo', long_text, cloud,\n exception=None), 1641970, NameMismatch),\n make_long_endpoint(\n spec, endpoint_validation, regions=True),\n ], iter_clouds({'foo': cloud}, endpoint_validation))\n\n def test_vsphere_no_validation(self):\n cloud = {\n 'type': 'vsphere',\n 'endpoint': '1.2.3.4',\n 'regions': {'q': {'endpoint': '1.2.3.4'}},\n }\n spec = cloud_spec('foo', 'foo', cloud, exception=None)\n self.assertItemsEqual([\n self.bogus_type, spec,\n xfail(cloud_spec('invalid-name-foo', 'invalid/name', cloud,\n exception=NameNotAccepted), 1641981, None),\n xfail(cloud_spec('long-name-foo', long_text, cloud,\n exception=None), 1641970, NameMismatch),\n xfail(make_long_endpoint(spec,\n endpoint_validation, regions=True),\n 1641970, CloudMismatch),\n ], iter_clouds({'foo': cloud}, basic_validation))\n\n def test_maas(self):\n cloud = {\n 'type': 'maas',\n 'endpoint': 'http://example.com',\n }\n spec = cloud_spec('foo', 'foo', cloud, exception=None)\n self.assertItemsEqual([\n self.bogus_type, spec,\n xfail(cloud_spec('invalid-name-foo', 'invalid/name', cloud,\n exception=NameNotAccepted), 1641981, None),\n xfail(cloud_spec('long-name-foo', long_text, cloud,\n exception=None), 1641970, NameMismatch),\n make_long_endpoint(spec, endpoint_validation),\n ], iter_clouds({'foo': cloud}, endpoint_validation))\n\n def test_maas_no_validation(self):\n cloud = {\n 'type': 'maas',\n 'endpoint': 'http://example.com',\n }\n spec = cloud_spec('foo', 'foo', cloud, exception=None)\n self.assertItemsEqual([\n self.bogus_type, spec,\n xfail(cloud_spec('invalid-name-foo', 'invalid/name', cloud,\n exception=NameNotAccepted), 1641981, None),\n xfail(cloud_spec('long-name-foo', long_text, cloud,\n exception=None), 1641970, NameMismatch),\n make_long_endpoint(spec, basic_validation),\n ], iter_clouds({'foo': cloud}, basic_validation))\n\n def test_openstack(self):\n config = {'type': 'openstack', 'endpoint': 'http://example.com',\n 'regions': {'bar': {'endpoint': 'http://baz.example.com'}}}\n spec = cloud_spec('foo', 'foo', config, exception=None)\n invalid_name = xfail(\n cloud_spec('invalid-name-foo', 'invalid/name', config,\n exception=NameNotAccepted), 1641981, None)\n long_name = xfail(\n cloud_spec('long-name-foo', long_text, config, exception=None),\n 1641970, NameMismatch)\n long_region = cloud_spec(\n 'long-endpoint-foo-bar', 'foo', deepcopy(config), InvalidEndpoint)\n long_region.config['regions']['bar']['endpoint'] = long_text\n bogus_auth = cloud_spec('bogus-auth-foo', 'foo',\n deepcopy(config), exception=AuthNotAccepted)\n bogus_auth.config['auth-types'] = ['asdf']\n self.assertItemsEqual([\n self.bogus_type, spec, invalid_name, long_name, long_region,\n bogus_auth,\n make_long_endpoint(spec, endpoint_validation),\n ], iter_clouds({'foo': config}, endpoint_validation))\n\n def test_openstack_no_validation(self):\n config = {'type': 'openstack', 'endpoint': 'http://example.com',\n 'regions': {'bar': {'endpoint': 'http://baz.example.com'}}}\n spec = cloud_spec('foo', 'foo', config, exception=None)\n invalid_name = xfail(\n cloud_spec('invalid-name-foo', 'invalid/name', config,\n exception=NameNotAccepted), 1641981, None)\n long_name = xfail(\n cloud_spec('long-name-foo', long_text, config, exception=None),\n 1641970, NameMismatch)\n long_region = xfail(cloud_spec(\n 'long-endpoint-foo-bar', 'foo', deepcopy(config),\n InvalidEndpoint), 1641970, CloudMismatch)\n long_region.config['regions']['bar']['endpoint'] = long_text\n bogus_auth = cloud_spec('bogus-auth-foo', 'foo',\n deepcopy(config), exception=AuthNotAccepted)\n bogus_auth.config['auth-types'] = ['asdf']\n self.assertItemsEqual([\n self.bogus_type, spec, invalid_name, long_name, long_region,\n bogus_auth,\n make_long_endpoint(spec, basic_validation),\n ], iter_clouds({'foo': config}, basic_validation))\n\n\nclass TestAssessAllClouds(FakeHomeTestCase):\n\n def test_assess_all_clouds(self):\n client = fake_juju_client(juju_home=self.juju_home)\n clouds = {'a': {'type': 'foo'}, 'b': {'type': 'bar'}}\n cloud_specs = iter_clouds(clouds, endpoint_validation)\n exception = Exception()\n with patch('assess_add_cloud.assess_cloud',\n side_effect=[TypeNotAccepted(), None] + [exception] * 7):\n with patch('sys.stdout'):\n with patch('logging.exception') as exception_mock:\n succeeded, xfail, failed = assess_all_clouds(client,\n cloud_specs)\n self.assertEqual({'bogus-type', 'a'}, succeeded)\n self.assertEqual({\n 'b', 'bogus-auth-a', 'bogus-auth-b', 'invalid-name-a',\n 'invalid-name-b', 'long-name-a', 'long-name-b'},\n failed)\n self.assertEqual(exception_mock.mock_calls, [call(exception)] * 7)\n\n def test_xfail(self):\n cloud_specs = [xfail(cloud_spec('label1', 'name1', {'config': '1'}),\n 27, TypeNotAccepted)]\n client = fake_juju_client(juju_home=self.juju_home)\n with patch('assess_add_cloud.assess_cloud',\n side_effect=TypeNotAccepted):\n with patch('logging.exception') as exception_mock:\n with patch('sys.stdout'):\n succeeded, xfailed, failed = assess_all_clouds(client,\n cloud_specs)\n self.assertEqual(set(), failed)\n self.assertEqual({27: {'label1'}}, xfailed)\n self.assertEqual(0, exception_mock.call_count)\n\n def test_failed_notraised(self):\n client = fake_juju_client(juju_home=self.juju_home)\n cloud_specs = [\n cloud_spec('label', 'name', {'config': '1'}, TypeNotAccepted)]\n with patch('assess_add_cloud.assess_cloud'):\n with patch('logging.exception') as exception_mock:\n with patch('sys.stdout'):\n succeeded, xfailed, failed = assess_all_clouds(client,\n cloud_specs)\n self.assertEqual(set(['label']), failed)\n self.assertEqual(1, exception_mock.call_count)\n raised_e = exception_mock.mock_calls[0][1][0]\n self.assertEqual(\n \"Expected exception not raised: \"\n \"\",\n raised_e.message)\n\n\nclass TestWriteStatus(FakeHomeTestCase):\n\n def do_write(self, status, items):\n stdout = StringIO()\n with patch('sys.stdout', stdout):\n write_status(status, items)\n return stdout.getvalue()\n\n def test_write_none(self):\n self.assertEqual('pending: none\\n', self.do_write('pending', set()))\n\n def test_write_one(self):\n self.assertEqual('pending: q\\n', self.do_write('pending', {'q'}))\n\n def test_write_two(self):\n self.assertEqual('pending: q, r\\n',\n self.do_write('pending', {'r', 'q'}))\n","repo_name":"juju/1.25-upgrade","sub_path":"juju2/acceptancetests/tests/test_assess_add_cloud.py","file_name":"test_assess_add_cloud.py","file_ext":"py","file_size_in_byte":15196,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"19988815501","text":"#!/usr/bin/env python3\n\n\"\"\"\nEntrypoint for docker-droplet\nParse command line arguments and call the selected handlers\n\"\"\"\n\nimport sys\nfrom inspect import cleandoc\nfrom os import environ, path\n\nfrom docopt import docopt # type: ignore\n\nfrom docker_droplet.down import tear_down\nfrom docker_droplet.exceptions import (\n MissingVariable,\n PathNotResolvable,\n)\nfrom docker_droplet.up import set_up\n\n# If packaged multi_job will be scoped, otherwise append parent path.\npackage_directory = path.realpath(path.join(__file__, \"../..\"))\nsys.path.append(package_directory)\n\n\nCLI = cleandoc(\n \"\"\"\n Usage:\n docker-droplet up [options]\n docker-droplet down [options]\n \n Options:\n --droplet-name=\n --ssh-key=\n --token=\n --project=\n --domain=\n --config-path=\"\"\"\n)\n\n\nclass InputArg:\n def __init__(self, name: str, value: str) -> None:\n self.name = name\n self.value = None if value == \"None\" else value\n\n def assign_default(self, default: str) -> None:\n \"\"\"\n If the object's value attribute is None the assign a default value\n\n Args:\n default (str):\n \"\"\"\n if not self.value:\n self.value = default\n\n def validate_path(self, check_file_exists: bool = False) -> None:\n \"\"\"\n Check if path's directory is resolvable. Optionally check if the path itself exists.\n\n Args:\n check_file_exists (bool, optional): [description]. Defaults to False.\n\n Raises:\n PathNotResolvable: [description]\n PathNotResolvable: [description]\n \"\"\"\n if not path.exists(path.dirname(self.value)):\n raise PathNotResolvable(self.name, self.value)\n\n if check_file_exists and not path.exists(self.value):\n raise PathNotResolvable(self.name, self.value)\n\n def sync_env(self) -> None:\n \"\"\"\n Synchronize the object's value with the environment variable TF_VAR_\n \"\"\"\n NAME = \"TF_VAR_DOCKER_DROPLET_\" + self.name.upper()\n if self.value:\n environ.putenv(NAME, self.value)\n else:\n self.value = environ.get(NAME, None)\n\n def set_required(self) -> None:\n if not self.value:\n raise MissingVariable(self.name)\n\n def __str__(self) -> str:\n return f\"{self.name}: {self.value}\"\n\n\ndef main() -> None:\n \"\"\"\n Entry point for docker-droplet\n \"\"\"\n arguments = docopt(CLI)\n droplet_name = InputArg(\n \"droplet_name\", arguments[\"--droplet-name\"]\n )\n ssh_key = InputArg(\"ssh_key\", arguments[\"--ssh-key\"])\n token = InputArg(\"token\", arguments[\"--token\"])\n project = InputArg(\"project\", arguments[\"--project\"])\n domain = InputArg(\"domain\", arguments[\"--domain\"])\n config_path = InputArg(\"config_path\", arguments[\"--config-path\"])\n\n token.sync_env()\n token.set_required()\n\n if arguments[\"up\"]:\n config_path.assign_default(\"./config.tf\")\n config_path.validate_path()\n\n droplet_name.sync_env()\n droplet_name.set_required()\n\n ssh_key.sync_env()\n ssh_key.set_required()\n ssh_key.validate_path(check_file_exists=True)\n\n set_up(\n droplet_name.value,\n ssh_key.value,\n token.value,\n project.value,\n domain.value,\n config_path.value,\n )\n\n if arguments[\"down\"]:\n config_path.set_required()\n config_path.validate_path()\n tear_down(token.value, config_path.value)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JoelLefkowitz/docker-droplet","sub_path":"docker_droplet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"42135389129","text":"import configparser\nimport torch # noqa\n\n\ndef create(model, config_file):\n\n config = configparser.ConfigParser()\n config.read(config_file)\n\n optimizer_type = eval('torch.optim.' + config['optimizer']['type'])\n optimizer_params = eval(config['optimizer']['params'])\n num_iterations = eval(config['optimizer']['num_iterations'])\n\n optimizer = optimizer_type(model.parameters(), **optimizer_params)\n\n return optimizer, num_iterations\n","repo_name":"funkelab/lisl","sub_path":"lisl/optimizers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"75155781584","text":"\"\"\"\n<>\n\n>\n\n<\n\"\"\"\nfrase = \"Hola que tal que como que estas\"\n\nlistaFrase = frase.split()\n\nconj = set()\n\nfor elem in listaFrase:\n conj.add(elem)\n\nrespuesta = list(conj)\n\nrespuesta.sort(key= lambda x: len(x), reverse=True)\n \n\nprint(respuesta)\n","repo_name":"fersayago/Programacion","sub_path":"Test/conjunto ordernar frase.py","file_name":"conjunto ordernar frase.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1229887998","text":"'''\nPython 对注解所做的唯一的事情是,把它们存储在函数的 __annotations__ 属性里。\n\n仅此而已,Python 不做检查、不做强制、不做验证,什么操作都不做。换句话说,\n\n注解对Python 解释器没有任何意义。注解只是元数据,可以供 IDE、框架和装饰器等工具使用。\n\n写作本书时,标准库中还没有什么会用到这些元数据,\n\n唯有 inspect.signature() 函数知道怎么提取注解,如示例 5-20 所示\n'''\n\n# 示例 5-20 从函数签名中提取注解\nfrom chapter_5.demo_5_19 import clip\nfrom inspect import signature\n\nsig = signature(clip)\n\nprint(sig.return_annotation)\n'''\nsignature 函 数 返 回 一 个 Signature 对 象,\n它 有 一 个 return_annotation 属 性 和 一 个parameters 属性,后者是一个字典,把参数名映射到 Parameter 对象上。\n每个 Parameter 对象自己也有 annotation 属性。\n\n示例 5-20 用到了这几个属性\n'''\nfor param in sig.parameters.values():\n\tnote = repr(param.annotation).ljust(13)\n\tprint(note, ':', param.name, '=', param.default)\n","repo_name":"linsanityHuang/the_fluent_python","sub_path":"chapter_5/demo_5_20.py","file_name":"demo_5_20.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"33442005922","text":"import numpy as np\nimport cv2\n\nimport target_localization\n\ndef main_video():\n cap = cv2.VideoCapture(\"footage/2022-finals-diver-5-targets.mp4\")\n\n if not cap.isOpened():\n return\n\n while (cap.isOpened()):\n success, frame = cap.read()\n\n if not success:\n break\n\n target_localization.process(frame)\n\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n break\n\n cap.release()\n\n cv2.destroyAllWindows()\n\ndef main_image():\n image_files = [\n \"footage/sarah-target-1.png\",\n \"footage/sarah-target-2.png\",\n \"footage/sarah-target-3.png\",\n ]\n\n for image_file in image_files:\n frame = cv2.imread(image_file)\n\n target_localization.process(frame)\n\n\n while True:\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n break\n\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main_image()","repo_name":"Tartan-AUV/TAUV-Playground","sub_path":"target-localization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"13551836431","text":"import math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\ndata_type = {'msg' : 'raw', 'header' : 'header', 'array' : 'array'}\ndata_protocol = {'tcp' : 'tcp', 'ipc' : 'ipc'}\ndata_copy = {True : 'copy', False : 'notcopy'}\ndata_method = {'json' : 'json', 'cpickle' : 'cPickle'}\n\ndata = {'msg': {}, 'header': {}, 'array': {}}\n\nwith open('log') as fp:\n begin = False\n #dtype = None\n #method = None\n #size = 0\n #is_copy = False\n #niter = 0\n #latency = 0\n #iterations = 0\n #throughput = 0\n for line in fp:\n line = line.strip()\n if line.startswith('test_'):\n begin = True\n words = line.split('_')\n dtype = words[1]\n if words[1] == 'msg':\n method = None\n else:\n method = words[2]\n elif begin:\n words = line.split()\n if words[0] == 'Address':\n if words[2].find('ipc') >= 0:\n protocol = 'ipc' \n elif words[2].find('tcp') >= 0:\n protocol = 'tcp'\n begin = False\n else:\n raise Exception\n elif words[0] == 'Size':\n size = math.log(int(words[2]), 2)\n #size = int(words[2])\n elif words[0] == 'Copy':\n is_copy = eval(words[2])\n elif words[0] == 'niter':\n niter = int(words[2])\n elif words[0] == 'Latency':\n latency = float(words[2])\n elif words[0] == 'Iterations':\n iterations = float(words[4])\n elif words[0] == 'Throughput':\n throughput = float(words[4])\n elif words[0] == 'Elapse':\n begin = False\n name = data_type[dtype] + '_' + data_protocol[protocol]\n if method:\n name += '_' + data_method[method]\n name += '_' + data_copy[is_copy]\n if not data[dtype].get(name, None):\n data[dtype][name] = {}\n data[dtype][name][size] = [latency, iterations, throughput]\n\n\nfor dtype in data.keys():\n throughput = {}\n latency = {}\n iterations = {}\n for name, val in data[dtype].items():\n size = val.keys()\n size.sort()\n latency[name] = [size, [val[s][0] for s in size]]\n iterations[name] = [size, [val[s][1] for s in size]]\n throughput[name] = [size, [val[s][2] for s in size]]\n\n for name, val in latency.items():\n plt.plot(val[0], val[1], 'o--', label=name)\n plt.grid(True)\n plt.xlabel('Size (log(bytes), normalized)')\n plt.ylabel('Latency (us)')\n legend = plt.legend(loc='upper left', shadow=True)\n inset_axes(plt.axes(), width=\"45%\", height=\"45%\", loc=10)\n for name, val in latency.items():\n plt.plot(val[0][:17], val[1][:17], 'o--', label=name)\n plt.grid(True)\n plt.show()\n plt.cla()\n\n for name, val in iterations.items():\n plt.plot(val[0], val[1], 'o--', label=name)\n legend = plt.legend(loc='upper right', shadow=True)\n plt.grid(True)\n plt.xlabel('Size (log(bytes), normalized)')\n plt.ylabel('Iterations')\n plt.show()\n plt.cla()\n\n for name, val in throughput.items():\n plt.plot(val[0], val[1], 'o--', label=name)\n legend = plt.legend(loc='upper left', shadow=True)\n plt.grid(True)\n plt.xlabel('Size (log(bytes), normalized)')\n plt.ylabel('Throughput (MBps)')\n plt.show()\n plt.cla()\n\n","repo_name":"fegin/playcode","sub_path":"zmq_numpy_perf/parse_log.py","file_name":"parse_log.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73133115346","text":"# 安装: pip install selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\n# WebDriver is an open source tool for automated testing of webapps across many browsers. \n# It provides capabilities for navigating to web pages, \n# user input, JavaScript execution, and more. \nbrowser = webdriver.Chrome('E:/Tools/Extensions/chromedriver_win32/chromedriver.exe')\n\n# Open Web Page\nbrowser.get('http://www.baidu.com/')\n# Sleep 5 seconds till the web page is opened\ntime.sleep(5)\n\n# 找到百度搜索框\ninput = browser.find_element_by_id('kw')\n# 输入关键字\ninput.send_keys('Python')\n# 回车\ninput.send_keys(Keys.ENTER)\n\ntime.sleep(3)\n# 点击页面顶部 “贴吧”\nbrowser.find_element_by_link_text('贴吧').click()","repo_name":"fredligithub/PythonLearning","sub_path":"venv/MyTestCode/Basic/Selenium_1.py","file_name":"Selenium_1.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"72240725266","text":"from django.shortcuts import render, redirect, render_to_response\n\nfrom django.http import HttpResponse\n\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib.auth.forms import AuthenticationForm\n\nfrom django.core.urlresolvers import reverse\n\nfrom biocore import forms \nfrom biocore.models import MealSignup\n\nfrom django.views.generic import ListView\n\nfrom models import User\nfrom datetime import datetime\nimport datetime\nfrom django.template.loader import get_template\nfrom django.template import Context, loader, RequestContext\n\n\ndef hello(request):\n\treturn HttpResponse(\"Hello world\")\n\ndef current_datetime(request):\n now = datetime.datetime.now()\n html = \"It is now %s.\" % now\n return HttpResponse(html)\n#\n#def hours_ahead(request, offset):\n#\ttry:\n#\t\toffset = int(offset)\n#\texcept ValueError:\n#\t\traise Http404()\n#\tdt = datetime.datetime.now() + datetime.timedelta(hours=offset)\n#\thtml = \"In %s hour(s), it will be %s.\" % (offset, dt)\n#\treturn HttpResponse(html)\n\ndef hours_ahead(request, offset):\n\ttry:\n\t\toffset = int(offset)\n\texcept ValueError:\n\t\traise Http404()\n\tdt = datetime.datetime.now() + datetime.timedelta(hours=offset)\n\t# t = get_template('hours_ahead.html')\n\t#html = t.render(Context({'future': dt}))\n\treturn render(request, 'hour_ahead.html', {'hour_offset': offset,'future':dt})\n\ndef daystoburn(request):\n\tnext_burn = datetime.datetime(2014, 8, 30)\n\tnow = datetime.datetime.now()\n\tdays_until_burn = (next_burn - now).days\n\t#html = \"The Man burns in %s days.\" %days_until_burn\n\t#t = get_template('daysuntilburn.html')\n\t#html = t.render(Context({'daystoburn': days_until_burn}))\n\treturn render(request, 'daysuntilburn.html', {'daystoburn': days_until_burn})\n\nclass ListUserView(ListView):\n\tmodel = User\n\ndef _redirect_if_logged_in(f):\n\tfrom functools import wraps\n\twraps(f)\n\tdef _f(request, *args, **kwargs):\n\t\tnext = request.GET.get(\"next\", reverse('homepage'))\n\t\tif request.user.is_authenticated():\n\t\t\treturn redirect(next)\n\t\treturn f(request, *args, **kwargs)\n\treturn _f\n\n\n# example longform equivalent of shortcuts.render:\n# from django.template.loader import get_template\n# from django.template.context import RequestContext\n# from django.http import HttpResponse\n#\n# partial_context = { 'data': 'from view' }\n# context = RequestContext(request, partial_context)\n# template = get_template('homepage.html')\n# result = template.render(context)\n# response = HttpResponse(result)\n# return response\n# -or- w/ shortcuts.render:\n# \n# return render(request, 'homepage.html', partial_context)\n\ndef homepage(request):\n\treturn render(request, 'homepage.html')\n\ndef profile(request, user_id):\n\treturn HttpResponse(\"TBD\")\n\n@_redirect_if_logged_in\ndef login(request):\n\tlogin_form = AuthenticationForm()\n\tif request.method == 'POST':\n\t\t# django takes form names and filling in field names as keys in the .POST MultiValueDict\n\t\t# something like: request.POST = {'username': 'foo'}\n\t\tlogin_form = AuthenticationForm(request, request.POST)\n\n\t\tif login_form.is_valid():\n\t\t\tuser = login_form.get_user()\n\n\t\t\tif user is not None:\n\t\t\t\tauth.login(request, user)\n\n\t\t\t\tnext = request.GET.get(\"next\", reverse('homepage'))\n\t\t\t\treturn redirect(next)\n\n\trequest.session.set_test_cookie()\n\treturn render(request, 'login.html', {\n\t\t'login_form': login_form\n\t})\n\t#if the \n\ndef logout(request):\n\tnext = request.GET.get(\"next\", reverse('homepage'))\n\tauth.logout(request)\n\treturn redirect(next)\n\n@_redirect_if_logged_in\ndef register(request):\n\tregistration_form = forms.UserCreationForm()\n\tif request.method == 'POST':\n\t\tregistration_form = forms.UserCreationForm(request.POST)\n\n\t\tif registration_form.is_valid():\n\t\t\tuser = registration_form.save()\n\t\t\tuser.backend = 'django.contrib.auth.backends.ModelBackend'\n\t\t\tauth.login(request, user)\n\t\t\treturn redirect(reverse('homepage'))\n\n\treturn render(request, 'register.html', {\n\t\t'registration_form': registration_form\n\t})\n\n@login_required\ndef travel(request):\n\ttravel_from = forms.TravelFrom()\n\tif request.method == \"POST\":\n\t\ttravel_from = forms.TravelFrom(request.POST)\n\n\treturn render(request, 'travel.html', {\n\t\t'travel_from': travel_from\n\t})\n\n# date_choices = User.DATES_2014\n\n\n@login_required\ndef chef_cockpit(request, meal_id):\n\treturn HttpResponse(\"cockpit\")\n\n\n@login_required\ndef meal_signup(request):\n\texisting_signups = MealSignup.objects.filter(user=request.user)\n\tinitial = {}\n\tfor signup in existing_signups:\n\t\tfield_name = forms.MealSignups.field_name_for(signup.meal, signup.position)\n\t\tinitial[field_name] = True\n\n\t# initial should include previous signups.\n\t#initial = User_Meals.objects.filter(user=request.user)\n\t# e.g. initial = {'sous_aug_20_am': True}\n\t# to match the form field\n\n\tform = forms.MealSignups(request.user, initial=initial)\n\tif request.method == 'POST':\n\t\tform = forms.MealSignups(request.user, request.POST, initial=initial)\n\t\tif form.is_valid():\n\t\t\tform.save(request.user, existing_signups)\n\n\n\treturn render(request, 'meal_signup.html', {'form': form})\n\n\n# def meals(request):\n# \tdate_choices = User.DATES_2014 \n# \tmeals1 = forms.Meals()\n\t#display all dates\n\t# if request.method == \"POST\":\n\t# \tmeals = forms.Meals(request.POST)\n\t# if request.method == \"GET\":\n\t# #just render the form\n\t# # get form from forms\n\t# # form = forms.Meals().getForm()\n\t# #below listed dates successfully.\n\t# \treturn render_to_response('meals.html', {'date_choices': date_choices})\n\t# #else:\n\t# #old method below. \n\t# if request.method == \"POST\":\n\t# \tform = Meals(request.POST)\n\t# \tif form.is_valid():\n\t# \t\tshifts = form.cleaned_data['shifts']\n\t# \t\tmeals = form.cleaned_data['meals']\n\t# \t\tdates = form.cleaned_data['']\n\n\n\t# \t\tmodel_instance.save()\n\t# \t\treturn redirect('your data has been saved')\n\t# return render(request 'meals.html', {'form': form,\n\t# \t})\n\n\n# #class based view \n# class MyView(View):\n\n# \tdef get(self, request, *args, **kwargs):\n# \t\treturn HttpResponse(\"Hello, World\")\n\n# #for listing contacts\n# class ListContactView(ListView):\n# \tmodel = Contact \n# \ttemplate_name = 'contact_list.html'\n\n# #adding info to database\n# class CreateContactView(CreateView):\n\n# \tmodel = Contact\n# \ttemplate_name = 'edit_contact.html'\n# \t#tells view to use extra field in forms.py\n# \tform_class = forms.ContactForm\n\n# \tdef get_success_url(self):\n# \t\treturn reverse('contacts-list')\n# #information about where the formshould redirect to the context\n\n# \tdef get_context_data(self, **kwargs):\n\n# \t\tcontext = super(CreateContactView, self).get_context_data(**kwargs)\n# \t\tcontext['action'] = reverse('contacts-new')\n\n# \t\treturn context\n\n# class ContactListViewTest(TestCase):\n\n# \tdef test_contacts_in_the_context(self):\n\n# \t\tclient = Client()\n# \t\tresponse = client.get('/')\n\n# \t\tself.assertEquals(list(response.context['object_list']), [])\n\n# \t\tContact.objects.create(first_name='foo', last_name='bar')\n# \t\tresponse = client.get('/')\n# \t\tself.assertEquals(response.context['object_list'].count(), 1)\n\n# \tdef test_contacts_in_the_context_request_factory(self):\n\n# \t\tfactory = RequestFactory()\n# \t\trequest = factory.get('/')\n\n# \t\tresponse = ListContactView.as_view()(request)\n\n# \t\tself.assertEquals(list(response.context_data['object_list']), [])\n\n# \t\tContact.objects.create(first_name = 'foo', last_name='bar')\n# \t\tresponse = ListContactView.as_view()(request)\n# \t\tself.assertEquals(response.context_data['object_list'].count(), 1)\n\n# class ContactListIntegrationTests(LiveServerTestCase):\n\n# \t@classmethod\n# \tdef setUpClass(cls):\n# \t\tcls.selenium = WebDriver()\n# \t\tsuper(ContactListIntegrationTests, cls).setUpClass()\n\n# \t@classmethod\n# \tdef tearDownClass(cls):\n# \t\tcls.selenium.quit()\n# \t\tsuper(ContactListIntegrationTests, cls).tearDownClass()\n\n# \tdef test_contact_listed(self):\n# \t\t#create test contact\n# \t\tContact.objects.create(first_name='foo', last_name='bar')\n# \t\t#make sure it's listed as on the list\n# \t\tself.selenium.get('%s%s' %(self.live_server_url, '/'))\n# \t\tself.assertEqual(\n# \t\t\tself.selenium.find_elements_by_css_selector('contact')['0'].text, 'foo.bar'\n# \t\t\t)\n# \tdef test_add_contact(self):\n\n\n# \t\tself.selenium.find_element_by_link_text('add contact').click()\n\n# \t\tself.selenium.find_element_by_id('id_first_name').send_keys('test')\n# \t\tself.selenium.find_element_by_id('id_last_name').send_keys('contact')\n# \t\tself.selenium.find_element_by_id('id_email').send_keys('test@example.com')\n\n# \t\tself.selenium.find_element_by_id(\"save_contact\").click()\n# \t\tself.assertEqual(\n# \t\tself.selenium.find_elements_by_css_selector('.contact')[-1].text,'test contact'\n# \t\t)\n\n# #allows us to edit contacts\n# class UpdateContactView(UpdateView):\n# \tmodel = Contact\n# \ttemplate_name = 'edit_contact.html'\n# \t#references forms.py\n# \tform_class = forms.ContactForm\n\n# \tdef get_success_url(self):\n# \t\treturn reverse('contacts-list')\n\n# \tdef get_context_data(self, **kwargs):\n\n# \t\tcontext = super(UpdateContactView, self).get_context_data(**kwargs)\n# \t\tcontext['action'] = reverse('contacts-edit',\n# \t\t\tkwargs= {'pk': self.get_object().id})\n\n# \t\treturn context\n\n# class DeleteContactView(DeleteView):\n\n# model = Contact\n# template_name = 'delete_contact.html'\n\n# def get_success_url(self):\n# return reverse('contacts-list')\n\n\n# class ContactView(DetailView):\n\n# \tmodel = Contact\n# \ttemplate_name = 'contact.html'\n\n# class Contact(models.Model):\n\n# \tdef get_absolute_url(self):\n\n# \t\treturn reverse(\"contacts-view\", kwargs={'pk':self.id})\n\n# class EditContactAddressView(UpdateView):\n\n# \tmodel = Contact\n# \ttemplate_name = 'edit_addresses.html'\n# \tform_class = forms.ContactAddressFormSet\n\n# \tdef get_success_url(self):\n# \t\t# redirect to the Contact view.\n# \t\treturn self.get_object().get_absolute_url()\n\n","repo_name":"beckastar/bioluminati","sub_path":"biocore/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26374146514","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main():\n return render_template('main.html')\n\n@app.route('/login', methods = ['GET', 'POST'])\ndef login():\n error=None\n if request.method == 'POST':\n if request.form['username'] != 'ossp' or request.form['password'] != 'ossp1234':\n error= '회원정보 입력이 잘못되었습니다. 다시 시도하세요.'\n else:\n return render_template(\"application_form.html\")\n return render_template('login.html',error = error)\n\n\n@app.route('/application_form', methods = ['POST', 'GET'])\ndef apply():\n if request.method == 'POST':\n apply = dict()\n apply['Name'] = request.form.get('Name')\n apply['StudentNumber'] = request.form.get('StudentNumber')\n apply['Major'] = request.form.get('Major')\n apply['Location'] = request.form.get('Location')\n apply['Date'] = request.form.get('Date')\n apply['Comment'] = request.form.get('Comment')\n return render_template(\"application_form.html\",apply = apply)\n\n@app.route('/submit', methods = ['POST', 'GET'])\ndef submit():\n uName = request.form.get('Name')\n uStudentNumber = request.form.get('StudentNumber')\n uMajor = request.form.get('Major')\n uLocation = request.form.get('Location')\n uDate = request.form.get('Date')\n uComment = request.form.get('Comment')\n \n return render_template(\"submit.html\",uName = uName, uStudentNumber = uStudentNumber, uMajor = uMajor, uLocation = uLocation, uDate = uDate, uComment = uComment)\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", debug=True, port=80)\n","repo_name":"CSID-DGU/2021-2-OSSprac-project2-4-JiwooKids","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"40141162948","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 22 13:07:21 2021\r\n\r\n@author: zongsing.huang\r\n\"\"\"\r\n\r\nimport itertools\r\n\r\nimport numpy as np\r\n\r\n#%% 題庫\r\nbenchmark = np.array([[ 0, 19, 92, 29, 49, 78, 6],\r\n [19, 0, 21, 85, 45, 16, 26],\r\n [92, 21, 0, 24, 26, 87, 47],\r\n [29, 85, 24, 0, 76, 17, 8],\r\n [49, 45, 26, 76, 0, 90, 27],\r\n [78, 16, 87, 17, 90, 0, 55],\r\n [ 6, 26, 47, 8, 27, 55, 0]])\r\n\r\n#%% 函數定義\r\ndef fitness(X, benchmark):\r\n if X.ndim==1:\r\n X = X.reshape(1, -1)\r\n \r\n P = X.shape[0]\r\n D = X.shape[1]\r\n F = np.zeros(P)\r\n \r\n for i in range(P):\r\n X_new = np.append(X[i], X[i, 0])\r\n \r\n for j in range(D):\r\n st = X_new[j].astype(int)\r\n ed = X_new[j+1].astype(int)\r\n F[i] += benchmark[st, ed]\r\n \r\n return F\r\n\r\ndef swap(X):\r\n D = X.shape[0]\r\n idx = np.arange(D)\r\n comb = list(itertools.combinations(idx, 2))\r\n X_new = np.zeros([len(comb), D])\r\n \r\n for i, (j, k) in enumerate(comb):\r\n X_new[i] = X.copy()\r\n X_new[i, j], X_new[i, k] = X_new[i, k], X_new[i, j]\r\n \r\n return X_new\r\n#%% 參數設定\r\nD = benchmark.shape[1] # 維度\r\nrho_max = 10\r\n\r\n#%% 初始化\r\nX = np.random.choice(D, size=D, replace=False) # 初始解\r\nF = fitness(X, benchmark) # 初始適應值\r\n\r\nrho = 1\r\nwhile rho<=rho_max:\r\n # 更新\r\n X_set = swap(X)\r\n \r\n # 適應值計算\r\n F_set = fitness(X_set, benchmark)\r\n \r\n # 取得X_new\r\n idx = F_set.argmin()\r\n X_new = X_set[idx]\r\n F_new = F_set[idx]\r\n \r\n # 更新X\r\n if F_new 0:\r\n c = max(cnts, key=cv2.contourArea)\r\n ((x, y), radius) = cv2.minEnclosingCircle(c)\r\n center = (int(x), int(y))\r\n radius = int(radius)\r\n img = cv2.circle(img, center, radius, (255, 0, 0), 3)\r\n cv2.imshow(\"kq\",img)\r\n\r\n# ảnh đầu vào\r\nimg = cv2.imread(\"E:\\Download_Anh\\anhCaChua1.jpg\")\r\nhsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # chuyển sang màu HSV\r\n\r\n# các ngưỡng màu\r\nmin_mau_r1 = np.array([0,100,100]) # red1\r\nmax_mau_r1 = np.array([10, 255, 255])\r\n\r\nmin_mau_r2 = np.array([150, 100, 100]) # red2\r\nmax_mau_r2 = np.array([179, 105, 100])\r\n\r\nmin_mau_g = np.array([40, 100, 100]) # green\r\nmax_mau_g = np.array([90, 255, 255])\r\n\r\nmin_mau_y = np.array([15, 100, 100]) # yellow\r\nmax_mau_y = np.array([50, 255, 255])\r\n\r\n# tạo các lớp mặt nạ để tách màu\r\nmask_r1 = cv2.inRange(hsv_img, min_mau_r1, max_mau_r1)\r\nmask_r2 = cv2.inRange(hsv_img, min_mau_r2, max_mau_r2)\r\n\r\nmask_r = cv2.bitwise_or(mask_r1,mask_r1)\r\n\r\ncv2.imshow(\"anh\",mask_r)\r\n\r\nmask_g = cv2.inRange(hsv_img, min_mau_g, max_mau_g)\r\n\r\nmask_y = cv2.inRange(hsv_img, min_mau_y, max_mau_y)\r\n\r\nfinal_g = cv2.bitwise_and(img,img,mask=mask_g)\r\n\r\nfinal_r = cv2.bitwise_and(img,img, mask=mask_r)\r\n\r\nfinal_y = cv2.bitwise_and(img,img, mask=mask_y)\r\n\r\n\r\nif np.any(final_r):\r\n print(\"1\")\r\n drawing(final_r)\r\nif np.any(final_g):\r\n print(\"2\")\r\n drawing(final_g)\r\nif np.any(final_y):\r\n print(\"3\")\r\n drawing(final_y)\r\n\r\ncv2.waitKey(0)","repo_name":"HuYingTran/Python-Opencv","sub_path":"loc1.py","file_name":"loc1.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9395794500","text":"import sys\r\nn,k,b = map(int,sys.stdin.readline().split())\r\nv = [0 for _ in range(n+1)]\r\nfor i in range(b) : v[int(sys.stdin.readline().rstrip())] = 1\r\nb_cnt = 0\r\nl,r,ans = 1,1,9876543210\r\n\r\nfor i in range(1,k+1) :\r\n if v[i] == 1 :\r\n b_cnt +=1\r\n r = i\r\nans = min(ans,b_cnt)\r\nfor i in range(k+1, n+1) :\r\n r += 1\r\n if r >= n+1 :\r\n break\r\n if v[r] == 1 :b_cnt +=1\r\n if v[l] == 1 :b_cnt -=1\r\n l +=1\r\n ans = min(ans,b_cnt)\r\n\r\nprint(ans)","repo_name":"chickenchickenlove/BOJ-Algorithm","sub_path":"BOJ_백준 알고리즘/14465_소가 길을 건너간 이유 5.py","file_name":"14465_소가 길을 건너간 이유 5.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1015519141","text":"bl_info = {\n \"name\": \"PMLS\",\n \"description\": \"Surface reconstruction from DistoX data.\",\n \"author\": \"Attila Gati, et al.\",\n \"version\": (0, 82),\n \"blender\": (2, 65, 0),\n \"location\": \"View3D\",\n \"warning\": \"\", # used for warning icon and text in addons panel\n \"support\": \"COMMUNITY\",\n \"category\": \"3D View\"\n }\n\n\n\n\nimport bpy\nimport bmesh\nfrom bpy.app.handlers import persistent\nimport os\nimport io\nimport collections\n\nimport importlib.util\nimport sys\n\nname = 'pmlslib'\n\nspec = importlib.util.find_spec(name)\npmlslib_module = None\nif spec is not None:\n # If you chose to perform the actual import ...\n pmlslib_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(pmlslib_module)\n # Adding the module to sys.modules is optional.\n sys.modules[name] = pmlslib_module\n pmlslib = pmlslib_module\n\nimport matlab\n\nif importlib.util.find_spec('engine', 'matlab') is not None:\n import matlab.engine\n\nclass PmlsObjectType(type):\n def __init__(cls, *args, **kwargs):\n# print(cls)\n# print(cls.pmls_type)\n if cls.pmls_type:\n cls.PmlsObjectDictionary[cls.pmls_type] = cls \n return super().__init__(*args, **kwargs)\n \nclass PmlsObjectBase( metaclass=PmlsObjectType ):\n PmlsObjectDictionary = {}\n pmls_type = None\n\n @staticmethod\n def type_str( bpyobj ):\n if ('pmls_type' in bpyobj.keys()):\n return bpyobj['pmls_type']\n else:\n return None\n \n @classmethod\n def pmls_type_check( cls, bpyobj ):\n typestr = cls.type_str( bpyobj ) \n return typestr and cls.pmls_type and typestr == cls.pmls_type\n \n @classmethod\n def pmls_get_type( cls, bpyobj ):\n typestr = cls.type_str( bpyobj ) \n if typestr and typestr in cls.PmlsObjectDictionary:\n return cls.PmlsObjectDictionary[typestr]\n return None\n \nclass PmlsObject(PmlsObjectBase):\n @staticmethod\n def tr_default( obj ):\n if obj is None:\n return bpy.context.active_object\n else:\n return obj\n \n def __bool__(self):\n# print( \"Object::bool called\" )\n return bool( self.object )\n \n def clear(self, objdata_too=True):\n bm = self.get_bmesh()\n bm.clear()\n bmesh.update_edit_mesh(self.object.data)\n if objdata_too:\n self._clear_objdata()\n \n \n def NullInit(self):\n self.object = None\n \n def delete_object(self):\n obj = self.object\n if bpy.ops.object.mode_set.poll():\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action='DESELECT')\n obj.hide = False\n obj.select = True\n bpy.ops.object.delete()\n self.NullInit()\n \n \n def update_from_matlab(self, mobj):\n t1 = type(self)\n t2 = PmlsObjectBase.pmls_get_type(mobj)\n b = issubclass(t2,t1)\n self.clear(not b)\n obj = self.object\n self.NullInit()\n obj[\"pmls_type\"] = PmlsObjectBase.type_str(mobj)\n pmls_obj = PmlsObject(obj)\n pmls_obj._init_from_matlab(mobj)\n return pmls_obj \n \n def __init__( self, bpyobj = None ):\n# print( \"Object::__init__ called\" )\n bpyobj = PmlsObject.tr_default( bpyobj )\n if (type(bpyobj) is bpy.types.Object) and (bpyobj.type == \"MESH\"):\n self.object = bpyobj\n else:\n self._init_from_matlab(bpyobj)\n self.object[\"pmls_type\"] = self.pmls_type\n \n \n def __new__(cls, bpyobj = None):\n# print( 'Obj: ' + str(cls) )\n bpyobj = cls.tr_default(bpyobj)\n if cls.pmls_type_check( bpyobj ):\n pobj = PmlsObjectBase.__new__( cls )\n pobj.NullInit()\n return pobj\n ncls = cls.pmls_get_type( bpyobj )\n if ncls is not None:\n return PmlsObject.__new__( ncls, bpyobj )\n\n @classmethod\n def add_mesh(cls, vt, edges, faces, obj):\n selvt = vt.get(\"selvt\")\n return PmlsEngine.add_mesh( vt[\"pmls_name\"], vt[\"vt\"], edges, faces, obj, selvt )\n \n @staticmethod\n def get_selected_objects():\n return [PmlsObject(o) for o in bpy.context.selected_objects \n if pmls_is_pmlsobj(o) and (pmls_is_deform_mesh(o) or not(o.parent and pmls_is_deform_mesh(o.parent)))]\n \n \n def set_active(self):\n obj = self.object\n aobj = bpy.context.active_object\n if aobj is None or obj.name != aobj.name:\n if bpy.ops.object.mode_set.poll():\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action='DESELECT')\n obj.hide = False\n obj.select = True\n bpy.context.scene.objects.active = obj\n if bpy.ops.object.mode_set.poll():\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action='DESELECT')\n obj.hide = False\n obj.select = True\n \n \n def set_object_mode(self):\n self.set_active()\n bpy.ops.object.mode_set(mode='OBJECT')\n\n def set_edit_mode(self):\n self.set_active()\n bpy.ops.object.mode_set(mode='EDIT')\n \n def is_parent_of(self, child):\n if not isinstance(child, PmlsObject):\n return False\n return child.object.parent and child.object.parent.name == self.object.name\n \n def parent_clear(self):\n if self.object.parent:\n self.object.hide = False\n self.object.select = True\n bpy.context.scene.objects.active = self.object\n bpy.ops.object.duplicate(linked=True)\n bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')\n self.object.hide = True\n self.object.select = False\n obj = PmlsObject(bpy.context.scene.objects.active)\n obj.object.select = False\n return obj\n return self \n \n \n def get_bmesh(self):\n obj = self.object\n# aobj = bpy.context.active_object\n# if aobj is None or obj.name != aobj.name:\n# if bpy.ops.object.mode_set.poll():\n# bpy.ops.object.mode_set()\n# bpy.ops.object.select_all(action='DESELECT')\n# obj.hide = False\n# obj.select = True\n# bpy.context.scene.objects.active = obj\n self.set_active()\n bpy.ops.object.mode_set(mode='EDIT')\n me = obj.data\n return bmesh.from_edit_mesh( me )\n \n @staticmethod\n def _mvt_to_py(mobj):\n mvt = mobj[\"vt\"]\n vt = [];\n vt.extend([(p[0], p[1], p[2]) for p in mvt])\n selvt = mobj.get(\"selvt\")\n if selvt:\n if not isinstance(selvt, collections.Iterable):\n selvt = [[selvt]]\n return {\"vt\" : vt, \"pmls_name\" : mobj[\"pmls_name\"], \"selvt\" : [v[0] for v in selvt]}\n else:\n return {\"vt\" : vt, \"pmls_name\" : mobj[\"pmls_name\"]}\n\n @staticmethod\n def _medges_to_py(mobj):\n medges = mobj[\"edges\"]\n edges = []\n edges.extend([(e[0], e[1]) for e in medges])\n return edges\n\n @staticmethod\n def _mfaces_to_py(mobj):\n mfaces = mobj[\"tris\"]\n faces = []\n faces.extend([(f[0], f[1], f[2]) for f in mfaces])\n return faces\n\n @classmethod\n def is_hedgehog(cls):\n return False\n\n @classmethod\n def is_mesh(cls):\n return False\n \nclass PmlsMesh(PmlsObject):\n pmls_type = \"mesh\"\n\n def _init_from_matlab(self, mobj):\n ob = self.add_mesh( self._mvt_to_py(mobj), [], self._mfaces_to_py(mobj), self.object );\n self.object = ob\n\n def _to_matlab(self, bm):\n S = {}\n S[\"verts\"] = matlab.double([list(v.co) for v in bm.verts])\n S[\"tris\"] = matlab.int32([[v.index for v in f.verts] for f in bm.faces])\n return S\n \n def _to_matlab_skeleton(self):\n S = {};\n S[\"verts\"] = matlab.double(size=(1,0))\n S[\"tris\"] = matlab.int32(size=(1,0))\n S[\"pmls_type\"] = self.pmls_type\n S[\"pmls_name\"] = self.object.name\n return S\n \n \n def to_matlab(self):\n self.set_edit_mode()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bpy.ops.mesh.select_face_by_sides(number=3, type='GREATER', extend=False)\n bpy.ops.mesh.quads_convert_to_tris()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n self.set_object_mode()\n vertices = self.object.data.vertices\n faces = self.object.data.polygons\n S = {};\n n = len(vertices)\n mvt = matlab.double(size=(1,3*n))\n vertices.foreach_get('co', mvt[0])\n mvt.reshape((3,n))\n S[\"verts\"] = mvt\n n = len(faces)\n mtris = matlab.int32(size=(1,3*n))\n faces.foreach_get('vertices', mtris[0])\n mtris.reshape((3,n))\n S[\"tris\"] = mtris\n S[\"pmls_type\"] = self.pmls_type\n S[\"pmls_name\"] = self.object.name\n return S\n# return self._to_matlab(bm) \n \n def advance(self, hedg, anchors):\n obj = self.object\n obj[\"pmls_type\"] = PmlsDeformMesh.pmls_type\n obj = PmlsObject(obj)\n if obj:\n obj.set_children(hedg, anchors)\n return obj\n\n @classmethod\n def is_mesh(cls):\n return True\n \n @classmethod\n def _get_loop(cls, vs):\n loop=[]\n if not vs:\n return loop\n v1 = vs[0]\n es = [e for e in v1.link_edges if e.select]\n if not es:\n raise Exception('Bad selecting!')\n v2 = es[0].other_vert(v1)\n loop = [v1,v2]\n vstart = v1;\n while True:\n es = [e for e in v2.link_edges if e.select]\n if not es:\n raise Exception('Bad selecting!')\n vs = [e.other_vert(v2) for e in es if e.other_vert(v2) not in loop ]\n if not vs:\n vs = [e.other_vert(v2) for e in es if e.other_vert(v2) in loop and e.other_vert(v2) != v1]\n if len(vs) != 1 or vs[0] != vstart:\n raise Exception('Bad selecting!')\n return loop\n if len(vs) > 1: \n raise Exception('Bad selecting!')\n v1 = v2\n v2 = vs[0]\n loop.append(v2)\n\n \n def get_selected_loops(self):\n bm = self.get_bmesh()\n vs = [v for v in bm.verts if v.select]\n if not vs:\n raise Exception('Bad selecting!')\n loops = []\n while vs:\n loop = self._get_loop(vs)\n vs = list(set(vs) - set(loop))\n loops.append(matlab.int32([v.index + 1 for v in loop]))\n return loops \n \nclass PmlsDeformMesh(PmlsMesh):\n pmls_type = \"deform_mesh\"\n \n def set_children(self, hedgehog, anchor):\n H = self.hedgehog()\n if hedgehog and H and H.object.name != hedgehog.object.name:\n H.delete_object()\n H = self.anchor()\n if anchor and H and H.object.name != anchor.object.name:\n H.delete_object()\n obj = self.object\n aobj = bpy.context.active_object\n if aobj is None or obj.name != aobj.name:\n if bpy.ops.object.mode_set.poll():\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action='DESELECT')\n bpy.context.scene.objects.active = obj\n obj.hide = False\n obj.select = True\n if bpy.ops.object.mode_set.poll():\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action='DESELECT')\n if hedgehog and not self.is_parent_of(hedgehog):\n hedgehog = hedgehog.parent_clear()\n if anchor and not self.is_parent_of(anchor):\n anchor = anchor.parent_clear()\n# if hedgehog and hedgehog.object.parent:\n# hedgehog.object.hide = False\n# hedgehog.object.select = True\n# bpy.context.scene.objects.active = hedgehog.object\n# bpy.ops.object.duplicate(linked=True)\n# bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')\n# hedgehog.object.hide = True\n# hedgehog.object.select = False\n# hedgehog = PmlsObject(bpy.context.scene.objects.active) \n# if anchor and anchor.object.parent:\n# anchor.object.hide = False\n# anchor.object.select = True\n# bpy.context.scene.objects.active = anchor.object\n# bpy.ops.object.duplicate(linked=True)\n# bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM') \n# anchor.object.select = False\n# anchor = PmlsObject(bpy.context.scene.objects.active) \n bpy.context.scene.objects.active = obj \n if hedgehog:\n hedgehog.object.select = True\n if anchor:\n anchor.object.select = True\n bpy.ops.object.parent_set()\n if hedgehog:\n hedgehog.object.select = False\n hedgehog.object.hide = True\n if anchor:\n anchor.object.select = False\n anchor.object.hide = True\n \n \n\n \n \n def _init_from_matlab(self, mobj):\n super()._init_from_matlab(mobj)\n self.set_edit_mode()\n obj = self.object\n I = obj.vertex_groups.get(\"anchor\")\n if I:\n bpy.ops.object.vertex_group_set_active(group=\"anchor\")\n bpy.ops.object.vertex_group_lock(action='UNLOCK')\n NI = mobj.get(\"anchor_i\")\n if I:\n if NI:\n bpy.ops.object.vertex_group_remove_from(use_all_verts=True)\n I.add(NI, 1.0, 'REPLACE')\n else:\n bpy.ops.object.vertex_group_remove()\n I = None\n elif NI:\n I = obj.vertex_groups.new(\"anchor\")\n I.add(NI, 1.0, 'REPLACE')\n if I:\n bpy.ops.object.vertex_group_lock(action='LOCK')\n\n\n H = self.hedgehog()\n NH = mobj.get(\"hedgehog\")\n if H:\n if NH:\n if pmls_is_pmlsobj(NH):\n if NH.name != H.object.name:\n H.delete_object()\n NH = PmlsObject(NH)\n else:\n NH = H\n else:\n NH = H.update_from_matlab(NH)\n elif NH is not None:\n NH = H\n elif NH:\n NH = PmlsObject(NH)\n A = self.anchor()\n NA = mobj.get(\"anchor\")\n if A:\n if NA:\n if pmls_is_pmlsobj(NA):\n if NA.name != A.object.name:\n A.delete_object()\n NA = PmlsObject(NA)\n else:\n NA = A\n else:\n NA = A.update_from_matlab(NA)\n elif NA is not None:\n NA = A\n elif NA:\n NA = PmlsObject(NA)\n \n \n self.set_children(NH, NA)\n \n def complete_matlab_data(self, T):\n NH = T.get(\"hedgehog\")\n if not NH and NH is not None:\n H = self.hedgehog()\n if H:\n T[\"hedgehog\"] = H.object\n NA = T.get(\"anchor\")\n if not NA and NA is not None:\n A = self.anchor()\n if A:\n T[\"anchor\"] = A.object\n return T\n \n\n \n def hedgehog(self):\n for H in self.object.children:\n ot = PmlsObject.pmls_get_type(H)\n if ot and ot.is_hedgehog():\n return PmlsObject(H)\n\n def anchor(self):\n for H in self.object.children:\n ot = PmlsObject.pmls_get_type(H)\n if ot and ot.is_mesh():\n return PmlsObject(H)\n \n\n \n def to_matlab(self, only_anchors=False, no_hedgehog=False, no_anchor=False):\n if not only_anchors:\n S = super().to_matlab()\n else:\n S = self._to_matlab_skeleton()\n self.set_object_mode()\n obj = self.object\n if not no_anchor and \"anchor\" in obj.vertex_groups.keys():\n vg = obj.vertex_groups[\"anchor\"].index\n S['anchor_i'] = matlab.int32([v.index + 1 for v in obj.data.vertices if vg in [g.group for g in v.groups]])\n if not no_hedgehog:\n H = self.hedgehog()\n if H:\n S[\"hedgehog\"] = H.to_matlab()\n H.object.select = False\n H.object.hide = True\n if not no_anchor:\n H = self.anchor()\n if H:\n S[\"anchor\"] = H.to_matlab()\n H.object.select = False\n H.object.hide = True\n return S\n \n def _split_complicated(self):\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n bpy.ops.object.mode_set(mode=\"EDIT\")\n if \"tmp\" in self.object.vertex_groups.keys():\n bpy.ops.object.vertex_group_set_active(group=\"tmp\")\n bpy.ops.object.vertex_group_remove()\n self.object.vertex_groups.new(name=\"tmp\")\n bpy.ops.object.vertex_group_assign() \n bpy.ops.mesh.duplicate()\n bpy.ops.mesh.separate()\n# bpy.ops.object.vertex_group_assign() \n bpy.ops.mesh.select_all(action=\"DESELECT\")\n obj = bpy.context.selected_objects[0]\n obj.name = \"selector\"\n bpy.ops.object.mode_set()\n bpy.context.scene.objects.active = obj\n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.region_to_loop()\n bpy.ops.mesh.fill()\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.normals_make_consistent()\n# bpy.ops.mesh.select_all(action=\"DESELECT\")\n# bpy.ops.mesh.select_face_by_sides(number=3, type='GREATER', extend=False)\n# bpy.ops.mesh.quads_convert_to_tris()\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n obj[\"pmls_type\"] = \"mesh\"\n mobj = self.to_matlab()\n data = PmlsEngine.split_complicated(mobj, PmlsObject(obj).to_matlab())\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n bpy.context.scene.objects.active = obj\n obj.select = True\n bpy.ops.object.delete()\n bpy.context.scene.objects.active = self.object\n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.object.vertex_group_select() \n bpy.ops.object.vertex_group_remove()\n \n return data\n \n def _split_simple(self):\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n bpy.ops.object.mode_set(mode=\"EDIT\")\n if \"tmp\" in self.object.vertex_groups.keys():\n bpy.ops.object.vertex_group_set_active(group=\"tmp\")\n bpy.ops.object.vertex_group_remove()\n# self.object.vertex_groups.new(name=\"tmp\")\n# bpy.ops.object.vertex_group_assign()\n \n self.set_object_mode()\n bpy.ops.object.duplicate()\n \n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.mesh.separate()\n\n obj1 = bpy.context.selected_objects[0]\n obj2 = bpy.context.selected_objects[1]\n \n obj1.name = \"selector1\"\n bpy.ops.object.mode_set()\n bpy.context.scene.objects.active = obj1\n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.region_to_loop()\n bpy.ops.mesh.fill()\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.normals_make_consistent()\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n obj1[\"pmls_type\"] = \"mesh\"\n obj2.name = \"selector2\"\n bpy.ops.object.mode_set()\n bpy.context.scene.objects.active = obj2\n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.region_to_loop()\n bpy.ops.mesh.fill()\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.normals_make_consistent()\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n obj2[\"pmls_type\"] = \"mesh\"\n \n mobj = self.to_matlab(True)\n obj1 = PmlsObject(obj1)\n obj2 = PmlsObject(obj2)\n data = PmlsEngine.split(mobj, obj1.to_matlab(), obj2.to_matlab())\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n obj1.set_object_mode()\n bpy.ops.object.delete()\n obj2.set_object_mode()\n bpy.ops.object.delete()\n self.set_edit_mode()\n return data\n \n def _split(self):\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n bpy.ops.object.mode_set(mode=\"EDIT\")\n if \"tmp\" in self.object.vertex_groups.keys():\n bpy.ops.object.vertex_group_set_active(group=\"tmp\")\n bpy.ops.object.vertex_group_remove()\n self.object.vertex_groups.new(name=\"tmp\")\n bpy.ops.object.vertex_group_assign()\n self.set_edit_mode()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bpy.ops.mesh.select_face_by_sides(number=3, type='GREATER', extend=False)\n bpy.ops.mesh.quads_convert_to_tris()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bpy.ops.object.vertex_group_select() \n bpy.ops.object.vertex_group_remove()\n# bpy.ops.mesh.loop_to_region()\n \n self.set_object_mode()\n bpy.ops.object.duplicate()\n \n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.mesh.separate()\n\n obj1 = bpy.context.selected_objects[0]\n obj2 = bpy.context.selected_objects[1]\n \n obj1.name = \"selector1\"\n bpy.ops.object.mode_set()\n obj1[\"pmls_type\"] = \"mesh\"\n obj1 = PmlsObject(obj1)\n obj1.set_edit_mode()\n bpy.ops.mesh.select_all(action=\"SELECT\")\n bpy.ops.mesh.region_to_loop()\n loops = obj1.get_selected_loops()\n \n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n obj2.name = \"selector2\"\n bpy.ops.object.mode_set()\n obj2[\"pmls_type\"] = \"mesh\"\n obj2 = PmlsObject(obj2)\n\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n \n mobj = self.to_matlab(True)\n data = PmlsEngine.split(mobj, obj1.to_matlab(), obj2.to_matlab(), loops)\n bpy.ops.object.mode_set()\n bpy.ops.object.select_all(action=\"DESELECT\")\n obj1.set_object_mode()\n bpy.ops.object.delete()\n obj2.set_object_mode()\n bpy.ops.object.delete()\n self.set_edit_mode()\n return data\n \n \n def cut(self, simple=True):\n if simple:\n data = self._split()\n else:\n data = self._split_complicated()\n PmlsObject(data[0])\n return self.update_from_matlab(data[1])\n \n def copy(self, simple=True):\n if simple:\n data = self._split()\n else:\n data = self._split_complicated()\n PmlsObject(data[0])\n# PmlsObject(data[1])\n return self\n\n def split(self, simple=True):\n if simple:\n data = self._split()\n else:\n data = self._split_complicated()\n PmlsObject(data[0])\n PmlsObject(data[1])\n return self\n \n def delete(self, simple=True):\n if simple:\n data = self._split()\n else:\n data = self._split_complicated()\n return self.update_from_matlab(data[1])\n \nclass PmlsVolMesh(PmlsObject):\n pmls_type = \"vol_mesh\"\n\n @classmethod\n def _add_custom_layers(cls, obj, mobj):\n me = mobj[\"elements\"]\n obj.data[\"tet_elements\"] = [e for e in me];\n\n def _init_from_matlab(self, mobj):\n ob = self.add_mesh( self._mvt_to_py(mobj), self._medges_to_py(mobj), [], self.object );\n self._add_custom_layers(ob, mobj)\n self.object = ob\n\n \n def to_matlab(self):\n vertices = self.object.data.vertices\n S = {};\n n = len(vertices)\n mvt = matlab.double(size=(1,3*n))\n vertices.foreach_get('co', mvt[0])\n mvt.reshape((3,n))\n S[\"verts\"] = mvt\n melems = matlab.int32(self.object.data[\"tet_elements\"])\n S[\"elements\"] = melems\n S[\"pmls_type\"] = self.pmls_type\n S[\"pmls_name\"] = self.object.name\n return S\n \nclass PmlsHedgehog(PmlsObject):\n pmls_type = \"hedgehog\"\n \n @classmethod\n def _add_bmesh_layers(cls, bm, mobj):\n mbindices = mobj[\"bindices\"]\n mvzindices = mobj[\"vzindices\"]\n mvid = mobj[\"vid\"]\n lay = bm.verts.layers.int.new(\"is_base\")\n layname = bm.verts.layers.string.new(\"basename\")\n bm.verts.ensure_lookup_table();\n for v in bm.verts:\n v[lay] = 0\n if not isinstance(mbindices, collections.Iterable):\n mbindices = [[mbindices]]\n n = len(mbindices)\n for i in range(n):\n index = mbindices[i][0]\n bm.verts[index][lay] = 1\n bm.verts[index][layname] = bytes(mvid[i],'ascii')\n if not isinstance(mvzindices, collections.Iterable):\n mvzindices = [[mvzindices]]\n n = len(mvzindices)\n for i in range(n):\n index = mvzindices[i][0]\n bm.verts[index][lay] = 2\n \n\n\n @classmethod\n def _add_custom_layers(cls, obj, mobj):\n bpy.ops.object.mode_set(mode='EDIT')\n me = obj.data\n cls._add_bmesh_layers(bmesh.from_edit_mesh( me ), mobj)\n bpy.ops.object.mode_set()\n \n @classmethod\n def _add_custom_props(cls, obj):\n obj[\"disp_base\"] = True;\n obj[\"disp_pnts\"] = True;\n obj[\"disp_zeroshots\"] = True;\n obj[\"disp_edges\"] = True;\n \n \n def _init_from_matlab(self, mobj):\n ob = self.add_mesh( self._mvt_to_py(mobj), self._medges_to_py(mobj), [], self.object );\n self._add_custom_layers( ob, mobj )\n self._add_custom_props(ob)\n self.object = ob\n \n def _clear_objdata(self):\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_base\")\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_pnts\")\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_zeroshots\")\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_edges\")\n \n @staticmethod\n def _update_display_verts_old(obj, bm):\n lay = bm.verts.layers.int[\"is_base\"]\n if not obj['disp_base'] and not obj['disp_pnts']:\n for v in bm.verts:\n PmlsObject.hide(v)\n elif not obj['disp_base'] or not obj['disp_pnts']:\n to_select = True\n if obj['disp_base']:\n to_select = False\n for v in bm.verts:\n if bool(v[lay]) == to_select:\n PmlsObject.hide(v)\n else:\n PmlsObject.un_hide(v)\n else:\n for v in bm.verts:\n PmlsObject.un_hide(v)\n\n @staticmethod\n def _update_display_verts(obj, bm):\n lay = bm.verts.layers.int[\"is_base\"]\n for v in bm.verts:\n if v[lay] != 1:\n v.hide = True\n if obj['disp_pnts'] or obj['disp_zeroshots']:\n overts0 = [e.other_vert(b) for b in bm.verts if b[lay] == 1 and not b.hide for e in b.link_edges]\n if obj['disp_pnts']:\n overts = [v for v in overts0 if v[lay] == 0] \n for v in overts:\n v.hide = False\n if obj['disp_zeroshots']:\n overts = [v for v in overts0 if v[lay] == 2] \n for v in overts:\n v.hide = False\n \n @staticmethod\n def _update_display_edges(obj, bm):\n lay = bm.verts.layers.int[\"is_base\"]\n if ( not obj['disp_edges'] ):\n for v in bm.edges:\n if (not(v.verts[0].hide or v.verts[1].hide)) and (v.verts[0][lay] == 2 or v.verts[1][lay] == 2):\n v.hide = False\n else:\n v.hide = True \n else:\n for v in bm.edges:\n if v.verts[0].hide or v.verts[1].hide:\n v.hide = True\n else:\n v.hide = False\n \n @staticmethod\n def _update_display(obj, bm):\n PmlsHedgehog._update_display_verts(obj, bm)\n PmlsHedgehog._update_display_edges(obj, bm)\n \n \n def update_display(self):\n obj = self.object\n bm = self.get_bmesh()\n seq = [v for v in bm.faces if v.select]\n seq.extend( [v for v in bm.edges if v.select] )\n seq.extend( [v for v in bm.verts if v.select] )\n bpy.ops.mesh.select_all(action='DESELECT')\n self._update_display(obj, bm)\n for v in seq:\n if not v.hide:\n v.select = True\n# seq = [v for v in bm.faces if v.hide]\n# seq.extend( [v for v in bm.edges if v.hide] )\n# seq.extend( [v for v in bm.verts if v.hide] )\n# for v in seq:\n# v.hide = False\n# v.select = False\n# v.hide = True\n \n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.data, tessface=False, destructive=False)\n \n def hide_stations(self,selected):\n obj = self.object\n bm = self.get_bmesh()\n seq = [v for v in bm.faces if v.select]\n seq.extend( [v for v in bm.edges if v.select] )\n seq.extend( [v for v in bm.verts if v.select] )\n bpy.ops.mesh.select_all(action='DESELECT')\n lay = bm.verts.layers.int[\"is_base\"]\n for v in bm.verts:\n if v[lay] == 1 and selected == v.select:\n v.hide = True\n self._update_display(obj, bm)\n for v in seq:\n if not v.hide:\n v.select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.data, tessface=False, destructive=False)\n\n def reveal_stations(self):\n obj = self.object\n bm = self.get_bmesh()\n seq = [v for v in bm.faces if v.select]\n seq.extend( [v for v in bm.edges if v.select] )\n seq.extend( [v for v in bm.verts if v.select] )\n bpy.ops.mesh.select_all(action='DESELECT')\n lay = bm.verts.layers.int[\"is_base\"]\n for v in bm.verts:\n if v[lay] == 1:\n v.hide = False\n self._update_display(obj, bm)\n for v in seq:\n if not v.hide:\n v.select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.data, tessface=False, destructive=False)\n \n def deselect_stations(self):\n obj = self.object\n bm = self.get_bmesh()\n lay = bm.verts.layers.int[\"is_base\"]\n for v in bm.verts:\n if v[lay] == 1:\n v.select = False\n bm.select_flush(False)\n bmesh.update_edit_mesh(obj.data, tessface=False, destructive=False)\n \n \n \n \n def _to_matlab(self, bm, sbase, lay):\n layname = bm.verts.layers.string[\"basename\"]\n S = {}\n S[\"verts\"] = matlab.double([list(v.co) for v in bm.verts])\n S[\"selvt\"] = matlab.logical([v.select for v in bm.verts])\n S[\"vid\"] = [v[layname].decode(\"utf-8\") for v in sbase]\n S[\"base_i\"] = matlab.int32([v.index + 1 for v in sbase])\n S[\"rays_i\"] = [matlab.int32([e.other_vert(b).index + 1 for e in b.link_edges if e.other_vert(b)[lay] != 2 ]) for b in sbase]\n S[\"zeroshots_i\"] = [matlab.int32([e.other_vert(b).index + 1 for e in b.link_edges if e.other_vert(b)[lay] == 2 ]) for b in sbase]\n S[\"pmls_type\"] = self.pmls_type\n S[\"pmls_name\"] = self.object.name\n return S\n \n def to_matlab(self):\n bm = self.get_bmesh()\n lay = bm.verts.layers.int[\"is_base\"]\n sbase = [v for v in bm.verts if v[lay] == 1]\n return self._to_matlab(bm, sbase, lay)\n\n @classmethod\n def is_hedgehog(cls):\n return True\n \n def cut(self):\n self.deselect_stations()\n bpy.ops.mesh.select_more(False)\n bpy.ops.mesh.separate()\n \n def copy(self):\n self.deselect_stations()\n bpy.ops.mesh.select_more(False)\n bpy.ops.mesh.duplicate()\n bpy.ops.mesh.separate()\n\n def split(self):\n self.set_object_mode()\n if \"tmp\" in self.object.vertex_groups.keys():\n bpy.ops.object.vertex_group_set_active(group=\"tmp\")\n bpy.ops.object.vertex_group_remove()\n self.set_edit_mode()\n self.object.vertex_groups.new(name=\"tmp\")\n bpy.ops.object.vertex_group_assign() \n self.deselect_stations()\n bpy.ops.mesh.select_more(False)\n bpy.ops.mesh.duplicate()\n bpy.ops.mesh.separate()\n obj1 = PmlsObject(bpy.context.selected_objects[0])\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bpy.ops.object.vertex_group_select() \n bpy.ops.mesh.select_all(action=\"INVERT\")\n self.deselect_stations()\n bpy.ops.mesh.select_more(False)\n bpy.ops.mesh.duplicate()\n bpy.ops.mesh.separate()\n obj2 = PmlsObject(bpy.context.selected_objects[0])\n bpy.ops.object.vertex_group_select() \n bpy.ops.object.vertex_group_remove()\n if obj1:\n obj1.set_object_mode()\n bpy.ops.object.vertex_group_remove()\n if obj2:\n obj2.set_object_mode()\n bpy.ops.object.vertex_group_remove()\n self.set_edit_mode()\n \n \n \n def delete(self):\n self.deselect_stations()\n bpy.ops.mesh.delete()\n \nclass PmlsTurtle(PmlsHedgehog, PmlsMesh):\n pmls_type = \"turtle\" \n\n @classmethod\n def _add_custom_props(cls, obj):\n super()._add_custom_props(obj)\n obj[\"disp_faces\"] = True;\n\n def _clear_objdata(self):\n super()._clear_objdata()\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_faces\")\n\n @classmethod\n def _add_bmesh_layers(cls, bm, mobj):\n super()._add_bmesh_layers(bm, mobj)\n lay = bm.edges.layers.int.new(\"is_extended\")\n for e in bm.edges:\n e[lay] = -1\n edges = cls._medges_to_py(mobj)\n bm.verts.ensure_lookup_table();\n edges = [bm.edges.get( (bm.verts[e[0]], bm.verts[e[1]]) ) for e in edges]\n for e in edges:\n e[lay] = 0\n \n\n def _init_from_matlab(self, mobj):\n ob = self.add_mesh( self._mvt_to_py(mobj), self._medges_to_py(mobj), self._mfaces_to_py(mobj), self.object );\n self._add_custom_layers(ob, mobj)\n self._add_custom_props(ob)\n self.object = ob\n \n @staticmethod\n def _update_display(obj, bm):\n PmlsHedgehog._update_display_verts(obj, bm)\n vlay = bm.verts.layers.int[\"is_base\"]\n elay = bm.edges.layers.int[\"is_extended\"]\n\n todisp = set() \n if obj['disp_edges']:\n todisp.add(0)\n if obj['disp_faces']:\n todisp.add(-1)\n for v in bm.edges:\n if (not(v.verts[0].hide or v.verts[1].hide)) and (v[elay] in todisp or v.verts[0][vlay] == 2 or v.verts[1][vlay] == 2):\n v.hide = False\n else:\n v.hide = True \n \n \n# if ( not obj['disp_edges'] ):\n# for v in bm.edges:\n# if (not(v.verts[0].hide or v.verts[1].hide)) and (v[elay] == -1 or v.verts[0][vlay] == 2 or v.verts[1][vlay] == 2):\n# v.hide = False\n# else:\n# v.hide = True \n# else:\n# for v in bm.edges:\n# if v.verts[0].hide or v.verts[1].hide:\n# v.hide = True\n# else:\n# v.hide = False\n \n for f in bm.faces:\n f.hide = any([ e.hide for e in f.edges ])\n \n \n def clear_turtles(self):\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.reveal()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bm = self.get_bmesh()\n lay = bm.edges.layers.int[\"is_extended\"]\n for e in bm.edges:\n if e[lay] == -1:\n e.select_set(True)\n bmesh.update_edit_mesh(self.object.data, tessface=False, destructive=False)\n bpy.ops.mesh.delete(type=\"EDGE_FACE\")\n bm = self.get_bmesh()\n lay = bm.edges.layers.int[\"is_extended\"]\n bm.edges.layers.int.remove(lay)\n bmesh.update_edit_mesh(self.object.data, tessface=False, destructive=False)\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_faces\")\n self.object[\"pmls_type\"] = PmlsHedgehog.pmls_type\n ob = self.object\n ob[\"disp_base\"] = True;\n ob[\"disp_pnts\"] = True;\n ob[\"disp_zeroshots\"] = True;\n ob[\"disp_edges\"] = True;\n return PmlsObject(ob) \n\nclass PmlsExtendedHedgehog(PmlsHedgehog):\n pmls_type = \"ehedgehog\"\n \n @classmethod\n def _add_custom_props(cls, obj):\n super()._add_custom_props(obj)\n obj[\"disp_extedges\"] = True;\n\n\n# def _init_from_matlab(self, mobj):\n# super()._init_from_matlab(mobj)\n# self.object[\"disp_extedges\"] = True;\n \n def _clear_objdata(self):\n PmlsHedgehog._clear_objdata(self)\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_extedges\")\n\n \n @classmethod \n def _add_bmesh_layers(cls, bm, mobj):\n super()._add_bmesh_layers(bm, mobj)\n# bm.edges.ensure_lookup_table();\n mextindices = mobj[\"extindices\"]\n lay = bm.edges.layers.int.new(\"is_extended\")\n for e in bm.edges:\n e[lay] = 0\n edges = cls._medges_to_py(mobj)\n edges = [edges[i[0]] for i in mextindices];\n bm.verts.ensure_lookup_table();\n edges = [bm.edges.get( (bm.verts[e[0]], bm.verts[e[1]]) ) for e in edges]\n for e in edges:\n e[lay] = 1\n \n# for i in mextindices:\n# index = i[0]\n# bm.edges[index][lay] = 1\n \n @staticmethod \n def _update_display(obj, bm):\n PmlsHedgehog._update_display(obj, bm)\n lay = bm.edges.layers.int[\"is_extended\"]\n \n if ( not obj['disp_extedges'] ):\n for v in bm.edges:\n if v[lay] == 1:\n v.hide = True\n else:\n for v in bm.edges:\n if v[lay] == 1:\n if v.verts[0].hide or v.verts[1].hide:\n v.hide = True\n else:\n v.hide = False\n \n def downdate(self, todel):\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.reveal()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n if todel:\n bm = self.get_bmesh()\n lay = bm.edges.layers.int[\"is_extended\"]\n for e in bm.edges:\n if e[lay]:\n e.select_set(True)\n bmesh.update_edit_mesh(self.object.data, tessface=False, destructive=False)\n bpy.ops.mesh.delete(type=\"EDGE\")\n bm = self.get_bmesh()\n lay = bm.edges.layers.int[\"is_extended\"]\n bm.edges.layers.int.remove(lay)\n bmesh.update_edit_mesh(self.object.data, tessface=False, destructive=False)\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_extedges\")\n self.object[\"pmls_type\"] = PmlsHedgehog.pmls_type\n ob = self.object\n ob[\"disp_base\"] = True;\n ob[\"disp_pnts\"] = True;\n ob[\"disp_zeroshots\"] = True;\n ob[\"disp_edges\"] = True;\n return PmlsObject(ob)\n \n def _to_matlab(self, bm, sbase, lay):\n S = PmlsHedgehog._to_matlab(self, bm, sbase, lay)\n S[\"erays_i\"] = S[\"rays_i\"]\n elay = bm.edges.layers.int[\"is_extended\"]\n S[\"rays_i\"] = [matlab.int32([e.other_vert(b).index + 1 for e in b.link_edges if (e[elay] == 0 and e.other_vert(b)[lay] != 2) ]) for b in sbase]\n return S\n \nclass PmlsExtendedTurtle(PmlsExtendedHedgehog, PmlsMesh):\n pmls_type = \"eturtle\" \n\n @classmethod\n def _add_custom_props(cls, obj):\n super()._add_custom_props(obj)\n obj[\"disp_faces\"] = True;\n \n def _clear_objdata(self):\n super()._clear_objdata()\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_faces\")\n\n\n def _init_from_matlab(self, mobj):\n ob = self.add_mesh( self._mvt_to_py(mobj), self._medges_to_py(mobj), self._mfaces_to_py(mobj), self.object );\n self._add_custom_layers(ob, mobj)\n self._add_custom_props(ob)\n self.object = ob\n \n @classmethod\n def _add_bmesh_layers(cls, bm, mobj):\n tmp = super(PmlsExtendedHedgehog, cls)\n tmp._add_bmesh_layers(bm, mobj)\n mextindices = mobj[\"extindices\"]\n lay = bm.edges.layers.int.new(\"is_extended\")\n for e in bm.edges:\n e[lay] = -1\n edges = cls._medges_to_py(mobj)\n bm.verts.ensure_lookup_table();\n bmedges = [bm.edges.get( (bm.verts[e[0]], bm.verts[e[1]]) ) for e in edges]\n for e in bmedges:\n e[lay] = 0\n edges = [edges[i[0]] for i in mextindices];\n bm.verts.ensure_lookup_table();\n edges = [bm.edges.get( (bm.verts[e[0]], bm.verts[e[1]]) ) for e in edges]\n for e in edges:\n e[lay] = 1\n \n @staticmethod\n def _update_display(obj, bm):\n PmlsHedgehog._update_display_verts(obj, bm)\n vlay = bm.verts.layers.int[\"is_base\"]\n elay = bm.edges.layers.int[\"is_extended\"]\n todisp = set() \n if obj['disp_edges']:\n todisp.add(0)\n if obj['disp_extedges']:\n todisp.add(1)\n if obj['disp_faces']:\n todisp.add(-1)\n for v in bm.edges:\n if (not(v.verts[0].hide or v.verts[1].hide)) and (v[elay] in todisp or v.verts[0][vlay] == 2 or v.verts[1][vlay] == 2):\n v.hide = False\n else:\n v.hide = True \n# if ( not obj['disp_edges'] and not obj['disp_extedges'] ):\n# for v in bm.edges:\n# if (not(v.verts[0].hide or v.verts[1].hide)) and (v[elay] == -1 or v.verts[0][vlay] == 2 or v.verts[1][vlay] == 2):\n# v.hide = False\n# else:\n# v.hide = True \n# elif not obj['disp_edges']:\n# for v in bm.edges:\n# if (not(v.verts[0].hide or v.verts[1].hide)) and (v[elay] != 0 or v.verts[0][vlay] == 2 or v.verts[1][vlay] == 2):\n# v.hide = False\n# else:\n# v.hide = True \n# elif not obj['disp_extedges']:\n# for v in bm.edges:\n# if (not(v.verts[0].hide or v.verts[1].hide)) and (v[elay] != 1 or v.verts[0][vlay] == 2 or v.verts[1][vlay] == 2):\n# v.hide = False\n# else:\n# v.hide = True \n# else:\n# for v in bm.edges:\n# if v.verts[0].hide or v.verts[1].hide:\n# v.hide = True\n# else:\n# v.hide = False\n \n for f in bm.faces:\n f.hide = any([ e.hide for e in f.edges ])\n edges = [e for e in bm.edges if not e.hide and e[elay] == -1 and not any( [not f.hide for f in e.link_faces] ) ]\n for e in edges:\n e.hide = True\n \n \n def clear_turtles(self):\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.reveal()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bm = self.get_bmesh()\n lay = bm.edges.layers.int[\"is_extended\"]\n for e in bm.edges:\n if e[lay] == -1:\n e.select_set(True)\n bmesh.update_edit_mesh(self.object.data, tessface=False, destructive=False)\n bpy.ops.mesh.delete(type=\"EDGE_FACE\")\n bpy.ops.wm.properties_remove(data_path=\"active_object\", property=\"disp_faces\")\n self.object[\"pmls_type\"] = PmlsExtendedHedgehog.pmls_type\n ob = self.object\n ob[\"disp_base\"] = True;\n ob[\"disp_pnts\"] = True;\n ob[\"disp_zeroshots\"] = True;\n ob[\"disp_edges\"] = True;\n ob[\"disp_extedges\"] = True;\n return PmlsObject(ob)\n \n def downdate(self, todel):\n return self.clear_turtles().downdate(todel) \n \n\nclass PmlsEngine:\n eng = None\n isr = False\n data_counter = 0\n name = \"\"\n islib = False\n \n @classmethod\n def enginename( cls ):\n return cls.eng.matlab.engine.engineName()\n \n @classmethod\n def is_running(cls):\n eng = cls.eng\n if eng and hasattr(matlab, 'engine') and isinstance(eng, matlab.engine.matlabengine.MatlabEngine):\n try:\n x = eng.eye(1)\n cls.isr = True\n cls.islib = False\n return True\n except: #matlab.engine.matlabengine.RejectedExecutionError:\n cls.isr = False\n cls.name = \"\"\n cls.islib = False\n return False\n if eng and pmlslib_module and getattr(eng, 'name', 'nn') == 'pmlslib':\n try:\n x = eng.areyouthere()\n cls.isr = True\n cls.islib = True\n return True\n except: #matlab.engine.matlabengine.RejectedExecutionError:\n cls.isr = False\n cls.name = \"\"\n cls.islib = False\n return False\n else:\n cls.isr = False\n cls.name = \"\"\n cls.islib = False\n return False\n \n @classmethod\n def start(cls):\n if (not cls.is_running()) and hasattr(matlab, 'engine'):\n cls.eng = matlab.engine.start_matlab(\"-desktop\")\n cls.name = \"\"\n cls.is_running()\n \n @classmethod\n def startpoll(cls):\n return (not cls.isr) and hasattr(matlab, 'engine')\n\n @classmethod\n def connect(cls, name):\n if not cls.is_running():\n if hasattr(matlab, 'engine') and name in matlab.engine.find_matlab():\n cls.eng = matlab.engine.connect_matlab( name )\n if pmlslib_module and name == 'pmlslib':\n cls.eng = pmlslib.initialize()\n if cls.is_running():\n cls.name = name\n \n @classmethod\n def stop(cls):\n eng = cls.eng\n if cls.is_running() and not cls.islib:\n eng.eval( 'quit', nargout = 0 )\n if not cls.is_running():\n cls.name = \"\"\n \n @classmethod\n def stoppoll(cls):\n return cls.isr and not cls.islib\n \n\n @classmethod\n def disconnect(cls):\n if cls.is_running():\n if cls.islib:\n if cls.name != getattr(cls.eng, 'name', ''):\n raise AssertionError('name != libname')\n eng=cls.eng\n eng.quit()\n cls.eng = None\n else:\n if cls.name != cls.enginename():\n raise AssertionError('name != enginename')\n eng=cls.eng\n eng.quit()\n cls.eng = None\n if not cls.is_running():\n cls.name = \"\"\n\n @staticmethod\n def findmatlab():\n if hasattr(matlab, 'engine') and pmlslib_module:\n return ('pmlslib',) + matlab.engine.find_matlab()\n if hasattr(matlab, 'engine'):\n return matlab.engine.find_matlab()\n if pmlslib_module:\n return ('pmlslib',)\n\n\n @staticmethod\n def add_mesh(name, verts, edges, faces, ob=None, selvt=None ):\n if bpy.ops.object.mode_set.poll():\n bpy.ops.object.mode_set()\n if not ob:\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.add(type='MESH',location=(0,0,0))\n ob = bpy.context.object\n ob.name = name\n me = ob.data\n me.name = name\n else:\n me = ob.data\n me.from_pydata(verts, edges, faces)\n me.update(calc_edges=True)\n if selvt:\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.reveal()\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n bm=bmesh.from_edit_mesh(me)\n bm.verts.ensure_lookup_table()\n for i in selvt:\n bm.verts[i].select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(me, tessface=False, destructive=False)\n bpy.ops.object.mode_set()\n\n \n return ob\n \n @classmethod\n def load_mat(cls, path):\n data = cls.eng.loadhedgehogs(path)\n for D in data:\n PmlsObject(D)\n\n @classmethod\n def main_sqlite2csv(cls, source, dest):\n cls.eng.mainsqlite2csv(source, dest, nargout=0)\n\n @classmethod\n def sqlite2csvs(cls, sqfile, maincsv, destdir):\n cls.eng.sqlite2csvs(sqfile, maincsv, destdir, nargout=0)\n\n @classmethod\n def get_input(cls, csvfile, polfile, truemin):\n err = io.StringIO()\n \n try:\n if polfile:\n D = cls.eng.getinputpy(csvfile, truemin, polfile, stderr=err)\n else:\n D = cls.eng.getinputpy(csvfile, truemin, stderr=err)\n PmlsObject(D)\n return (True, '', err.getvalue())\n except:\n return (False, '', err.getvalue())\n# @classmethod\n# def get_input(cls, csvfile, polfile):\n# try:\n# if polfile:\n# cls.eng.getinputmain(csvfile, polfile, nargout=0)\n# else:\n# cls.eng.getinputmain(csvfile, nargout=0)\n# except:\n# self.report({'INFO'}, self.message)\n# pass\n \n \n\n @classmethod\n def save_mat(cls, path, data):\n cls.eng.savehedgehogs(path, data, nargout=0)\n\n @classmethod\n def turtles(cls, S):\n return cls.eng.turtlepy(S)\n\n @classmethod\n def separate_turtles(cls, S):\n data = cls.eng.separateturtles(S)\n for D in data:\n PmlsObject(D)\n \n \n\n @classmethod\n def extend_hedgehog(cls, S):\n return cls.eng.extendvisiblepy(S)\n\n @classmethod\n def remeshunion(cls, S, vox, ext, cuda, marcube, shortenrays):\n return cls.eng.remeshunionpy(S, vox, ext, cuda, marcube, shortenrays)\n\n @classmethod\n def tetremeshunion(cls, S, tetgen, par_a, par_q, par_d):\n return cls.eng.tetremeshunionpy(S, tetgen, par_a, par_q, par_d)\n\n @classmethod\n def bihar_pnts(cls, S, unilap, snaptol, voxsiz, to_raycheck):\n return cls.eng.biharpntspy(S, unilap, snaptol, voxsiz, to_raycheck)\n\n @classmethod\n def map_pnts(cls, S):\n return cls.eng.mappntspy(S)\n\n @classmethod\n def get_outliers(cls, S, internal, dihedral):\n return cls.eng.getoutlierspy(S, internal, dihedral)\n\n @classmethod\n def merge_hedgehogs(cls, S):\n return cls.eng.mergeinputpy(S)\n\n @classmethod\n def cutback_at_bridges(cls, S):\n return cls.eng.cutbackpy(S)\n\n @classmethod\n def remesh_union(cls, S, vox, ext, cuda, marcube):\n return cls.eng.remeshunioncpy(S, vox, ext, cuda, marcube)\n\n @classmethod\n def remesh_union_bihar(cls, S, vox, ext, cuda, marcube, unilap, premesh):\n return cls.eng.remeshunionbiharcpy(S, vox, ext, cuda, marcube, unilap, premesh)\n\n @classmethod\n def normal_union(cls, S, premesh, tetgen, par_a, par_q, par_d):\n return cls.eng.libiglunionpy(S, premesh, tetgen, par_a, par_q, par_d)\n\n @classmethod\n def create_vol_mesh(cls, S):\n return cls.eng.surf2meshpy(S)\n\n @classmethod\n def split_complicated(cls, S, H):\n return cls.eng.separatepy(S,H)\n\n @classmethod\n def split_simple(cls, S, H1, H2):\n return cls.eng.separatesimplepy(S,H1, H2)\n\n @classmethod\n def split(cls, S, H1, H2, L):\n return cls.eng.separatetrianglepy(S,H1, H2, L)\n \n @classmethod\n def fill(cls, S):\n return cls.eng.filltripy(S)\n\n\n\nclass PmlsStart(bpy.types.Operator):\n bl_idname = \"pmls.start\"\n bl_label = \"pmls start\"\n \n @classmethod\n def pmls_engine(cls):\n return PmlsEngine\n \n @classmethod\n def poll(cls, context):\n return PmlsEngine.startpoll()\n\n def execute(self, context):\n PmlsEngine.start()\n return {'FINISHED'}\n\nclass PmlsConnect(bpy.types.Operator):\n bl_idname = \"pmls.connect\"\n bl_label = \"pmls connect\"\n \n name = bpy.props.StringProperty()\n \n @classmethod\n def poll(cls, context):\n return not PmlsEngine.isr\n\n def execute(self, context):\n PmlsEngine.connect(self.name)\n return {'FINISHED'}\n\nclass PmlsStop(bpy.types.Operator):\n bl_idname = \"pmls.stop\"\n bl_label = \"pmls stop\"\n \n @classmethod\n def poll(cls, context):\n return PmlsEngine.stoppoll()\n\n def execute(self, context):\n PmlsEngine.stop()\n return {'FINISHED'}\n\nclass PmlsDisconnect(bpy.types.Operator):\n bl_idname = \"pmls.disconnect\"\n bl_label = \"pmls disconnect\"\n \n @classmethod\n def poll(cls, context):\n return PmlsEngine.isr and PmlsEngine.name\n\n def execute(self, context):\n PmlsEngine.disconnect()\n return {'FINISHED'}\n \nclass PmlsLoadMat(bpy.types.Operator):\n \"\"\"Loads mat file\"\"\"\n bl_idname = \"pmls.loadmat\"\n bl_label = \"pmls load mat file\"\n \n filter_glob = bpy.props.StringProperty(default=\"*.mat\", options={'HIDDEN'})\n directory = bpy.props.StringProperty(subtype=\"DIR_PATH\")\n filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n files = bpy.props.CollectionProperty(name=\"File Path\",type=bpy.types.OperatorFileListElement) \n @classmethod\n def poll(cls, context):\n return PmlsEngine.isr\n\n def execute(self, context):\n for fp in self.files:\n PmlsEngine.load_mat(self.directory + fp.name)\n# PmlsEngine.load_mat(self.filepath)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'}\n\nclass PmlsSaveMat(bpy.types.Operator):\n \"\"\"Saves selected PMLS objects to mat file\"\"\"\n bl_idname = \"pmls.savemat\"\n bl_label = \"pmls save mat file\"\n \n filter_glob = bpy.props.StringProperty(default=\"*.mat\", options={'HIDDEN'})\n directory = bpy.props.StringProperty(subtype=\"DIR_PATH\")\n filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n files = bpy.props.CollectionProperty(name=\"File Path\",type=bpy.types.OperatorFileListElement) \n @classmethod\n def poll(cls, context):\n if not PmlsEngine.isr:\n return False\n if not context.selected_objects:\n return False\n for obj in context.selected_objects:\n if not pmls_is_pmlsobj(obj):\n return False\n return True\n\n def execute(self, context):\n objs = PmlsObject.get_selected_objects()\n data = [o.to_matlab() for o in objs]\n PmlsEngine.save_mat(self.filepath, data)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'}\n\ndef pmls_is_pmlsobj( obj ):\n return obj is not None and isinstance(obj, bpy.types.Object) and obj.type == 'MESH' and \"pmls_type\" in obj.keys()\n\ndef pmls_is_hedgehog( obj ):\n return pmls_is_pmlsobj( obj ) and obj[\"pmls_type\"] == \"hedgehog\"\n\ndef pmls_is_ehedgehog( obj ):\n return pmls_is_pmlsobj( obj ) and obj[\"pmls_type\"] == \"ehedgehog\"\n\ndef pmls_is_extended(obj):\n return pmls_is_pmlsobj( obj ) and (obj[\"pmls_type\"] == \"ehedgehog\" or obj[\"pmls_type\"] == \"eturtle\")\n\ndef pmls_is_turtle(obj):\n return pmls_is_pmlsobj( obj ) and (obj[\"pmls_type\"] == \"turtle\" or obj[\"pmls_type\"] == \"eturtle\")\n\ndef pmls_is_mesh(obj):\n return pmls_is_pmlsobj( obj ) and (obj[\"pmls_type\"] == \"mesh\")\n\ndef pmls_is_deform_mesh(obj):\n return pmls_is_pmlsobj( obj ) and (obj[\"pmls_type\"] == \"deform_mesh\")\n\ndef pmls_has_hedgehog(obj):\n for H in obj.children:\n ot = PmlsObject.pmls_get_type(H)\n if ot and ot.is_hedgehog():\n return True\n return False\n \n \n\nclass PmlsUpdateDisplay(bpy.types.Operator):\n \"\"\"Updates display of active object\"\"\"\n bl_idname = \"pmls.updatedisplay\"\n bl_label = \"pmls update display\"\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return PmlsEngine.isr and pmls_is_pmlsobj( obj )\n \n\n def execute(self, context):\n pobj = PmlsObject()\n if pobj:\n pobj.update_display()\n return {'FINISHED'}\n \n\nclass PmlsDowdateEhedgehog(bpy.types.Operator):\n bl_idname = \"pmls.downdate_ehedgehog\"\n bl_label = \"pmls downdate ehedgehog\"\n todel = bpy.props.BoolProperty(default=True)\n \n @classmethod\n def poll(cls, context):\n return pmls_is_extended(context.active_object)\n\n def execute(self, context):\n obj = PmlsExtendedHedgehog(context.active_object)\n obj.downdate(self.todel)\n return {'FINISHED'}\n\nclass PmlsHedgehogUnion(bpy.types.Operator):\n \"\"\"Union by voxelization\"\"\"\n bl_idname = \"pmls.hedgehog_union\"\n bl_label = \"pmls hedgehog union\"\n\n voxel_siz = bpy.props.FloatProperty(\n name=\"Voxel size (cm):\", default=3.0, step=5, min=0.000001, soft_min=1.0, soft_max=1000.0, subtype='DISTANCE')\n extend = bpy.props.FloatProperty(\n name=\"Thicken volume (voxel):\", default=3.0, step=5, min=-15.0, soft_min=-10.0, soft_max=10.0, subtype='DISTANCE')\n cuda = bpy.props.BoolProperty(name=\"Voxelize on GPU\", description=\"Voxelization will be done on the graphics card. Requires CUDA 7.5\", default=True)\n marcub = bpy.props.BoolProperty(name=\"Use marching cubes\", description=\"If true after voxelization the surface will be reconstructed with marching cubes algorithm, else each voxel side will be kept.\", default=True)\n shortenrays = bpy.props.FloatProperty( name=\"Shorten rays (voxel):\", default=0.7, step=1, min=0.0, soft_min=0.0, soft_max=10.0, subtype='DISTANCE')\n\n\n \n @classmethod\n def poll(cls, context):\n return pmls_is_extended(context.active_object)\n\n def execute(self, context):\n obj = PmlsExtendedHedgehog(context.active_object)\n H = obj.to_matlab()\n S = PmlsEngine().remeshunion(H, self.voxel_siz, self.extend, self.cuda, self.marcub, self.shortenrays)\n S = PmlsObject(S)\n S.advance(obj,None)\n return {'FINISHED'}\n\nclass PmlsHedgehogUnionAlec(bpy.types.Operator):\n \"\"\"Union by voxelization\"\"\"\n bl_idname = \"pmls.hedgehog_union_alec\"\n bl_label = \"pmls hedgehog union_alec\"\n\n tetgen = bpy.props.BoolProperty(name=\"Remesh turtles before union\", description=\"Remesh increases the number of triangles, but the mesh will be nicer.\", default=True)\n tetgen_a = bpy.props.FloatProperty(\n name=\"Max volume of tetrahedra (m3)\", description=\"Tetgen command line parameter -a.\",\n default=0.5, step=0.001, min=0.0000001, soft_min=0.0000001, soft_max=100.0, subtype='UNSIGNED', unit='VOLUME')\n tetgen_q = bpy.props.FloatProperty(\n name=\"Max radius-edge ratio\", description=\"First value of tetgen command line parameter -q.\",\n default=2.0, step=0.001, min=1.0, soft_min=1.0, soft_max=100.0, subtype='UNSIGNED')\n tetgen_d = bpy.props.FloatProperty(\n name=\"Min dihedral angle (deg)\", description=\"Second value of tetgen command line parameter -q.\",\n default=0.0, step=5.0, min=0.0, soft_min=0.0, soft_max=70, subtype='UNSIGNED')\n \n @classmethod\n def poll(cls, context):\n return pmls_is_extended(context.active_object)\n\n def execute(self, context):\n obj = PmlsExtendedHedgehog(context.active_object)\n H = obj.to_matlab()\n S = PmlsEngine().tetremeshunion(H, self.tetgen, self.tetgen_a, self.tetgen_q, self.tetgen_d)\n S = PmlsObject(S)\n S.advance(obj,None)\n return {'FINISHED'}\n\nclass PmlsVoxelizedUnion(bpy.types.Operator):\n \"\"\"Union of meshes by voxelization\"\"\"\n bl_idname = \"pmls.voxelized_union\"\n bl_label = \"pmls voxelized union\"\n\n voxel_siz = bpy.props.FloatProperty(\n name=\"Voxel size (cm):\", default=3.0, step=5, min=0.000001, soft_min=1.0, soft_max=1000.0, subtype='DISTANCE')\n extend = bpy.props.FloatProperty(\n name=\"Thin volume (voxel):\", default=3.0, step=5, min=0.0, soft_min=0.0, soft_max=10.0, subtype='DISTANCE')\n\n cuda = bpy.props.BoolProperty(name=\"Voxelize on GPU\", description=\"Voxelization will be done on the graphics card. Requires CUDA 7.5\", default=True)\n marcub = bpy.props.BoolProperty(name=\"Use marching cubes\", description=\"If true after voxelization the surface will be reconstructed with marching cubes algorithm, else each voxel side will be kept.\", default=True)\n\n deform = bpy.props.BoolProperty(name=\"Volumetric deform\", default=True)\n\n# hedgehog = bpy.props.StringProperty()\n unilap = bpy.props.BoolProperty(name=\"Uniform laplacian\", description=\"If true uniform weights will be used instead of edge lenghts. Less conservative deformation.\", default=True)\n premesh = bpy.props.BoolProperty(name=\"Remesh before deform\", description=\"If true remesh done before and after the deform else only after\", default=True)\n\n \n @classmethod\n def poll(cls, context):\n if not PmlsEngine.isr:\n return False\n obj = context.active_object\n if obj is None:\n return False\n# if obj.mode != \"OBJECT\":\n# return False\n if not pmls_is_pmlsobj( obj ):\n return False\n if not pmls_is_deform_mesh(obj) or not pmls_has_hedgehog(obj):\n return False\n if not all([pmls_is_deform_mesh(o) and pmls_has_hedgehog(o) for o in context.selected_objects if o.name != obj.name ]):\n return False\n return True\n\n def execute(self, context):\n obj = context.active_object\n in_place = obj.mode == 'EDIT'\n \n sobjs = PmlsObject.get_selected_objects()\n name = obj.name\n sobjs = [o for o in sobjs if name != o.object.name] \n aobj = PmlsObject(obj)\n objs = [aobj.to_matlab(no_hedgehog=(not sobjs and not self.deform), no_anchor=(not sobjs))]\n for o in sobjs:\n objs.append(o.to_matlab(no_anchor=True))\n if self.deform:\n T = PmlsEngine.remesh_union_bihar(objs, self.voxel_siz, self.extend, self.cuda, self.marcub, self.unilap, self.premesh)\n else:\n T = PmlsEngine.remesh_union(objs, self.voxel_siz, self.extend, self.cuda, self.marcub)\n if in_place:\n aobj = aobj.update_from_matlab(T)\n else:\n T = aobj.complete_matlab_data(T)\n PmlsObject(T)\n return {'FINISHED'}\n\nclass PmlsNormalUnion(bpy.types.Operator):\n \"\"\"Union of meshes by intersecting faces\"\"\"\n bl_idname = \"pmls.normal_union\"\n bl_label = \"pmls normal union\"\n\n premesh = bpy.props.BoolProperty(default=True)\n tetgen = bpy.props.BoolProperty(name=\"Remesh turtles before union\", description=\"Remesh increases the number of triangles, but the mesh will be nicer.\", default=True)\n tetgen_a = bpy.props.FloatProperty(\n name=\"Max volume of tetrahedra (m3)\", description=\"Tetgen command line parameter -a.\",\n default=0.5, step=0.001, min=0.0000001, soft_min=0.0000001, soft_max=100.0, subtype='UNSIGNED', unit='VOLUME')\n tetgen_q = bpy.props.FloatProperty(\n name=\"Max radius-edge ratio\", description=\"First value of tetgen command line parameter -q.\",\n default=2.0, step=0.001, min=1.0, soft_min=1.0, soft_max=100.0, subtype='UNSIGNED')\n tetgen_d = bpy.props.FloatProperty(\n name=\"Min dihedral angle (deg)\", description=\"Second value of tetgen command line parameter -q.\",\n default=0.0, step=5.0, min=0.0, soft_min=0.0, soft_max=70, subtype='UNSIGNED')\n\n \n @classmethod\n def poll(cls, context):\n if not PmlsEngine.isr:\n return False\n obj = context.active_object\n if obj is None:\n return False\n if obj.mode != \"OBJECT\":\n return False\n if not pmls_is_pmlsobj( obj ):\n return False\n if not pmls_is_deform_mesh(obj):\n return False\n if not all([pmls_is_deform_mesh(o) for o in context.selected_objects if o.name != obj.name ]):\n return False\n return True\n\n def execute(self, context):\n obj = context.active_object\n sobjs = PmlsObject.get_selected_objects()\n aobj = PmlsObject(obj)\n objs = [aobj.to_matlab()]\n name = obj.name\n for obj in sobjs:\n if name == obj.object.name:\n continue\n objs.append(obj.to_matlab())\n T = PmlsEngine.normal_union(objs, self.premesh, self.tetgen, self.tetgen_a, self.tetgen_q, self.tetgen_d)\n PmlsObject(T)\n return {'FINISHED'}\n\nclass PmlsCreateVolMesh(bpy.types.Operator):\n \"\"\"Tetrahedralizes the interior of surface mesh\"\"\"\n bl_idname = \"pmls.create_vol_mesh\"\n bl_label = \"pmls create volumetric mesh\"\n\n\n \n @classmethod\n def poll(cls, context):\n if not PmlsEngine.isr:\n return False\n obj = context.active_object\n if obj is None:\n return False\n if obj.mode != \"OBJECT\":\n return False\n if not pmls_is_pmlsobj( obj ):\n return False\n return pmls_is_mesh(obj)\n\n def execute(self, context):\n obj = context.active_object\n obj = PmlsObject(obj)\n obj = obj.to_matlab()\n T = PmlsEngine.create_vol_mesh(obj)\n PmlsObject(T)\n return {'FINISHED'}\n\n\n\nclass PmlsExtendHedgehog(bpy.types.Operator):\n bl_idname = \"pmls.extend_hedgehog\"\n bl_label = \"pmls extend hedgehog\"\n \n @classmethod\n def poll(cls, context):\n return PmlsEngine.isr and pmls_is_hedgehog(context.active_object)\n\n def execute(self, context):\n obj = PmlsHedgehog(context.active_object)\n S = obj.to_matlab()\n T = PmlsEngine.extend_hedgehog(S)\n obj = obj.update_from_matlab(T)\n return {'FINISHED'}\n \nclass PmlsRecalculateTurtles(bpy.types.Operator):\n bl_idname = \"pmls.recalculate_turtles\"\n bl_label = \"pmls recalculate turtles\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return PmlsEngine.isr and ( pmls_is_hedgehog(obj) or pmls_is_ehedgehog(obj) ) \n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n S = obj.to_matlab()\n T = PmlsEngine.turtles(S)\n obj = obj.update_from_matlab(T)\n return {'FINISHED'}\n\nclass PmlsSeparateTurtles(bpy.types.Operator):\n \"\"\"Separates hedgehogs by stations, calculates turtles and transorms them to deformable meshes \"\"\" \n \n bl_idname = \"pmls.separate_turtles\"\n bl_label = \"pmls separate turtles\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return PmlsEngine.isr and pmls_is_pmlsobj( obj )\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n S = obj.to_matlab()\n PmlsEngine.separate_turtles(S)\n return {'FINISHED'}\n\nclass PmlsClearTurtles(bpy.types.Operator):\n bl_idname = \"pmls.clear_turtles\"\n bl_label = \"pmls clear turtles\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return pmls_is_turtle(obj)\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n obj.clear_turtles()\n return {'FINISHED'}\n\nclass PmlsHideSelectedStations(bpy.types.Operator):\n bl_idname = \"pmls.hide_selected_stations\"\n bl_label = \"pmls hide selected stations\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return PmlsEngine.isr and pmls_is_pmlsobj( obj )\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n obj.hide_stations(True)\n return {'FINISHED'}\n\nclass PmlsHideUnselectedStations(bpy.types.Operator):\n bl_idname = \"pmls.hide_unselected_stations\"\n bl_label = \"pmls hide unselected stations\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return PmlsEngine.isr and pmls_is_pmlsobj( obj )\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n obj.hide_stations(False)\n return {'FINISHED'}\n \nclass PmlsRevealStations(bpy.types.Operator):\n bl_idname = \"pmls.reveal_stations\"\n bl_label = \"pmls reveal stations\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return PmlsEngine.isr and pmls_is_pmlsobj( obj )\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n obj.reveal_stations()\n return {'FINISHED'}\n\nclass PmlsDeselectAllStations(bpy.types.Operator):\n bl_idname = \"pmls.deselect_all_stations\"\n bl_label = \"pmls deselect all stations\"\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n return pmls_is_pmlsobj( obj )\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n obj.deselect_stations()\n return {'FINISHED'}\n \nclass PmlsMergeHedgehogs(bpy.types.Operator): \n bl_idname = \"pmls.merge_selected_hedges_to_active\"\n bl_label = \"pmls merge selected hedgehogs to active\"\n \n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj is None:\n return False\n if obj.mode != \"OBJECT\":\n return False\n if not pmls_is_pmlsobj( obj ):\n return False\n if not PmlsObject.pmls_get_type(obj).is_hedgehog():\n return False\n return any([PmlsObject.pmls_get_type(o).is_hedgehog() for o in context.selected_objects if o.name != obj.name and pmls_is_pmlsobj( o )]) \n \n def execute(self, context):\n obj = context.active_object\n sobjs = context.selected_objects\n aobj = PmlsObject(obj)\n objs = [aobj.to_matlab()]\n name = obj.name\n for obj in sobjs:\n if name == obj.name:\n continue\n pobj = PmlsObject(obj)\n if pobj and pobj.is_hedgehog():\n objs.append(pobj.to_matlab())\n T = PmlsEngine.merge_hedgehogs(objs)\n PmlsObject(T)\n# aobj = aobj.update_from_matlab(T) \n return {'FINISHED'}\n\n\nclass PmlsCutBackAtBridges(bpy.types.Operator):\n \"\"\"Cut back splay shots at small features\"\"\" \n bl_idname = \"pmls.cutback_at_bridges\"\n bl_label = \"pmls cutback at bridges\"\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj is None:\n return False\n return pmls_is_deform_mesh(obj)\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n S = obj.to_matlab()\n data = PmlsEngine.cutback_at_bridges(S)\n selvt = data.get(\"selhdgvt\")\n obj = PmlsObject(data)\n if obj:\n if selvt:\n if not isinstance(selvt, collections.Iterable):\n selvt = [[selvt]]\n selvt = [v[0] for v in selvt]\n bm = obj.get_bmesh()\n bm.verts.ensure_lookup_table()\n for v in bm.verts:\n v.hide = False\n v.select = False\n for i in selvt:\n bm.verts[i].hide = False\n bm.verts[i].select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.object.data, tessface=False, destructive=False)\n return {'FINISHED'}\n\n\nclass PmlsSurfaceDeform(bpy.types.Operator):\n \"\"\"Snaps surface to points and interpolates by biharmonic surface\"\"\" \n bl_idname = \"pmls.smoot_by_surface_deform\"\n bl_label = \"pmls smooth by surface deform\"\n# hedgehog = bpy.props.StringProperty()\n# points = bpy.props.StringProperty()\n\n unilap = bpy.props.BoolProperty(\n name=\"Uniform laplacian\", default=True)\n\n snaptol = bpy.props.FloatProperty(\n name=\"Snap tolerance (cm):\", default=5.0, step=5, min=0.000001, soft_min=0.1, soft_max=100000.0, subtype='DISTANCE')\n \n to_raycheck = bpy.props.BoolProperty(\n name=\"Ray check\", default=True)\n \n voxsiz = bpy.props.FloatProperty(\n name=\"Voxel size for ray check (cm):\", default=7.0, step=5, min=0.000001, soft_min=1.0, soft_max=1000.0, subtype='DISTANCE')\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj is None:\n return False\n# if obj.mode != \"OBJECT\":\n# return False\n return pmls_is_deform_mesh(obj)\n# if not pmls_is_pmlsobj( obj ):\n# return False\n# if not PmlsObject().pmls_get_type(obj).is_mesh():\n# return False\n# if pmls_is_deform_mesh(obj):\n# return True\n# hdg = context.scene.pmls_op_smooth_by_surface_deform_hdg\n# if hdg:\n# ot = PmlsObject.pmls_get_type(bpy.data.objects[hdg])\n# if ot and ot.is_hedgehog():\n# return True\n# pnt = context.scene.pmls_op_smooth_by_surface_deform_pnt\n# if pnt and pmls_is_mesh(bpy.data.objects[pnt]):\n# return True\n# return False\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n in_place = obj.object.mode == 'EDIT'\n S = obj.to_matlab()\n# HP = {}\n# hedgehog = None\n# if self.hedgehog:\n# hedgehog = PmlsObject(context.scene.objects[self.hedgehog])\n# HP[\"H\"] = hedgehog.to_matlab()\n# points = None\n# if self.points:\n# points = PmlsObject(context.scene.objects[self.points])\n# HP[\"P\"] = points.to_matlab()\n# data = PmlsEngine.bihar_pnts(S, HP, self.snaptol, self.voxsiz)\n data = PmlsEngine.bihar_pnts(S, self.unilap, self.snaptol, self.voxsiz, self.to_raycheck)\n selvt = data.get(\"selhdgvt\")\n# n = len(data)\n if in_place:\n obj = obj.update_from_matlab(data)\n# if n > 1:\n# points = points.update_from_matlab(data[1])\n else:\n# NH = data.get(\"hedgehog\")\n# if not NH and NH is not None:\n# data[\"hedgehog\"] = obj.hedgehog().object\n \n \n data = obj.complete_matlab_data(data)\n \n obj = PmlsObject(data)\n obj = obj.hedgehog()\n if obj:\n if selvt:\n if not isinstance(selvt, collections.Iterable):\n selvt = [[selvt]]\n selvt = [v[0] for v in selvt]\n bm = obj.get_bmesh()\n bm.verts.ensure_lookup_table()\n for i in selvt:\n bm.verts[i].hide = False\n bm.verts[i].select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.object.data, tessface=False, destructive=False)\n \n# if n > 1:\n# points = PmlsObject(data[1])\n# obj.advance(hedgehog, points)\n return {'FINISHED'}\n\nclass PmlsSelectOutliers(bpy.types.Operator):\n \"\"\"Select outliers\"\"\" \n bl_idname = \"pmls.select_outliers\"\n bl_label = \"pmls select outliers\"\n# hedgehog = bpy.props.StringProperty()\n# points = bpy.props.StringProperty()\n internal = bpy.props.FloatProperty(\n name=\"Min internal angle (deg)\", description=\"Vertices without larger iternal angle in connected triangles will be selected.\",\n default=5.0, step=0.25, min=0.0, soft_min=0.0, soft_max=30, subtype='UNSIGNED')\n dihedral = bpy.props.FloatProperty(\n name=\"Min dihedral angle (deg)\", description=\"Vertices with smaller dihedral angle on connected edges will be selected.\",\n default=5.0, step=0.25, min=0.0, soft_min=0.0, soft_max=45, subtype='UNSIGNED')\n\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj.mode != \"EDIT\":\n return False\n if obj is None:\n return False\n return pmls_is_hedgehog(obj) or pmls_is_ehedgehog(obj)\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n S = obj.to_matlab()\n selvt = PmlsEngine.get_outliers(S, self.internal, self.dihedral)\n if selvt:\n if not isinstance(selvt, collections.Iterable):\n selvt = [[selvt]]\n selvt = [v[0] for v in selvt]\n bm = obj.get_bmesh()\n bm.verts.ensure_lookup_table()\n for i in selvt:\n bm.verts[i].select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.object.data, tessface=False, destructive=False)\n return {'FINISHED'}\n \nclass PmlsMapPointsToMesh(bpy.types.Operator):\n \"\"\"Select mapped points on surface\"\"\" \n bl_idname = \"pmls.map_points_to_mesh\"\n bl_label = \"pmls map points to mesh\"\n# hedgehog = bpy.props.StringProperty()\n# points = bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj.mode != \"EDIT\":\n return False\n if obj is None:\n return False\n return pmls_is_deform_mesh(obj)\n \n def execute(self, context):\n obj = PmlsObject(context.active_object)\n S = obj.to_matlab()\n selvt = PmlsEngine.map_pnts(S)\n if selvt:\n if not isinstance(selvt, collections.Iterable):\n selvt = [[selvt]]\n selvt = [v[0] for v in selvt]\n bm = obj.get_bmesh()\n bm.verts.ensure_lookup_table()\n for i in selvt:\n bm.verts[i].select = True\n bm.select_flush(True)\n bmesh.update_edit_mesh(obj.object.data, tessface=False, destructive=False)\n return {'FINISHED'}\n\n# class PmlsSplit(bpy.types.Operator):\n# \"\"\"Splits mesh into two pieces\"\"\" \n# bl_idname = \"pmls.split\"\n# bl_label = \"pmls split\"\n# selmesh = bpy.props.StringProperty()\n# \n# @classmethod\n# def poll(cls, context):\n# obj = context.active_object\n# if not PmlsEngine.isr:\n# return False\n# if obj is None:\n# return False\n# if obj.mode != \"EDIT\":\n# return False\n# if not pmls_is_pmlsobj( obj ):\n# return False\n# if not pmls_is_mesh(obj):\n# return False\n# smsh = context.scene.pmls_op_split_selector\n# if smsh:\n# return pmls_is_mesh(bpy.data.objects[smsh])\n# return False\n# \n# def execute(self, context):\n# S = PmlsObject(context.active_object).to_matlab()\n# H = PmlsObject(context.scene.objects[self.selmesh]).to_matlab()\n# data = PmlsEngine.split(S, H)\n# for D in data:\n# PmlsObject(D) \n# return {'FINISHED'}\n\nclass PmlsEditOperator(bpy.types.Operator):\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj is None:\n return False\n if obj.mode != \"EDIT\":\n return False\n if not pmls_is_pmlsobj( obj ):\n return False\n return PmlsObject.pmls_get_type(obj).is_hedgehog() or pmls_is_deform_mesh(obj)\n \nclass PmlsCopy(PmlsEditOperator):\n \"\"\"Copy selection to new object\"\"\" \n bl_idname = \"pmls.copy\"\n bl_label = \"pmls copy\"\n simple = bpy.props.BoolProperty()\n\n def execute(self, context):\n obj = PmlsObject(context.active_object)\n if obj.is_hedgehog():\n obj.copy()\n else:\n obj.copy(self.simple)\n return {'FINISHED'}\n\nclass PmlsSplit(PmlsEditOperator):\n \"\"\"Split into 2 new object\"\"\" \n bl_idname = \"pmls.split\"\n bl_label = \"pmls split\"\n simple = bpy.props.BoolProperty()\n\n def execute(self, context):\n obj = PmlsObject(context.active_object)\n if obj.is_hedgehog():\n obj.split()\n else:\n obj.split(self.simple)\n return {'FINISHED'}\n\nclass PmlsCut(PmlsEditOperator):\n \"\"\"Cut selection to new object\"\"\" \n bl_idname = \"pmls.cut\"\n bl_label = \"pmls cut\"\n simple = bpy.props.BoolProperty()\n\n def execute(self, context):\n obj = PmlsObject(context.active_object)\n if obj.is_hedgehog():\n obj.cut()\n else:\n obj.cut(self.simple)\n return {'FINISHED'}\n\nclass PmlsDelete(PmlsEditOperator):\n \"\"\"Delete selection\"\"\" \n bl_idname = \"pmls.delete\"\n bl_label = \"pmls delete\"\n simple = bpy.props.BoolProperty()\n\n def execute(self, context):\n obj = PmlsObject(context.active_object)\n if obj.is_hedgehog():\n obj.delete()\n else:\n obj.delete(self.simple)\n return {'FINISHED'}\n\nclass PmlsFill(bpy.types.Operator):\n \"\"\"Fill selected hole\"\"\" \n bl_idname = \"pmls.fill\"\n bl_label = \"pmls fill\"\n simple = bpy.props.BoolProperty()\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if not PmlsEngine.isr:\n return False\n if obj is None:\n return False\n if obj.mode != \"EDIT\":\n return False\n if not pmls_is_pmlsobj( obj ):\n return False\n return PmlsObject.pmls_get_type(obj).is_mesh()\n \n @classmethod\n def get_loop(cls, vs):\n loop=[]\n if not vs:\n return loop\n v1 = vs[0]\n es = [e for e in v1.link_edges if e.select]\n if not es:\n raise Exception('Bad selecting!')\n v2 = es[0].other_vert(v1)\n loop = [v1,v2]\n vstart = v1;\n while True:\n es = [e for e in v2.link_edges if e.select]\n if not es:\n raise Exception('Bad selecting!')\n vs = [e.other_vert(v2) for e in es if e.other_vert(v2) not in loop ]\n if not vs:\n vs = [e.other_vert(v2) for e in es if e.other_vert(v2) in loop and e.other_vert(v2) != v1]\n if len(vs) != 1 or vs[0] != vstart:\n raise Exception('Bad selecting!')\n return loop\n if len(vs) > 1: \n raise Exception('Bad selecting!')\n v1 = v2\n v2 = vs[0]\n loop.append(v2)\n \n\n\n def execute(self, context):\n obj = PmlsObject(context.active_object)\n bm = obj.get_bmesh()\n vs = [v for v in bm.verts if v.select]\n if not vs:\n raise Exception('Bad selecting!')\n loops = []\n while vs:\n loop = self.get_loop(vs)\n vs = list(set(vs) - set(loop))\n loops.append(matlab.double([list(v.co) for v in loop]))\n \n data = PmlsEngine.fill(loops)\n data2 = []\n for D in data:\n data2.append( PmlsObject(D) )\n obj.select = False\n for obji in data2:\n obji.object.select = True\n bpy.ops.object.duplicate()\n context.scene.objects.active = obj.object;\n bpy.ops.object.join()\n obj.set_edit_mode()\n for obji in data2:\n obji.object.select = True\n \n return {'FINISHED'}\n \n \n\n \nclass PmlsCreateMainCsv(bpy.types.Operator):\n \"\"\"Create main csv\"\"\"\n bl_idname = \"pmls.create_main_csv\"\n bl_label = \"pmls create main csv\"\n \n filter_glob = bpy.props.StringProperty(default=\"*.sqlite\", options={'HIDDEN'})\n directory = bpy.props.StringProperty(subtype=\"DIR_PATH\")\n filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n @classmethod\n def poll(cls, context):\n return PmlsEngine.isr\n\n def execute(self, context):\n PmlsEngine.main_sqlite2csv(self.filepath, self.directory + \"main.csv\")\n scene = context.scene\n scene.pmls_op_create_survey_csvs_sqlite = self.filepath \n scene.pmls_op_create_survey_csvs_csv = (self.directory + \"main.csv\") \n return {'FINISHED'}\n\n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'}\n\nclass PmlsCreateSurveyCsvs(bpy.types.Operator):\n \"\"\"Create survey csvs (main.csv must exist in the directory of sqlite file)\"\"\"\n bl_idname = \"pmls.create_survey_csvs\"\n bl_label = \"pmls create survey csvs\"\n \n \n @classmethod\n def poll(cls, context):\n scene = context.scene\n return PmlsEngine.isr and os.path.isfile(scene.pmls_op_create_survey_csvs_sqlite) and os.path.isfile(scene.pmls_op_create_survey_csvs_csv)\n\n def execute(self, context):\n scene = context.scene\n destdir = os.path.dirname(scene.pmls_op_create_survey_csvs_sqlite)\n PmlsEngine.sqlite2csvs(scene.pmls_op_create_survey_csvs_sqlite, scene.pmls_op_create_survey_csvs_csv, destdir + '\\\\')\n return {'FINISHED'}\n\nclass PmlsImportCsv(bpy.types.Operator):\n \"\"\"Import data from csvs\"\"\"\n bl_idname = \"pmls.import_csv\"\n bl_label = \"pmls import csv\"\n \n filter_glob = bpy.props.StringProperty(default=\"*.csv\", options={'HIDDEN'})\n directory = bpy.props.StringProperty(subtype=\"DIR_PATH\")\n filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n \n @classmethod\n def poll(cls, context):\n scene = context.scene\n return PmlsEngine.isr and (not scene.pmls_op_import_csv_use or os.path.isfile(scene.pmls_op_import_csv_pol))\n\n def execute(self, context):\n scene = context.scene\n if scene.pmls_op_import_csv_use:\n polfile = scene.pmls_op_import_csv_pol\n else:\n polfile = None\n out = PmlsEngine.get_input(self.filepath, polfile, scene.pmls_op_import_csv_min)\n if not out[0]:\n if out[2]:\n self.report({'ERROR'}, out[2])\n else: \n self.report({'ERROR'}, 'Unknown matlab error')\n return {'FINISHED'}\n\n def invoke(self, context, event):\n context.window_manager.fileselect_add(self)\n return {'RUNNING_MODAL'}\n\nclass PmlsRegisterAsMesh(bpy.types.Operator):\n \"\"\"Register to pmls as deformable mesh\"\"\" \n bl_idname = \"pmls.register_as_mesh\"\n bl_label = \"pmls register as mesh\"\n hedgehog = bpy.props.StringProperty()\n points = bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n obj = context.active_object\n if obj is None:\n return False\n if obj.mode != \"OBJECT\":\n return False\n if obj.type != 'MESH':\n return False\n# if not PmlsObject().pmls_get_type(obj).is_mesh():\n# return False\n if pmls_is_deform_mesh(obj):\n return False\n hdg = context.scene.pmls_op_smooth_by_surface_deform_hdg\n if hdg:\n ot = PmlsObject.pmls_get_type(bpy.data.objects[hdg])\n if not (ot and ot.is_hedgehog()):\n return False\n pnt = context.scene.pmls_op_smooth_by_surface_deform_pnt\n if pnt and bpy.data.objects[pnt].type != 'MESH':\n return False\n return True\n \n def execute(self, context):\n obj = context.active_object\n ot = PmlsObject.pmls_get_type(obj)\n if not ot or not PmlsObject().pmls_get_type(obj).is_mesh():\n obj[\"pmls_type\"] = \"mesh\"\n obj = PmlsObject(obj)\n hedgehog = None\n if self.hedgehog:\n hedgehog = PmlsObject(context.scene.objects[self.hedgehog])\n anchor = None\n if self.points:\n points = context.scene.objects[self.points]\n if not PmlsObject().pmls_get_type(points).is_mesh():\n points[\"pmls_type\"] = \"mesh\"\n anchor = PmlsObject(points)\n obj.advance(hedgehog, anchor)\n \n return {'FINISHED'}\n\nclass MessageOperator(bpy.types.Operator):\n bl_idname = \"error.message\"\n bl_label = \"Message\"\n type = bpy.props.StringProperty()\n message = bpy.props.StringProperty()\n \n def execute(self, context):\n self.report({'INFO'}, self.message)\n print(self.message)\n return {'FINISHED'}\n \n def invoke(self, context, event):\n wm = context.window_manager\n return wm.invoke_popup(self, width=800, height=200)\n \n def draw(self, context):\n self.layout.label(\"A message has arrived\")\n row = self.layout.column(align = True)\n row.prop(self, \"type\")\n row.prop(self, \"message\")\n row.operator(\"error.ok\")\n \n#\n# The OK button in the error dialog\n#\nclass OkOperator(bpy.types.Operator):\n bl_idname = \"error.ok\"\n bl_label = \"OK\"\n def execute(self, context):\n return {'FINISHED'} \n#PANELS\n\nclass PmlsPanel(bpy.types.Panel):\n bl_idname = \"OBJECT_PT_pmls\"\n bl_label = \"MATLAB Engine\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_category = \"Pmls\"\n bl_context = \"objectmode\"\n\n def draw(self, context):\n layout = self.layout\n col = layout.column(align=True)\n col.label(text=\"Start/Connect to:\")\n names = PmlsEngine.findmatlab()\n if names:\n for n in names:\n col.operator(\"pmls.connect\", text=\"Connect to: \" + n).name = n\n else:\n col.label(text=\"Nothing to connect to. Install pmls lib or start matlab engine!\")\n# col.operator(\"pmls.start\", text=\"Start new\")\n col.separator()\n col.label(text=\"Stop/Disconnect:\")\n col.operator(\"pmls.stop\", text=\"Stop\")\n col.operator(\"pmls.disconnect\", text = \"Disconnect \" + PmlsEngine.name )\n col.operator(\"pmls.loadmat\", text=\"Load mat file\")\n\nclass PmlsMultipleObjectPanel(bpy.types.Panel):\n bl_idname = \"OBJECT_PT_pmls_multiple\"\n bl_label = \"PMLS Objects\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_category = \"Pmls\"\n bl_context = \"objectmode\"\n\n def draw(self, context):\n layout = self.layout\n col = layout.column(align=False)\n col.label(text=\"Save selected:\")\n box = col.box()\n col1 = box.column(align=False)\n col1.operator(\"pmls.savemat\", text=\"Save mat file\")\n col.separator()\n col.label(text=\"Hedgehogs:\")\n box = col.box()\n col1 = box.column(align=False)\n col1.operator(\"pmls.merge_selected_hedges_to_active\", text=\"Merge\")\n col1.operator(\"pmls.separate_turtles\", text=\"Separate\")\n col1.operator(\"pmls.cutback_at_bridges\", text=\"Cut back at bridges\")\n col.separator()\n col.label(text=\"Smooth mesh:\")\n box = col.box()\n col1 = box.column(align=False)\n opprops = col1.operator(\"pmls.smoot_by_surface_deform\", text=\"Surface deform\")\n scene = context.scene\n# opprops.hedgehog = scene.pmls_op_smooth_by_surface_deform_hdg\n# opprops.points = scene.pmls_op_smooth_by_surface_deform_pnt\n opprops.unilap = scene.pmls_op_smooth_by_surface_deform_unilap\n opprops.snaptol = scene.pmls_op_smooth_by_surface_deform_tol\n opprops.to_raycheck = scene.pmls_op_smooth_by_surface_deform_ray\n opprops.voxsiz = scene.pmls_op_smooth_by_surface_deform_vox\n# col1.prop_search(scene, \"pmls_op_smooth_by_surface_deform_hdg\", \n# scene, \"objects\", text = \"Hedgehog:\", icon='OBJECT_DATA')\n# col1.prop_search(scene, \"pmls_op_smooth_by_surface_deform_pnt\", \n# scene, \"objects\", text = \"Additional points:\", icon='OBJECT_DATA')\n col1.prop(scene, \"pmls_op_smooth_by_surface_deform_unilap\")\n col1.prop(scene, \"pmls_op_smooth_by_surface_deform_tol\")\n col1.prop(scene, \"pmls_op_smooth_by_surface_deform_ray\")\n if scene.pmls_op_smooth_by_surface_deform_ray:\n box = col1.box()\n col2 = box.column(align=False)\n col2.prop(scene, \"pmls_op_smooth_by_surface_deform_vox\")\n col.separator()\n col.label(text=\"Voxelized union:\")\n box = col.box()\n col1 = box.column(align=False)\n opprops = col1.operator(\"pmls.voxelized_union\", text=\"Union/Remesh by voxelization\")\n opprops.voxel_siz = scene.pmls_op_hedgehog_union_vox\n opprops.extend = scene.pmls_op_hedgehog_union_ext\n opprops.cuda = scene.pmls_op_hedgehog_union_cuda\n opprops.marcub = scene.pmls_op_hedgehog_union_marcub\n opprops.deform = scene.pmls_op_voxelized_union_vol\n# opprops.hedgehog = scene.pmls_op_smooth_by_surface_deform_hdg\n opprops.unilap = scene.pmls_op_voxelized_union_unilap\n opprops.premesh = scene.pmls_op_voxelized_union_pre\n col1.prop( scene, \"pmls_op_hedgehog_union_vox\" )\n col1.prop( scene, \"pmls_op_hedgehog_union_ext\" )\n col1.prop( scene, \"pmls_op_hedgehog_union_cuda\")\n col1.prop( scene, \"pmls_op_hedgehog_union_marcub\")\n col1.prop( scene, \"pmls_op_voxelized_union_vol\")\n if scene.pmls_op_voxelized_union_vol:\n box = col1.box()\n col2 = box.column(align=False)\n# col2.prop_search(scene, \"pmls_op_smooth_by_surface_deform_hdg\", \n# scene, \"objects\", text = \"Hedgehog:\", icon='OBJECT_DATA')\n col2.prop( scene, \"pmls_op_voxelized_union_unilap\" )\n col2.prop( scene, \"pmls_op_voxelized_union_pre\" )\n col.separator()\n col.label(text=\"Normal union:\")\n box = col.box()\n col1 = box.column(align=False)\n opprops = col1.operator(\"pmls.normal_union\", text=\"Union\")\n opprops.premesh = scene.pmls_op_normal_union_pre\n opprops.tetgen = scene.pmls_op_hedgehog_union_tetgen\n opprops.tetgen_a = scene.pmls_op_hedgehog_union_tetgen_a\n opprops.tetgen_q = scene.pmls_op_hedgehog_union_tetgen_q\n opprops.tetgen_d = scene.pmls_op_hedgehog_union_tetgen_d\n \n col1.prop( scene, \"pmls_op_normal_union_pre\" )\n col1.prop( scene, \"pmls_op_hedgehog_union_tetgen\" )\n if scene.pmls_op_hedgehog_union_tetgen:\n col1.prop( scene, \"pmls_op_hedgehog_union_tetgen_a\" )\n col1.prop( scene, \"pmls_op_hedgehog_union_tetgen_q\" )\n col1.prop( scene, \"pmls_op_hedgehog_union_tetgen_d\" )\n\n\n col.separator()\n# col.label(text=\"Split into two pieces:\")\n# box = col.box()\n# col1 = box.column(align=False)\n# opprops = col1.operator(\"pmls.split\", text=\"Split:\")\n# opprops.selmesh = scene.pmls_op_split_selector\n# col1.prop_search(scene, \"pmls_op_split_selector\", \n# scene, \"objects\", text = \"Selector mesh:\", icon='OBJECT_DATA')\n# \n# col.separator()\n col.label(text=\"Register as deformable mesh:\")\n box = col.box()\n col1 = box.column(align=False)\n opprops = col1.operator(\"pmls.register_as_mesh\", text=\"Resgister as mesh\")\n opprops.hedgehog = scene.pmls_op_smooth_by_surface_deform_hdg\n opprops.points = scene.pmls_op_smooth_by_surface_deform_pnt\n col1.prop_search(scene, \"pmls_op_smooth_by_surface_deform_hdg\", \n scene, \"objects\", text = \"Hedgehog:\", icon='OBJECT_DATA')\n col1.prop_search(scene, \"pmls_op_smooth_by_surface_deform_pnt\", \n scene, \"objects\", text = \"Anchor points:\", icon='OBJECT_DATA')\n \n\n# col.separator()\n# col.label(text=\"Create volumetric mesh:\")\n# box = col.box()\n# col1 = box.column(align=False)\n# col1.operator(\"pmls.create_vol_mesh\", text=\"Create\")\n \n \nclass PmlsSqlitePanel(bpy.types.Panel):\n bl_idname = \"OBJECT_PT_pmls_sqlite\"\n bl_label = \"Topodroid sqlite\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_category = \"Pmls\"\n bl_context = \"objectmode\"\n\n def draw(self, context):\n layout = self.layout\n# obj = context.active_object\n scn = context.scene\n col = layout.column(align=False)\n \n col.label(text=\"Main csv:\")\n col.operator(\"pmls.create_main_csv\", text=\"Create\")\n col.label(text=\"Survey csvs:\")\n box = col.box()\n col1 = box.column(align=False)\n col1.prop(scn, \"pmls_op_create_survey_csvs_sqlite\")\n col1.prop(scn, \"pmls_op_create_survey_csvs_csv\")\n col1.operator(\"pmls.create_survey_csvs\", text=\"Create\")\n col.separator()\n col.label(text=\"Import csv\")\n box = col.box()\n col1 = box.column(align=False)\n col1.prop(scn, \"pmls_op_import_csv_use\")\n if scn.pmls_op_import_csv_use:\n col1.prop(scn, \"pmls_op_import_csv_pol\")\n col1.prop(scn, \"pmls_op_import_csv_min\")\n col1.operator(\"pmls.import_csv\", text=\"Import\")\n \n \nclass PmlsObjectPanel(bpy.types.Panel):\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_category = \"Pmls\"\n bl_context = \"mesh_edit\"\n \n @classmethod\n def poll(cls, context):\n if not PmlsEngine.isr:\n return False\n obj = context.active_object\n if obj is None:\n return False\n if obj.type != 'MESH':\n return False\n if \"pmls_type\" not in obj.keys():\n return False\n return cls._poll( context )\n \n def editdisplay(self, context, col0):\n col0.label(text=\"Edit:\")\n box = col0.box()\n col = box.column(align=True)\n col.operator(\"pmls.copy\", text=\"Copy\").simple = context.scene.pmls_edit_simple\n col.operator(\"pmls.split\", text=\"Split\").simple = context.scene.pmls_edit_simple\n col.operator(\"pmls.cut\", text=\"Cut\").simple = context.scene.pmls_edit_simple\n col.operator(\"pmls.delete\", text=\"Delete\").simple = context.scene.pmls_edit_simple\n return col\n \n\nclass PmlsDeformMeshPanel(PmlsObjectPanel): \n bl_idname = \"OBJECT_PT_pmls_deform_mesh\"\n bl_label = \"Deformable mesh\"\n\n @classmethod\n def _poll(cls, context):\n return context.active_object[\"pmls_type\"] == \"deform_mesh\"\n\n def draw(self, context):\n layout = self.layout\n col0 = layout.column(align=False)\n col = self.editdisplay(context, col0)\n col.prop(context.scene, \"pmls_edit_simple\")\n \n col0.separator()\n# col = col0\n# col.label(text=\"Deform mesh:\")\n# box = col.box()\n# col1 = box.column(align=False)\n# opprops = col1.operator(\"pmls.smoot_by_surface_deform\", text=\"Surface deform\")\n# scene = context.scene\n# obj = context.active_object\n# obj = PmlsObject(obj)\n\n\n# h = obj.hedgehog()\n# opprops.hedgehog = \"\"\n# if h:\n# opprops.hedgehog = obj.hedgehog().object.name\n# h = obj.anchor()\n# opprops.points = \"\"\n# if h:\n# opprops.points = obj.anchor().object.name\n\n\n# opprops.snaptol = scene.pmls_op_smooth_by_surface_deform_tol\n# opprops.voxsiz = scene.pmls_op_smooth_by_surface_deform_vox\n# col1.prop(scene, \"pmls_op_smooth_by_surface_deform_tol\") \n# col1.prop(scene, \"pmls_op_smooth_by_surface_deform_vox\")\n# col.separator()\n col = col0\n col.label(text=\"Map points to mesh:\")\n box = col.box()\n col1 = box.column(align=False)\n col1.operator(\"pmls.map_points_to_mesh\", text=\"Select mapped\")\n \n \n\nclass PmlsHedgehogPanelBase(PmlsObjectPanel):\n \n @classmethod\n def _poll(cls, context):\n return context.active_object[\"pmls_type\"] == \"hedgehog\"\n \n def drawdisplay(self, context, col0):\n obj = context.active_object\n col0.label(text=\"Display:\")\n box = col0.box()\n col = box.column(align=True)\n# col.operator(\"pmls.recalculate_turtles\", text=\"Recalculate turtles\")\n# col.separator()\n# col.label(text=\"Stations\")\n# col.operator(\"pmls.deselect_all_stations\", text=\"Deselect all\")\n# col.operator(\"pmls.hide_selected_stations\", text=\"Hide selected\")\n# col.operator(\"pmls.hide_unselected_stations\", text=\"Hide unselected\")\n# col.operator(\"pmls.reveal_stations\", text=\"Reveal all\")\n# col.separator()\n# col.prop( obj.users_scene[0], \"pmls_disp_base\" )\n\n opprop = col.operator(\"pmls.select_outliers\", text=\"Select outliers\")\n opprop.internal = obj.users_scene[0].pmls_op_select_outliers_internal\n opprop.dihedral = obj.users_scene[0].pmls_op_select_outliers_dihedral\n col.prop( obj.users_scene[0], \"pmls_op_select_outliers_internal\" )\n col.prop( obj.users_scene[0], \"pmls_op_select_outliers_dihedral\" )\n \n col.prop( obj.users_scene[0], \"pmls_disp_pnts\" )\n col.prop( obj.users_scene[0], \"pmls_disp_zeroshots\" )\n col.prop( obj.users_scene[0], \"pmls_disp_edges\" )\n return col\n\n def draw(self, context):\n layout = self.layout\n col0 = layout.column(align=False)\n self.drawdisplay(context, col0)\n col0.separator()\n self.editdisplay(context, col0)\n col0.separator()\n col0.label(text=\"Extend by visibility:\")\n box = col0.box()\n col = box.column(align=True)\n col.operator(\"pmls.extend_hedgehog\", text=\"Extend\")\n \n\nclass PmlsHedgehogPanel(PmlsHedgehogPanelBase):\n bl_idname = \"OBJECT_PT_pmls_hedgehog\"\n bl_label = \"Hedgehog\"\n \n @classmethod\n def _poll(cls, context):\n return context.active_object[\"pmls_type\"] == \"hedgehog\"\n \n# def drawdisplay(self, context, col0):\n# obj = context.active_object\n# col0.label(text=\"Display:\")\n# box = col0.box()\n# col = box.column(align=True)\n# col.operator(\"pmls.recalculate_turtles\")\n# col.prop( obj.users_scene[0], \"pmls_disp_base\" )\n# col.prop( obj.users_scene[0], \"pmls_disp_pnts\" )\n# col.prop( obj.users_scene[0], \"pmls_disp_edges\" )\n# return col\n\n# def draw(self, context):\n# layout = self.layout\n# col0 = layout.column(align=False)\n# self.drawdisplay(context, col0) \n\nclass PmlsTurtlePanel(PmlsHedgehogPanelBase):\n bl_idname = \"OBJECT_PT_pmls_turtle\"\n bl_label = \"Turtle\"\n \n @classmethod\n def _poll(cls, context):\n return context.active_object[\"pmls_type\"] == \"turtle\"\n \n def drawdisplay(self, context, col0):\n col = super().drawdisplay(context, col0)\n obj = context.active_object\n col.prop( obj.users_scene[0], \"pmls_disp_faces\" )\n col.separator()\n col.operator(\"pmls.clear_turtles\", text=\"Clear turtle\")\n return col\n \n \n\n \nclass PmlsExtendedHedgehogPanelBase(PmlsHedgehogPanelBase):\n def drawdisplay(self, context, col0):\n col = super().drawdisplay(context, col0)\n obj = context.active_object\n col.prop( obj.users_scene[0], \"pmls_disp_extedges\" )\n return col\n\n\n def draw(self, context):\n layout = self.layout\n col0 = layout.column(align=False)\n self.drawdisplay(context, col0) \n obj = context.active_object\n col0.separator()\n self.editdisplay(context, col0)\n col0.separator()\n\n col0.label(text=\"Downdate to hedgehog:\")\n box = col0.box()\n col = box.column(align=True)\n col.operator(\"pmls.downdate_ehedgehog\", text=\"Downdate\").todel = obj.users_scene[0].pmls_op_ehedgehog_downdate\n col.prop( obj.users_scene[0], \"pmls_op_ehedgehog_downdate\" )\n\n col0.label(text=\"Union by voxelization:\")\n box = col0.box()\n col = box.column(align=True)\n opprop = col.operator(\"pmls.hedgehog_union\", text=\"Create\")\n opprop.voxel_siz = obj.users_scene[0].pmls_op_hedgehog_union_vox\n opprop.extend = obj.users_scene[0].pmls_op_hedgehog_union_ext\n opprop.cuda = obj.users_scene[0].pmls_op_hedgehog_union_cuda\n opprop.marcub = obj.users_scene[0].pmls_op_hedgehog_union_marcub\n opprop.shortenrays = obj.users_scene[0].pmls_op_hedgehog_union_shorten\n \n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_vox\" )\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_ext\" )\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_cuda\")\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_marcub\")\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_shorten\")\n\n col0.label(text=\"Union:\")\n box = col0.box()\n col = box.column(align=True)\n opprop = col.operator(\"pmls.hedgehog_union_alec\", text=\"Create\")\n opprop.tetgen = obj.users_scene[0].pmls_op_hedgehog_union_tetgen\n opprop.tetgen_a = obj.users_scene[0].pmls_op_hedgehog_union_tetgen_a\n opprop.tetgen_q = obj.users_scene[0].pmls_op_hedgehog_union_tetgen_q\n opprop.tetgen_d = obj.users_scene[0].pmls_op_hedgehog_union_tetgen_d\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_tetgen\" )\n if obj.users_scene[0].pmls_op_hedgehog_union_tetgen:\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_tetgen_a\" )\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_tetgen_q\" )\n col.prop( obj.users_scene[0], \"pmls_op_hedgehog_union_tetgen_d\" )\n\nclass PmlsExtendedHedgehogPanel(PmlsExtendedHedgehogPanelBase):\n bl_idname = \"OBJECT_PT_pmls__extended_hedgehog\"\n bl_label = \"Extended hedgehog\"\n\n @classmethod\n def _poll(cls, context):\n return context.active_object[\"pmls_type\"] == \"ehedgehog\"\n\nclass PmlsExtendedTurtlePanel(PmlsExtendedHedgehogPanelBase):\n bl_idname = \"OBJECT_PT_pmls__extended_turtle\"\n bl_label = \"Extended turtle\"\n\n def drawdisplay(self, context, col0):\n col = super().drawdisplay(context, col0)\n obj = context.active_object\n col.prop( obj.users_scene[0], \"pmls_disp_faces\" )\n col.separator()\n col.operator(\"pmls.clear_turtles\", text=\"Clear turtle\")\n return col\n\n @classmethod\n def _poll(cls, context):\n return context.active_object[\"pmls_type\"] == \"eturtle\"\n\ndef get_pmls_disp_base(self):\n obj = bpy.context.active_object\n return obj[\"disp_base\"]\n\n\ndef set_pmls_disp_base(self, value):\n obj = bpy.context.active_object\n if obj[\"disp_base\"] != value:\n obj[\"disp_base\"] = value\n bpy.ops.pmls.updatedisplay()\n \ndef get_pmls_disp_pnts(self):\n obj = bpy.context.active_object\n return obj[\"disp_pnts\"]\n\n\ndef set_pmls_disp_pnts(self, value):\n obj = bpy.context.active_object\n if obj[\"disp_pnts\"] != value:\n obj[\"disp_pnts\"] = value\n bpy.ops.pmls.updatedisplay()\n \ndef get_pmls_disp_zeroshots(self):\n obj = bpy.context.active_object\n return obj[\"disp_zeroshots\"]\n\n\ndef set_pmls_disp_zeroshots(self, value):\n obj = bpy.context.active_object\n if obj[\"disp_zeroshots\"] != value:\n obj[\"disp_zeroshots\"] = value\n bpy.ops.pmls.updatedisplay()\n \ndef get_pmls_disp_edges(self):\n obj = bpy.context.active_object\n return obj[\"disp_edges\"]\n\n\ndef set_pmls_disp_edges(self, value):\n obj = bpy.context.active_object\n if obj[\"disp_edges\"] != value:\n obj[\"disp_edges\"] = value\n bpy.ops.pmls.updatedisplay()\n\ndef get_pmls_disp_faces(self):\n obj = bpy.context.active_object\n return obj[\"disp_faces\"]\n\n\ndef set_pmls_disp_faces(self, value):\n obj = bpy.context.active_object\n if obj[\"disp_faces\"] != value:\n obj[\"disp_faces\"] = value\n bpy.ops.pmls.updatedisplay()\n \ndef get_pmls_disp_extedges(self):\n obj = bpy.context.active_object\n return obj[\"disp_extedges\"]\n\n\ndef set_pmls_disp_extedges(self, value):\n obj = bpy.context.active_object\n if obj[\"disp_extedges\"] != value:\n obj[\"disp_extedges\"] = value\n bpy.ops.pmls.updatedisplay()\n \n\n\n\n# Register and add to the file selector\ndef register():\n bpy.types.Scene.pmls_disp_base = bpy.props.BoolProperty(name=\"Display stations\",\n get=get_pmls_disp_base, set=set_pmls_disp_base)\n\n bpy.types.Scene.pmls_disp_pnts = bpy.props.BoolProperty(name=\"Display shots\",\n get=get_pmls_disp_pnts, set=set_pmls_disp_pnts)\n\n bpy.types.Scene.pmls_disp_zeroshots = bpy.props.BoolProperty(name=\"Display zeroshots\",\n get=get_pmls_disp_zeroshots, set=set_pmls_disp_zeroshots)\n\n bpy.types.Scene.pmls_disp_edges = bpy.props.BoolProperty(name=\"Display edges\",\n get=get_pmls_disp_edges, set=set_pmls_disp_edges)\n\n bpy.types.Scene.pmls_disp_extedges = bpy.props.BoolProperty(name=\"Display extended edges\",\n get=get_pmls_disp_extedges, set=set_pmls_disp_extedges)\n\n bpy.types.Scene.pmls_disp_faces = bpy.props.BoolProperty(name=\"Display faces\",\n get=get_pmls_disp_faces, set=set_pmls_disp_faces)\n \n bpy.types.Scene.pmls_op_ehedgehog_downdate = bpy.props.BoolProperty(name=\"Delete extended edges\", default=True)\n\n bpy.types.Scene.pmls_op_create_survey_csvs_csv = bpy.props.StringProperty(name=\"Main csv:\", subtype=\"FILE_PATH\")\n bpy.types.Scene.pmls_op_create_survey_csvs_sqlite = bpy.props.StringProperty(name=\"Sqlite:\", subtype=\"FILE_PATH\")\n\n bpy.types.Scene.pmls_op_import_csv_use = bpy.props.BoolProperty(name=\"Use poligon file\", default=False)\n bpy.types.Scene.pmls_op_import_csv_pol = bpy.props.StringProperty(name=\"Poligon file:\", subtype=\"FILE_PATH\")\n bpy.types.Scene.pmls_op_import_csv_min = bpy.props.IntProperty(name=\"Min splay/station:\", default=20, soft_min=0)\n \n bpy.types.Scene.pmls_op_hedgehog_union_vox = bpy.props.FloatProperty(\n name=\"Voxel size (cm):\", default=3.0, step=5, min=0.000001, soft_min=1.0, soft_max=1000.0, subtype='DISTANCE')\n bpy.types.Scene.pmls_op_hedgehog_union_shorten = bpy.props.FloatProperty(\n name=\"Shorten rays (voxel):\", default=0.7, step=1, min=0.0, soft_min=0.0, soft_max=10.0, subtype='DISTANCE')\n bpy.types.Scene.pmls_op_hedgehog_union_ext = bpy.props.FloatProperty(\n name=\"Thin volume (voxel):\", default=1.0, step=5, min=-15.0, soft_min=-10.0, soft_max=10.0, subtype='DISTANCE')\n bpy.types.Scene.pmls_op_hedgehog_union_cuda = bpy.props.BoolProperty(name=\"Voxelize on GPU\", description=\"Voxelization will be done on the graphics card. Requires CUDA 7.5\", default=True)\n bpy.types.Scene.pmls_op_hedgehog_union_marcub = bpy.props.BoolProperty(name=\"Use marching cubes\", description=\"If true after voxelization the surface will be reconstructed with marching cubes algorithm, else each voxel side will be kept.\", default=True)\n\n bpy.types.Scene.pmls_op_hedgehog_union_tetgen = bpy.props.BoolProperty(name=\"Remesh before union\", description=\"Remesh increases the number of triangles, but the mesh will be nicer.\", default=True)\n bpy.types.Scene.pmls_op_hedgehog_union_tetgen_a = bpy.props.FloatProperty(\n name=\"Max volume of tetrahedra (m3)\", description=\"Tetgen command line parameter -a.\",\n default=0.5, step=0.001, min=0.0000001, soft_min=0.0000001, soft_max=100.0, subtype='UNSIGNED', unit='VOLUME')\n bpy.types.Scene.pmls_op_hedgehog_union_tetgen_q = bpy.props.FloatProperty(\n name=\"Max radius-edge ratio\", description=\"First value of tetgen command line parameter -q.\",\n default=2.0, step=0.001, min=1.0, soft_min=1.0, soft_max=100.0, subtype='UNSIGNED')\n bpy.types.Scene.pmls_op_hedgehog_union_tetgen_d = bpy.props.FloatProperty(\n name=\"Min dihedral angle (deg)\", description=\"Second value of tetgen command line parameter -q.\",\n default=0.0, step=5.0, min=0.0, soft_min=0.0, soft_max=70, subtype='UNSIGNED')\n bpy.types.Scene.pmls_op_select_outliers_internal = bpy.props.FloatProperty(\n name=\"Min internal angle (deg)\", description=\"Vertices without larger iternal angle in connected triangles will be selected.\",\n default=5.0, step=25, min=0.0, soft_min=0.0, soft_max=30, subtype='UNSIGNED')\n bpy.types.Scene.pmls_op_select_outliers_dihedral = bpy.props.FloatProperty(\n name=\"Min dihedral angle (deg)\", description=\"Vertices with smaller dihedral angle on connected edges will be selected.\",\n default=5.0, step=25, min=0.0, soft_min=0.0, soft_max=45, subtype='UNSIGNED')\n\n\n bpy.types.Scene.pmls_op_smooth_by_surface_deform_hdg = bpy.props.StringProperty()\n bpy.types.Scene.pmls_op_smooth_by_surface_deform_pnt = bpy.props.StringProperty()\n\n bpy.types.Scene.pmls_op_smooth_by_surface_deform_unilap = bpy.props.BoolProperty(name=\"Uniform laplacian\", description=\"If true uniform weights will be used instead of edge lenghts. Less conservative deformation.\", default=True)\n bpy.types.Scene.pmls_op_smooth_by_surface_deform_tol = bpy.props.FloatProperty(\n name=\"Snap tolerance (cm):\", default=5.0, step=5, min=0.1, soft_min=0.1, soft_max=10000.0, subtype='DISTANCE')\n bpy.types.Scene.pmls_op_smooth_by_surface_deform_vox = bpy.props.FloatProperty(\n name=\"Voxel size for ray check (cm):\", default=7.0, step=5, min=0.000001, soft_min=1.0, soft_max=1000.0, subtype='DISTANCE')\n \n bpy.types.Scene.pmls_op_voxelized_union_vol = bpy.props.BoolProperty(name=\"Volumetric deform\", default=True)\n bpy.types.Scene.pmls_op_voxelized_union_unilap = bpy.props.BoolProperty(name=\"Uniform laplacian\", description=\"If true uniform weights will be used instead of edge lenghts. Less conservative deformation.\", default=True)\n bpy.types.Scene.pmls_op_voxelized_union_pre = bpy.props.BoolProperty(name=\"Remesh before deform\", description=\"If true remesh done before and after the deform else only after\", default=True)\n\n bpy.types.Scene.pmls_op_normal_union_pre = bpy.props.BoolProperty(name=\"Meshfix before union\", description=\"If true meshfix is done for input to guarantee success\", default=True)\n\n bpy.types.Scene.pmls_op_smooth_by_surface_deform_ray = bpy.props.BoolProperty(name=\"Ray check\", description=\"If true constraint 2 will be guaranteed\", default=True)\n\n bpy.types.Scene.pmls_op_split_selector = bpy.props.StringProperty()\n\n bpy.types.Scene.pmls_edit_simple = bpy.props.BoolProperty(name=\"Simple mode\", description=\"Faster, but less robus\", default=True)\n \n bpy.utils.register_class(PmlsStart)\n bpy.utils.register_class(PmlsStop)\n bpy.utils.register_class(PmlsConnect)\n bpy.utils.register_class(PmlsDisconnect)\n bpy.utils.register_class(PmlsLoadMat)\n bpy.utils.register_class(PmlsSaveMat)\n bpy.utils.register_class(PmlsUpdateDisplay)\n bpy.utils.register_class(PmlsDowdateEhedgehog)\n bpy.utils.register_class(PmlsRecalculateTurtles)\n bpy.utils.register_class(PmlsHideSelectedStations)\n bpy.utils.register_class(PmlsHideUnselectedStations)\n bpy.utils.register_class(PmlsRevealStations)\n bpy.utils.register_class(PmlsClearTurtles)\n bpy.utils.register_class(PmlsDeselectAllStations)\n bpy.utils.register_class(PmlsExtendHedgehog)\n bpy.utils.register_class(PmlsMergeHedgehogs)\n bpy.utils.register_class(PmlsCutBackAtBridges)\n bpy.utils.register_class(PmlsCreateMainCsv)\n bpy.utils.register_class(PmlsCreateSurveyCsvs)\n bpy.utils.register_class(PmlsImportCsv)\n bpy.utils.register_class(PmlsHedgehogUnion)\n bpy.utils.register_class(PmlsHedgehogUnionAlec)\n bpy.utils.register_class(PmlsSurfaceDeform)\n bpy.utils.register_class(PmlsSeparateTurtles)\n bpy.utils.register_class(PmlsVoxelizedUnion)\n bpy.utils.register_class(PmlsCreateVolMesh)\n bpy.utils.register_class(PmlsNormalUnion)\n bpy.utils.register_class(PmlsRegisterAsMesh)\n bpy.utils.register_class(PmlsSplit)\n bpy.utils.register_class(PmlsCopy)\n bpy.utils.register_class(PmlsCut)\n bpy.utils.register_class(PmlsDelete)\n bpy.utils.register_class(PmlsFill)\n bpy.utils.register_class(PmlsMapPointsToMesh)\n bpy.utils.register_class(PmlsSelectOutliers)\n\n bpy.utils.register_class(MessageOperator)\n bpy.utils.register_class(OkOperator)\n\n\n bpy.utils.register_class(PmlsPanel)\n bpy.utils.register_class(PmlsHedgehogPanel)\n bpy.utils.register_class(PmlsExtendedHedgehogPanel)\n bpy.utils.register_class(PmlsTurtlePanel)\n bpy.utils.register_class(PmlsExtendedTurtlePanel)\n bpy.utils.register_class(PmlsMultipleObjectPanel)\n bpy.utils.register_class(PmlsSqlitePanel)\n bpy.utils.register_class(PmlsDeformMeshPanel)\n \n\ndef unregister():\n bpy.utils.unregister_class(PmlsStart)\n bpy.utils.unregister_class(PmlsStop)\n bpy.utils.unregister_class(PmlsConnect)\n bpy.utils.unregister_class(PmlsDisconnect)\n bpy.utils.unregister_class(PmlsLoadMat)\n bpy.utils.unregister_class(PmlsSaveMat)\n bpy.utils.unregister_class(PmlsUpdateDisplay)\n bpy.utils.unregister_class(PmlsDowdateEhedgehog)\n bpy.utils.unregister_class(PmlsRecalculateTurtles)\n bpy.utils.unregister_class(PmlsHideSelectedStations)\n bpy.utils.unregister_class(PmlsHideUnselectedStations)\n bpy.utils.unregister_class(PmlsRevealStations)\n bpy.utils.unregister_class(PmlsClearTurtles)\n bpy.utils.unregister_class(PmlsDeselectAllStations)\n bpy.utils.unregister_class(PmlsExtendHedgehog)\n bpy.utils.unregister_class(PmlsMergeHedgehogs)\n bpy.utils.unregister_class(PmlsCutBackAtBridges)\n bpy.utils.unregister_class(PmlsCreateMainCsv)\n bpy.utils.unregister_class(PmlsCreateSurveyCsvs)\n bpy.utils.unregister_class(PmlsImportCsv)\n bpy.utils.unregister_class(PmlsHedgehogUnionAlec)\n bpy.utils.unregister_class(PmlsHedgehogUnion)\n bpy.utils.unregister_class(PmlsSurfaceDeform)\n bpy.utils.unregister_class(PmlsSeparateTurtles)\n bpy.utils.unregister_class(PmlsVoxelizedUnion)\n bpy.utils.unregister_class(PmlsCreateVolMesh)\n bpy.utils.unregister_class(PmlsNormalUnion)\n bpy.utils.unregister_class(PmlsRegisterAsMesh)\n bpy.utils.unregister_class(PmlsSplit)\n bpy.utils.unregister_class(PmlsCopy)\n bpy.utils.unregister_class(PmlsCut)\n bpy.utils.unregister_class(PmlsDelete)\n bpy.utils.unregister_class(PmlsFill)\n bpy.utils.unregister_class(PmlsMapPointsToMesh)\n bpy.utils.unregister_class(PmlsSelectOutliers)\n\n \n bpy.utils.unregister_class(MessageOperator)\n bpy.utils.unregister_class(OkOperator)\n \n \n bpy.utils.unregister_class(PmlsPanel)\n bpy.utils.unregister_class(PmlsHedgehogPanel)\n bpy.utils.unregister_class(PmlsExtendedHedgehogPanel)\n bpy.utils.unregister_class(PmlsTurtlePanel)\n bpy.utils.unregister_class(PmlsExtendedTurtlePanel)\n bpy.utils.unregister_class(PmlsMultipleObjectPanel)\n bpy.utils.unregister_class(PmlsSqlitePanel)\n bpy.utils.unregister_class(PmlsDeformMeshPanel)\n \n\nif __name__ == \"__main__\":\n register()\n \n","repo_name":"poormanslaserscanner/pmls4matlab","sub_path":"blender_addon/pmls.py","file_name":"pmls.py","file_ext":"py","file_size_in_byte":119366,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"7190476008","text":"# 42586. 기능 개발\n\ndef solution(progresses, speeds):\n ans, temp = [], []\n \n for i in range(len(progresses)): # 작업 일수 계산\n x = 100 - progresses[i]\n \n if (x//speeds[i]) == (x/speeds[i]): # 나누어 떨어지는 경우\n temp.append(x // speeds[i])\n else: # 나누어 떨어지지 않는 경우\n temp.append((x//speeds[i]) + 1)\n \n idx = 0 # 현재의 idx 기록\n for i in range(len(temp)): # 배포 계산(progresses가 없는 경우 실행 안됨)\n if not ans: # 값이 없는 경우(처음에만 실행됨)\n ans.append(1)\n else:\n if temp[i] <= temp[idx]: # 작업 일수가 더 작은 경우\n ans[-1] += 1\n else: # 더 큰 경우\n ans.append(1) # 새로 1을 추가\n idx = i\n \n return ans\n","repo_name":"Yookaser/Algorithm","sub_path":"Programmers/Level_2/pr42586.py","file_name":"pr42586.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9545592672","text":"import json, os, uuid, datetime\n\nfrom flask import session, jsonify, redirect, render_template, safe_join, g\nfrom flask_restful import Resource, fields, reqparse\nfrom redis import Redis\nfrom flask_sse import sse\n\nfrom xiyoumenapp import webapp, db\nfrom xiyoumenapp.models import Classroom, Users, Users_Classroom, Chatroom\nfrom xiyoumenapp.conference import Conference\nfrom xiyoumenapp.jsonapp import JsonManage\n\npage_path = safe_join(\"templates\", \"frontend\")\nfilename_teacher = \"webinar_teacher.html\"\nfilename_student = \"webinar_student.html\"\npage_teacher = safe_join(page_path, filename_teacher)\npage_student = safe_join(page_path, filename_student)\n\nparser = reqparse.RequestParser()\nparser.add_argument('txt')\nparser.add_argument('teaname')\nparser.add_argument('stuname')\nparser.add_argument('tealinkstatus')\nparser.add_argument('stulinkstatus')\nparser.add_argument('whiteboard')\n# parser.add_argument('whiteboard')\nparser.add_argument('pptposition')\nparser.add_argument('pptinfo')\n\n\n\nredis_store = Redis(charset='utf-8', decode_responses=True)\n\n\nclass Test(Resource):\n \"\"\"\n # Class Test test the connection of website\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET\n \"\"\"\n try:\n notice_resp = \"wellcome to xiyoumen.com\"\n return notice_resp\n except Exception as err:\n print(err)\n\n\nclass Login(Resource):\n \"\"\"\n # Class Login check access to link into ClassRoom and build session\n \"\"\"\n def get(self, classid, userid):\n \"\"\"\n # function get response http GET with login.html\n \"\"\"\n try:\n notice_err = \"You have no available for this class\"\n ins_json = JsonManage()\n ins_json.save_classinfo()\n ins_con = Conference(classid, userid)\n\n if ins_con.check_access() is None:\n return notice_err\n else:\n classroom = Classroom.query.filter_by(id=classid).all()\n classname = classroom[0].name\n users = Users.query.filter_by(id=userid).all()\n username = users[0].name\n roleid = users[0].roleid\n\n session['classid'] = classid\n session['userid'] = userid\n tempclassstr = classid.split(\"-\")\n session['classstr'] = ''.join(tempclassstr)\n session['roleid'] = roleid\n print(session['classstr'])\n redis_store.set('classname:'+classid, classname)\n redis_store.set('username:'+userid, username)\n redis_store.set('roleid:'+userid, roleid)\n\n # print(session) \n return redirect(fields.url_for('classapi.class_ep'))\n except Exception as err:\n print(err)\n\n\nclass ClassRoom(Resource):\n \"\"\"\n # Class classRoom is resource of \"class_ep\"\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET with classroom.html\n \"\"\"\n try:\n if ('classid' in session) and ('userid' in session):\n classid =session['classid']\n userid = session['userid']\n roleid = int(redis_store.get(\"roleid:\"+userid))\n if roleid == 1:\n redis_store.delete('tea_list:' + classid)\n redis_store.delete('stu_list:' + classid)\n redis_store.delete('tealinkstatus_dict:' + classid)\n redis_store.delete('stulinkstatus_dict:' + classid)\n # redis_store.delete('ass_list:' + classid)\n # redis_store.delete('asslinkstatus_dict:' + classid)\n # redis_store.delete('teavideostatus_dict:' + classid)\n # redis_store.delete('assvideostatus_dict:' + classid)\n # redis_store.delete('stuvideostatus_dict:' + classid)\n # redis_store.delete('teasoundstatus_dict:' + classid)\n # redis_store.delete('asssoundstatus_dict:' + classid)\n # redis_store.delete('stusoundstatus_dict:' + classid)\n redis_store.delete(\"chatnum:\"+classid)\n redis_store.delete('chatcontent:' + classid)\n redis_store.delete('whiteboard:' + classid)\n redis_store.delete('ppt:' + classid)\n redis_store.set(\"chatnum:\"+classid, 0)\n\n return webapp.send_static_file(page_teacher)\n elif roleid == 2:\n return webapp.send_static_file(page_student)\n else:\n return \"There are some problems\"\n except Exception as err:\n print(\"Fail to get classroom info\")\n print(err)\n\n\nclass Token(Resource):\n \"\"\"\n # This class returns token json\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET with token json\n \"\"\"\n try:\n if ('classid' in session) and ('userid' in session):\n classid = session['classid']\n userid = session['userid']\n username = redis_store.get('username:'+userid)\n ins_conference = Conference(classid, userid)\n tmptoken = ins_conference.get_accesstoken()\n # tmptoken = jsonify(identity=tmptoken.identity,\n # token=tmptoken.to_jwt())\n mytoken = dict(identity=username,\n token=tmptoken.to_jwt())\n print('Success to create token')\n print(mytoken)\n return mytoken\n else:\n # return redirect(fields.url_for('login_ep'))\n return \"There are some problme about token\"\n except Exception as err:\n print(err)\n\n\nclass Whiteboard(Resource):\n \"\"\"\n # This class returns whiteboard json\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET with token json\n \"\"\"\n try:\n if ('classid' in session) and ('userid' in session):\n classid = session['classid']\n mydrawing = redis_store.hgetall('whiteboard:' + classid)\n # print(\"Start to get whiteboard object json {0}\".format(mydrawing))\n print('Success to get whiteboard')\n return mydrawing\n else:\n # return redirect(fields.url_for('login_ep'))\n return \"There are some problme about token\"\n except Exception as err:\n print(err)\n\n def post(self):\n\n \"\"\"\n # function post response http POST whiteboard\n \"\"\"\n try:\n print(\"ready to receive post message\")\n args = parser.parse_args()\n if ('classid' in session) and ('userid' in session):\n classid = session['classid']\n whiteboard = args['whiteboard']\n print(\"Start to parse whiteboard object json {0}\".format(whiteboard))\n drawing_dict = json.loads(whiteboard)\n redis_store.hmset(\"whiteboard:\" + classid, drawing_dict)\n return \"Success to add new whiteboard\"\n else:\n return \"You have no right to do this\"\n except Exception as err:\n print(\"Fail to add whiteboard\")\n print(err)\n\n\nclass PPT(Resource):\n \"\"\"\n # Class State is resource of \"state_ep\"\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET\n \"\"\"\n try:\n if ('classid' in session) and ('userid' in session):\n classid = session['classid']\n ppt_info = {}\n dir_file = os.path.dirname(os.path.abspath(__file__))\n dir_ppt = safe_join(dir_file, \"static\")\n dir_ppt = safe_join(dir_ppt, \"courseware\")\n dir_ppt = safe_join(dir_ppt, session['classid'])\n dir_ppt = safe_join(dir_ppt, \"ppt\")\n print(\"PPT directory is \" + dir_ppt)\n filelist = os.listdir(dir_ppt)\n ppt_info[\"pptlist\"] = filelist\n if (redis_store.exists(\"ppt:\"+classid)):\n ppt_info[\"pptinfo\"] = redis_store.get(\"ppt:\" + classid)\n return ppt_info\n else:\n return \"You have no right to do this\"\n except Exception as err:\n print(\"Fail to get info\")\n print(err)\n\n def post(self):\n\n \"\"\"\n # function post response http POST position with info\n \"\"\"\n try:\n print('Begin to post')\n print(session)\n args = parser.parse_args()\n classid = session['classid']\n userid = session['userid']\n classstr = session['classstr']\n roleid = int(redis_store.get(\"roleid:\"+userid))\n print(roleid)\n if ('classid' in session) and ('userid' in session):\n if (roleid == 1):\n pptposition = args['pptposition']\n pptinfo = args['pptinfo']\n if (pptposition is not None):\n sse.publish({\"pptposition\":pptposition},\n type=\"newposition\"+classstr,\n channel=\"changed.ppt\" )\n if (pptinfo is not None):\n sse.publish({\"pptinfo\":pptinfo},\n type=\"pptinfo\"+classstr,\n channel=\"changed.ppt\" )\n redis_store.set('ppt:'+classid, pptinfo)\n except Exception as err:\n print(\"Fail to get info\")\n print(err)\n\n\nclass Info(Resource):\n \"\"\"\n # Class Info is resource of \"info_ep\"\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET with classroom.html\n \"\"\"\n try:\n userinfo = {}\n if ('classid' in session) and ('userid' in session):\n print(\"Begin to get /info/\")\n print(session)\n classid = session['classid']\n userid = session['userid']\n classstr = session['classstr']\n\n userinfo[\"classname\"] = redis_store.get(\"classname:\"+classid)\n userinfo[\"username\"] = redis_store.get(\"username:\"+userid)\n userinfo[\"classid\"] = session[\"classid\"]\n userinfo[\"userid\"] = session[\"userid\"]\n userinfo[\"classstr\"] = session['classstr']\n userinfo[\"roleid\"] = int(redis_store.get(\"roleid:\"+userid))\n roleid = userinfo[\"roleid\"]\n\n if ( not redis_store.exists(\"stu_list:\"+classid) and roleid == 1 ):\n print(\"first time load redis store\")\n print(session)\n uc = Users_Classroom.query.filter_by(classid=classid).all()\n tid_list = [ti.userid for ti in uc]\n print(tid_list)\n\n teachers = Users.query.filter_by(roleid=1).all()\n tea_list = []\n for tea in teachers:\n if tea.id in tid_list:\n tea_list.append(tea.name)\n\n students = Users.query.filter_by(roleid=2).all()\n stu_list = []\n for stu in students:\n if stu.id in tid_list:\n stu_list.append(stu.name)\n\n for ti in tea_list:\n redis_store.lpush(\"tea_list:\"+classid, ti)\n redis_store.hset('tealinkstatus_dict:'+classid, ti, 0)\n\n for si in stu_list:\n redis_store.lpush(\"stu_list:\"+classid, si)\n redis_store.hset('stulinkstatus_dict:'+classid, si, 0)\n\n tea_list = redis_store.lrange('tea_list:'+classid, 0, -1)\n stu_list = redis_store.lrange('stu_list:'+classid, 0, -1)\n\n tealinkstatus_hash = redis_store.hgetall('tealinkstatus_dict:'+classid)\n stulinkstatus_hash = redis_store.hgetall('stulinkstatus_dict:'+classid)\n\n tealinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n stulinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n\n for (k, v) in tealinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n tealinkstatus_dict[str(ni)].append(k)\n break\n\n for (k, v) in stulinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n stulinkstatus_dict[str(ni)].append(k)\n break\n\n if userinfo['roleid'] == 1:\n userinfo[\"teacher\"] = tea_list\n userinfo[\"student\"] = stu_list\n\n userinfo[\"tealinkstatuslist\"] = tealinkstatus_dict\n userinfo[\"stulinkstatuslist\"] = stulinkstatus_dict\n\n\n if userinfo['roleid'] == 2:\n userinfo[\"tealinkstatuslist\"] = tealinkstatus_dict\n userinfo[\"teacher\"] = tea_list\n\n print(userinfo)\n print('Success to get userinfo')\n return userinfo\n else:\n return userinfo\n except Exception as err:\n print(\"Fail to get info\")\n print(err)\n\n def post(self):\n \"\"\"\n # function post response http POST teastatus with info\n \"\"\"\n try:\n print('Begin to post')\n print(session)\n args = parser.parse_args()\n classid = session['classid']\n userid = session['userid']\n classstr = session['classstr']\n username = redis_store.get('username:'+userid)\n roleid = int(redis_store.get(\"roleid:\"+userid))\n print(roleid)\n\n userinfo = {}\n userinfo[\"classname\"] = redis_store.get(\"classname:\"+classid)\n userinfo[\"username\"] = redis_store.get(\"username:\"+userid)\n userinfo[\"classid\"] = session[\"classid\"]\n userinfo[\"userid\"] = session[\"userid\"]\n userinfo[\"classstr\"] = session['classstr']\n userinfo[\"roleid\"] = int(redis_store.get(\"roleid:\"+userid))\n\n if ('classid' in session) and ('userid' in session):\n\n if roleid == 1:\n teaname = args['teaname']\n tealinkstatus = args['tealinkstatus']\n print(\"print post info\");\n print(teaname);\n print(tealinkstatus);\n\n if tealinkstatus is not None:\n print(\"Teacher status changes to {0}\".format(str(tealinkstatus)))\n redis_store.hset('tealinkstatus_dict:'+classid,\n teaname, tealinkstatus)\n\n tealinkstatus_hash = redis_store.hgetall('tealinkstatus_dict:'+classid)\n tealinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n for (k, v) in tealinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n tealinkstatus_dict[str(ni)].append(k)\n break\n\n sse.publish({\"tealinkstatus\":tealinkstatus_dict},\n type=\"newtealinkstatus\"+classstr,\n channel=\"changed.status\")\n print(tealinkstatus_dict)\n print(\"newtealinkstatus\"+classstr)\n\n\n stuname = args['stuname']\n stulinkstatus = args['stulinkstatus']\n\n if stulinkstatus is not None:\n print(\"Student status changes to {0}\".format(str(stulinkstatus)))\n redis_store.hset('stulinkstatus_dict:'+classid,\n stuname, stulinkstatus)\n\n stulinkstatus_hash = redis_store.hgetall('stulinkstatus_dict:'+classid)\n stulinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n for (k, v) in stulinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n stulinkstatus_dict[str(ni)].append(k)\n break\n\n sse.publish({\"stulinkstatus\":stulinkstatus_dict},\n type=\"newstulinkstatus\"+classstr,\n channel=\"changed.status\")\n print(stulinkstatus_dict)\n print(\"newstulinkstatus\"+classstr)\n\n tea_list = redis_store.lrange('tea_list:'+classid, 0, -1)\n stu_list = redis_store.lrange('stu_list:'+classid, 0, -1)\n\n tealinkstatus_hash = redis_store.hgetall('tealinkstatus_dict:'+classid)\n stulinkstatus_hash = redis_store.hgetall('stulinkstatus_dict:'+classid)\n\n tealinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n stulinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n\n for (k, v) in tealinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n tealinkstatus_dict[str(ni)].append(k)\n break\n\n for (k, v) in stulinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n stulinkstatus_dict[str(ni)].append(k)\n break\n\n userinfo[\"teacher\"] = tea_list\n userinfo[\"student\"] = stu_list\n userinfo[\"tealinkstatuslist\"] = tealinkstatus_dict\n userinfo[\"stulinkstatuslist\"] = stulinkstatus_dict\n\n print(\"Success to update status in session\")\n return userinfo\n elif roleid == 2:\n stuname = args['stuname']\n stulinkstatus = args['stulinkstatus']\n print(\"Student status changes to {0}\".format(str(stulinkstatus)))\n redis_store.hset('stulinkstatus_dict:'+classid,\n stuname, stulinkstatus)\n\n stulinkstatus_hash = redis_store.hgetall('stulinkstatus_dict:'+classid)\n stulinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n for (k, v) in stulinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n stulinkstatus_dict[str(ni)].append(k)\n break\n\n sse.publish({\"stulinkstatus\":stulinkstatus_dict},\n type=\"newstulinkstatus\"+classstr,\n channel=\"changed.status\")\n print(stulinkstatus_dict)\n print(\"newstulinkstatus\"+classstr)\n\n tea_list = redis_store.lrange('tea_list:'+classid, 0, -1)\n tealinkstatus_hash = redis_store.hgetall('tealinkstatus_dict:'+classid)\n tealinkstatus_dict = {\"0\": [], \"1\": [], \"2\": [], \"3\": []}\n for (k, v) in tealinkstatus_hash.items():\n for ni in range(4):\n if v == str(ni):\n tealinkstatus_dict[str(ni)].append(k)\n break\n\n userinfo[\"teacher\"] = tea_list\n userinfo[\"tealinkstatuslist\"] = tealinkstatus_dict\n\n print(\"Success to update status in session\")\n return userinfo\n else:\n return \"role id is not existed\"\n else:\n print(\"You have no right to do this\")\n return \"You have no right to do this\"\n except Exception as err:\n print(\"Fail to update status\")\n print(err)\n\n\nclass ChatList(Resource):\n \"\"\"\n # Class ChatList is resource of \"chat_ep\"\n \"\"\"\n def get(self):\n \"\"\"\n # function get response http GET with classroom.html\n \"\"\"\n try:\n dic_chatlist = {}\n nowtime = datetime.datetime.now()\n if ('classid' in session) and (\n 'userid' in session):\n classid = session['classid']\n classname = redis_store.get('classname:'+classid)\n\n dic_chatlist = dict(classname=classname, chatcontent=[])\n\n chatcontent_list = redis_store.lrange(\"chatcontent:\"+classid, 0, -1)\n print(\"Read from redis\")\n print(chatcontent_list)\n for i in range(0,len(chatcontent_list),5):\n dic_chatitem = dict(chatnum=chatcontent_list[i+4],\n username=chatcontent_list[i+3],\n createtime=chatcontent_list[i+2],\n rolename=chatcontent_list[i+1],\n question=chatcontent_list[i+0])\n dic_chatlist[\"chatcontent\"].append(dic_chatitem)\n print(dic_chatlist)\n return dic_chatlist\n except Exception as err:\n print(\"Fail to get info\")\n print(err)\n\n def post(self):\n\n \"\"\"\n # function post response http POST txt to chatlist\n \"\"\"\n try:\n print(\"ready to receive post message\")\n args = parser.parse_args()\n if ('classid' in session) and ('userid' in session):\n classid = session['classid']\n userid = session['userid']\n classstr = session['classstr']\n question = args['txt']\n chatnum = str(int(redis_store.get(\"chatnum:\"+classid))+1)\n username = redis_store.get(\"username:\"+userid)\n roleid = redis_store.get(\"roleid:\"+userid)\n createtime = datetime.datetime.now()\n\n if roleid == '1':\n rolename = \"teacher\"\n elif roleid == '2':\n rolename = \"student\"\n createtimestr = createtime.strftime(\"%Y-%m-%d %H:%M:%S\")\n newmessage = dict(chatnum=chatnum,\n username=username,\n createtime=createtimestr,\n rolename=rolename,\n question=question)\n print(\"Ready to save redis\")\n redis_store.lpush(\"chatcontent:\"+classid, chatnum)\n redis_store.lpush(\"chatcontent:\"+classid, username)\n redis_store.lpush(\"chatcontent:\"+classid, createtimestr)\n redis_store.lpush(\"chatcontent:\"+classid, rolename)\n redis_store.lpush(\"chatcontent:\"+classid, question)\n redis_store.set(\"chatnum:\"+classid, chatnum)\n print(newmessage)\n sse.publish({\"message\":newmessage},\n type=(\"newchatmessage\"+classstr),\n channel=\"changed.chatroom\")\n # print(\"newchatmessage\"+classstr)\n # print(len(\"newchatmessage\"+classstr))\n return newmessage\n else:\n return \"You have no right to do this\"\n except Exception as err:\n print(\"Fail to add chat text\")\n print(err)\n","repo_name":"xiyoumenwebdev/xiyoumen_test","sub_path":"xiyoumenapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26026579679","text":"# referencia: https: // github.com/INF800/Numpy-FFNN/blob/master/FFNN.py\n\nimport numpy as np\nfrom sklearn.datasets import make_classification\n\n\ndef nonlin(x, deriv=False):\n if deriv == True:\n return (x*(1-x))\n return (1/(1+np.exp(-x)))\n\n\nx, y = make_classification(n_samples=100, n_features=3,\n n_informative=3, n_redundant=0, n_classes=2)\n\nnp.random.seed(1)\n# feedforward\nw1 = 2*np.random.random((3, 1)) - 1\nw2 = 2*np.random.random((1, 100)) - 1\n\nfor j in range(60000):\n\n l0 = x\n l1 = nonlin(np.dot(l0, w1))\n l2 = nonlin(np.dot(l1, w2))\n\n # BACKPROPGATION\n l2_error = y - l2\n\n # printing status\n if(j % 10000) == 0:\n print('Error : ' + str(np.mean(np.abs(l2_error))))\n\n # calculte deltas\n l2_delta = l2_error*nonlin(l2, deriv=True)\n l1_error = l2_delta.dot(w2.T)\n l1_delta = l1_error*nonlin(l1, deriv=True)\n\n # update our synapses\n w2 += l1.T.dot(l2_delta)\n w1 += l0.T.dot(l1_delta)\n\nprint('Output after training')\nprint(l2)\n","repo_name":"MarcoRamirezGT/LAB04_IA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72715755666","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\n\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom .models import Product, Movement\n# Create your tests here.\n\n\nclass MovementTests(APITestCase):\n\n def setUp(self):\n self.products = [\n {'name': 'Water', 'price': \"2.50\", 'sku': 'WATER-500'},\n {'name': 'Soda', 'price': \"4.00\", 'sku': 'SODA-350'},\n {'name': 'Sugar', 'price': \"3.00\", 'sku': 'SUGAR-1000'}\n ]\n for values, klass in [(self.products, Product)]:\n for props in values:\n klass.objects.create(**props)\n\n def test_create_mov(self):\n url = reverse('movement-list')\n data = {\n 'product': reverse('product-detail', args=[Product.objects.get(name='Water').id]),\n 'quantity': 5,\n 'kind': Movement.IN\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Movement.objects.count(), 1)\n self.assertEqual(Product.objects.get(name='Water').quantity, 5)\n\n data = {\n 'product': reverse('product-detail', args=[Product.objects.get(name='Water').id]),\n 'quantity': 6,\n 'kind': Movement.OUT\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(Movement.objects.count(), 1)\n self.assertEqual(Product.objects.get(name='Water').quantity, 5)\n","repo_name":"gilvanleal/inventory","sub_path":"stock/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10771603129","text":"'''\r\nTP1\r\n1) Generar un mazo de cartas españolas al azar, se entiende por mazo de cartas a un\r\nconjunto de 48 cartas no ordenadas y almacenarlas en una pila de cartas.\r\n(Pista: investigar el tipo de datos set para obtener las 48 cartas sin repeticiones)\r\n2) A continuación se pide programar el siguiente juego:\r\nRepartir una carta a cada uno de 4 jugadores formando así una mano, gana la mano el\r\njugador que tenía la carta de mayor valor. El premio del ganador de la mano es\r\nacumular las cartas de la mano en su propia pila, es decir, que cada jugador poseerá\r\nuna pila de cartas inicialmente en estado vacío. Repetir este proceso hasta que no\r\nqueden cartas en el mazo. Gana el juego el jugador cuya pila de cartas tenga\r\nacumulado el mayor valor sumando el valor numérico de todas las cartas que se\r\nencuentran en su pila. Si hay dos o más jugadores con igual suma se tendrá en cuenta\r\nla cantidad de manos ganadas para desempatar el juego.\r\n'''\r\nfrom random import randint\r\n\r\n\r\ndef generarCarta():\r\n '''Devuelve una tupla conformada por el valor de carta (1 a 12) y el palo'''\r\n palo = ('O','B','C','E')\r\n return (randint(1,12),palo[randint(0,3)])\r\n\r\n\r\ndef generarMazo():\r\n ''' Devuelve un mazo que es un set con 48 tuplas'''\r\n mazo = set()\r\n while len(mazo) < 48:\r\n mazo.add(generarCarta())\r\n return mazo\r\n\r\n\r\ndef repartirCarta(mazo):\r\n '''saca el ultimo item del set mazo y lo devuelve, es una tupla'''\r\n return mazo.pop()\r\n\r\n\r\ndef generarManos(mazo):\r\n ''' genera una lista con cuatr cartas(tuplas) cada una pertenece a un jugador distinto'''\r\n manos = []\r\n while len(manos) < 4:\r\n manos.append(repartirCarta(mazo))\r\n return manos\r\n\r\n\r\ndef cartaMayor(manos):\r\n '''Devulve indice de jugador que gana'''\r\n cMayor = 0\r\n j = 0 \r\n for i in range(4):\r\n # como no dice que pasa si hay dos valores iguales en la mano gana el primero que se registra\r\n if cMayor < manos[i][0]:\r\n cMayor = manos[i][0]\r\n j = i\r\n return j\r\n\r\n\r\ndef ganadorFinal(jgds,contMG):\r\n sumas = {'1': 0,'2': 0,'3': 0,'4': 0}\r\n for j in jgds:\r\n suma = 0\r\n for t in jgds[j]:\r\n suma += t[0]\r\n sumas[j] = suma\r\n maxi = 0\r\n i = ''\r\n for j in sumas:\r\n if maxi < sumas[j]:\r\n maxi = sumas[j]\r\n i = j\r\n elif maxi == sumas[j]:\r\n if contMG[i] < contMG[j]:\r\n i = j \r\n # print(sumas)\r\n return i\r\n\r\n\r\ndef juegoCartas():\r\n jgds = {'1': [],'2': [],'3': [],'4': []}\r\n mazo = generarMazo()\r\n contMG = {'1': 0,'2': 0,'3': 0,'4': 0}\r\n while len(mazo) > 0:\r\n manos = generarManos(mazo)\r\n ganaMano = str(cartaMayor(manos)+1)\r\n contMG[ganaMano] += 1 \r\n jgds[ganaMano].extend(manos)\r\n print('El ganador es el jugador {}'.format(ganadorFinal(jgds, contMG)))\r\n # print(contMG)\r\n\r\n\r\njuegoCartas()\r\n\r\n","repo_name":"CdoubleO/Python_Practica","sub_path":"TPs/cartas.py","file_name":"cartas.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"74926909584","text":"\"\"\"webprogramming URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom cricketclubinformation.views import*\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', home, name = 'home'),\n path('player/registration/form/', player_registration, name = \"player_registration\"),\n path('contact/form/', contact, name = \"contact\"),\n path('club/registration/form/', club_registration, name = 'club_registraoin'),\n path('player/performance/form/', player_performance, name = 'player_performance'),\n path('match/information/form/', match_information, name = 'match_information' ),\n path('team/information/form/', team_information, name = 'team_information'),\n\n]\n","repo_name":"alhasib/webprogramming","sub_path":"webprogramming/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"23808268490","text":"import asyncio\nimport itertools\nimport logging\nimport queue\nimport sys\nimport threading\nimport time\nimport traceback\nfrom collections import deque\nfrom typing import Generic, List, Optional, Union\n\nimport av\nfrom aiortc import MediaStreamTrack\nfrom aiortc.mediastreams import MediaStreamError\n\nfrom .models import AudioProcessorT, FrameT, ProcessorT, VideoProcessorT\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nclass MediaProcessTrack(MediaStreamTrack, Generic[ProcessorT, FrameT]):\n def __init__(self, track: MediaStreamTrack, processor: ProcessorT):\n super().__init__() # don't forget this!\n self.track = track\n self.processor: ProcessorT = processor\n\n @self.track.on(\"ended\")\n def on_input_track_ended():\n logger.debug(\"Input track %s ended. Stop self %s\", self.track, self)\n self.stop()\n\n async def recv(self):\n if self.readyState != \"live\":\n raise MediaStreamError\n\n frame = await self.track.recv()\n\n new_frame = self.processor.recv(frame)\n new_frame.pts = frame.pts\n new_frame.time_base = frame.time_base\n\n return new_frame\n\n def stop(self):\n super().stop()\n\n if hasattr(self.processor, \"on_ended\"):\n self.processor.on_ended()\n\n\nclass VideoProcessTrack(MediaProcessTrack[VideoProcessorT, av.VideoFrame]):\n kind = \"video\"\n processor: VideoProcessorT\n\n\nclass AudioProcessTrack(MediaProcessTrack[AudioProcessorT, av.AudioFrame]):\n kind = \"audio\"\n processor: AudioProcessorT\n\n\n__SENTINEL__ = \"__SENTINEL__\"\n\n# See https://stackoverflow.com/a/42007659\nmedia_processing_thread_id_generator = itertools.count()\n\n\nclass AsyncMediaProcessTrack(MediaStreamTrack, Generic[ProcessorT, FrameT]):\n def __init__(\n self,\n track: MediaStreamTrack,\n processor: ProcessorT,\n stop_timeout: Optional[float] = None,\n ):\n super().__init__() # don't forget this!\n\n self.track = track\n self.processor: ProcessorT = processor\n\n self._last_out_frame: Union[FrameT, None] = None\n\n self.stop_timeout = stop_timeout\n\n self._thread = None\n\n def _start(self):\n if self._thread:\n return\n\n self._in_queue: queue.Queue = queue.Queue()\n self._out_lock = threading.Lock()\n self._out_deque: deque = deque([])\n\n self._thread = threading.Thread(\n target=self._run_worker_thread,\n name=f\"async_media_processor_{next(media_processing_thread_id_generator)}\",\n daemon=True,\n )\n self._thread.start()\n\n @self.track.on(\"ended\")\n def on_input_track_ended():\n logger.debug(\"Input track %s ended. Stop self %s\", self.track, self)\n self.stop()\n\n def _run_worker_thread(self):\n try:\n self._worker_thread()\n except Exception:\n logger.error(\"Error occurred in the WebRTC thread:\")\n\n exc_type, exc_value, exc_traceback = sys.exc_info()\n for tb in traceback.format_exception(exc_type, exc_value, exc_traceback):\n for tbline in tb.rstrip().splitlines():\n logger.error(tbline.rstrip())\n\n async def _fallback_recv_queued(self, frames: List[FrameT]) -> FrameT:\n \"\"\"\n Used as a fallback when the processor does not have its own `recv_queued`.\n \"\"\"\n if len(frames) > 1:\n logger.warning(\n \"Some frames have been dropped. \"\n \"`recv_queued` is recommended to use instead.\"\n )\n if self.processor.recv:\n return [self.processor.recv(frames[-1])]\n\n return frames[-1]\n\n def _worker_thread(self):\n loop = asyncio.new_event_loop()\n\n tasks: List[asyncio.Task] = []\n\n while True:\n # Read frames from the queue\n item = self._in_queue.get()\n if item == __SENTINEL__:\n break\n\n queued_frames = [item]\n\n stop_requested = False\n while not self._in_queue.empty():\n item = self._in_queue.get_nowait()\n if item == __SENTINEL__:\n stop_requested = True\n break\n else:\n queued_frames.append(item)\n if stop_requested:\n break\n\n if len(queued_frames) == 0:\n raise Exception(\"Unexpectedly, queued frames do not exist\")\n\n # Set up a task, providing the frames.\n if hasattr(self.processor, \"recv_queued\"):\n coro = self.processor.recv_queued(queued_frames)\n else:\n coro = self._fallback_recv_queued(queued_frames)\n\n task = loop.create_task(coro=coro)\n tasks.append(task)\n\n # NOTE: If the execution time of recv_queued() increases\n # with the length of the input frames,\n # it increases exponentially over the calls.\n # Then, the execution time has to be monitored.\n start_time = time.monotonic()\n done, not_done = loop.run_until_complete(\n asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)\n )\n elapsed_time = time.monotonic() - start_time\n\n if (\n elapsed_time > 10\n ): # No reason for 10 seconds... It's an ad-hoc decision.\n raise Exception(\n \"recv_queued() or recv() is taking too long to execute, \"\n f\"{elapsed_time}s.\"\n )\n\n if len(done) > 1:\n raise Exception(\"Unexpectedly multiple tasks have finished\")\n\n done_idx = tasks.index(task)\n old_tasks = tasks[:done_idx]\n for old_task in old_tasks:\n logger.info(\"Cancel an old task %s\", task)\n old_task.cancel()\n tasks = [t for t in tasks if not t.done()]\n\n finished = done.pop()\n new_frames = finished.result()\n\n with self._out_lock:\n if len(self._out_deque) > 1:\n logger.warning(\n \"Not all the queued frames have been consumed, \"\n \"which means the processing and consuming threads \"\n \"seem not to be synchronized.\"\n )\n firstitem = self._out_deque.popleft()\n self._out_deque.clear()\n self._out_deque.append(firstitem)\n\n self._out_deque.extend(new_frames)\n\n def stop(self):\n super().stop()\n\n self.track.stop()\n self._in_queue.put(__SENTINEL__)\n self._thread.join(self.stop_timeout)\n\n if hasattr(self.processor, \"on_ended\"):\n self.processor.on_ended()\n\n async def recv(self):\n if self.readyState != \"live\":\n raise MediaStreamError\n\n self._start()\n\n frame = await self.track.recv()\n self._in_queue.put(frame)\n\n new_frame = None\n with self._out_lock:\n if len(self._out_deque) > 0:\n new_frame = self._out_deque.popleft()\n\n if new_frame is None:\n new_frame = self._last_out_frame\n\n if new_frame:\n self._last_out_frame = new_frame\n new_frame.pts = frame.pts\n new_frame.time_base = frame.time_base\n\n return new_frame\n\n return frame\n\n\nclass AsyncVideoProcessTrack(AsyncMediaProcessTrack[VideoProcessorT, av.VideoFrame]):\n kind = \"video\"\n processor: VideoProcessorT\n\n\nclass AsyncAudioProcessTrack(AsyncMediaProcessTrack[AudioProcessorT, av.AudioFrame]):\n kind = \"audio\"\n processor: AudioProcessorT\n","repo_name":"whitphx/streamlit-webrtc","sub_path":"streamlit_webrtc/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":7778,"program_lang":"python","lang":"en","doc_type":"code","stars":978,"dataset":"github-code","pt":"48"}
+{"seq_id":"7433789346","text":"import sys\nfrom collections import defaultdict\nfrom collections import Counter\nfrom collections import deque\n\nclass Solution:\n\n \n def allCellsDistOrder(self, R: int, C: int, r0: int, c0: int):\n res = []\n for i in range(R):\n for j in range(C):\n res.append([i, j])\n\n ret = [[i, j] for i in range(R) for j in range(C)]\n\n print(res, ret)\n #res.sort(key=lambda x: abs(x[0] - r0) + abs(x[1] - c0))\n\n return res\n\n\n def allCellsDistOrder_1(self, R: int, C: int, r0: int, c0: int):\n res = []\n visit = [[0] * C for _ in range(R)]# [[0, 0] for _ in range(R) for _ in range(C)]\n \n #print(visit)\n queue = deque()\n\n queue.append([r0, c0])\n\n lens = R * C\n i = 0\n while i < lens and queue:\n #print(queue)\n\n point = queue.popleft()\n \n tr = point[0]\n tc = point[1]\n\n if tr < 0 or tr >= R:\n continue\n\n if tc < 0 or tc >= C:\n continue\n \n #print(point, visit[tr][tc], visit)\n if visit[tr][tc] == 0:\n res.append(point)\n #print(res)\n visit[tr][tc] = 1\n\n \n #queue += [[tr + 1, tc], [tr - 1, tc], [tr, tc + 1], [tr, tc - 1]]\n queue.append([tr + 1, tc])\n queue.append([tr - 1, tc])\n queue.append([tr, tc + 1])\n queue.append([tr, tc - 1])\n\n i += 1\n \n\n\n return res\n \n \n \n\n\nif __name__ == \"__main__\":\n solution = Solution()\n nums1 = 2\n m = 2\n\n nums2 = 0\n n = 1\n\n result = solution.allCellsDistOrder_1(nums1, m, nums2, n)\n\n #print(solution.ls)\n\n print( result)","repo_name":"geniuscynic/leetcode","sub_path":"python/1030. 距离顺序排列矩阵单元格.py","file_name":"1030. 距离顺序排列矩阵单元格.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41725621437","text":"import random\nfrom typing import List\n\nimport torch\n\n\nclass AddNoise:\n\n def __init__(self,\n noise_samples: List[torch.Tensor],\n noise_class_indices: List[int],\n p: float = 0.8,\n max_noise_level: float = 1.0):\n \"\"\"Randomly add noise to a data sample, where noise is only added if the input sample is not\n already a noise signal itself. Data is clipped at (-1, 1) when noise is added. Use together\n with `torch_mate.data.utils.LabelDependentTransform` to apply to any dataset.\n\n Args:\n noise_samples (List[torch.Tensor]): List of noisy data to add to input data\n noise_class_indices (List[int]): Indices of classes that represent noise classes\n p (float, optional): Probability of applying noise. Defaults to 0.8.\n max_noise_level (float, optional): Maximum noise level multiplier. Defaults to 1.0.\n \"\"\"\n self.noise_samples = noise_samples\n self.noise_class_indices = noise_class_indices\n\n self.p = p\n self.max_noise_level = max_noise_level\n\n def __call__(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n noise_level = random.random() * self.max_noise_level\n\n # https://github.com/castorini/honk/blob/c3aae750c428520ba340961bddd526f9c999bb93/utils/model.py#L301\n if not y in self.noise_class_indices:\n if random.random() < self.p:\n bg_noise = random.choice(self.noise_samples)\n\n return torch.clip(noise_level * bg_noise + x, -1, 1)\n else:\n return x\n else:\n return torch.clip(noise_level * x, -1, 1)\n","repo_name":"V0XNIHILI/torch-mate","sub_path":"src/torch_mate/data/transforms/AddNoise.py","file_name":"AddNoise.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29226418009","text":"\"\"\"\nTake the number 192 and multiply it by each of 1, 2, and 3:\n\n 192 × 1 = 192\n 192 × 2 = 384\n 192 × 3 = 576\n\nBy concatenating each product we get the 1 to 9 pandigital,\n192384576. We will call 192384576 the concatenated product of\n192 and (1,2,3)\n\nThe same can be achieved by starting with 9 and multiplying by\n1, 2, 3, 4, and 5, giving the pandigital, 918273645,\nwhich is the concatenated product of 9 and (1,2,3,4,5).\n\nWhat is the largest 1 to 9 pandigital 9-digit number that can be\nformed as the concatenated product of an integer with (1,2, ... , n)\nwhere n > 1?\n\"\"\"\n\ndef isPandigital(number):\n\n # must be divisible by 9\n # (sum of all digits should be 45)\n if number % 9 != 0 : return False\n\n digits = [0]*10; digits[0] = 1\n\n while number:\n digit = number % 10; number //= 10\n if digits[digit] : return False # early double check\n digits[digit] = 1\n\n if all(digits) : return True\n\n return False\n\ndef concatenate_numbers(*numbers):\n str_buffer = \"\"\n for number in numbers: str_buffer += (str(number))\n return int(str_buffer)\n\n# try a number\nupper_limit = 987654321\n\n# 9 already results in 91 .......\n# next hihger number must start with at least 92\nvalid_pandigitals = []\n\nfor number in range(92,9876):\n\n concatenated_sum = 0\n\n # just try (1,2, ... , n)\n # stop when new n multiplication results in a number > upper limit\n # check result for valid pandigital\n for n in range(1,10):\n\n tmp = concatenate_numbers(concatenated_sum, number*n)\n\n if tmp > upper_limit :\n break\n else:\n concatenated_sum = tmp\n\n if isPandigital(concatenated_sum) :\n valid_pandigitals.append(concatenated_sum)\n\n\n\nprint(max(valid_pandigitals))\n\n\n","repo_name":"mccornet/project_euler_2014","sub_path":"problem_038.py","file_name":"problem_038.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73872957264","text":"import sys\nimport itertools\nimport copy\nimport astar\nimport json\n\n\ndef is_possible(floors):\n for f in floors:\n reactors = set()\n chips = set()\n for item in f:\n v = item[0]\n t = item[1]\n if t == 'G':\n reactors.add(v)\n else:\n chips.add(v)\n if len(reactors) > 0 and len(chips.difference(reactors)) > 0:\n return False\n return True\n\n\ndef hash(v):\n for i in range(len(v['floors'])):\n v['floors'][i] = sorted(v['floors'][i])\n return json.dumps(v, sort_keys=True)\n\n\ndef unhash(v):\n return json.loads(v)\n\n\ndef get_neighbors(v):\n v = unhash(v)\n elevator = v['e']\n floors = v['floors']\n for i in range(2, 0, -1):\n selections = itertools.permutations(floors[elevator], i)\n\n for selection in selections:\n if elevator > 0:\n f = copy.deepcopy(floors)\n f[elevator] = [item for item in floors[elevator] if item not in selection]\n f[elevator-1].extend(selection)\n if is_possible(f):\n yield hash({'e': elevator-1, 'floors': f})\n if elevator < 3:\n f = copy.deepcopy(floors)\n f[elevator] = [item for item in floors[elevator] if item not in selection]\n f[elevator+1].extend(selection)\n if is_possible(f):\n yield hash({'e': elevator+1, 'floors': f})\n\n\ndef heuristic_cost(a, goal):\n a = unhash(a)\n return 1\n #return len(a['floors'][0]) + len(a['floors'][1])*2 + len(a['floors'][2])*4 + len(a['floors'][3]*8)\n\n\ndef distance(a, b):\n return 1\n\n\ndef run1(inp): \n #start = {'e': 0, 'floors': [['HM', 'LM'], ['HG'], ['LG'], []]}\n #end = {'e': 3, 'floors': [[], [], [], ['HM', 'LM', 'HG', 'LG']]}\n start = {'e': 0, 'floors': [['PG', 'SG'], ['PM', 'SM'], ['RG', 'RM', 'UG', 'UM'], []]}\n end = {'e': 3, 'floors': [[], [], [], ['PG', 'SG', 'PM', 'SM', 'RG', 'RM', 'UG', 'UM']]}\n #start = {'e': 0, 'floors': [['EG', 'EM', 'DG', 'DM', 'TG', 'TM', 'PG', 'SG'], ['PM', 'SM'], ['RG', 'RM', 'UG', 'UM'], []]}\n #end = {'e': 3, 'floors': [[], [], [], ['EG', 'EM', 'DG', 'DM', 'TG', 'TM', 'PG', 'SG', 'PM', 'SM', 'RG', 'RM', 'UG', 'UM']]}\n \n v = list(astar.find_path(hash(start), hash(end), get_neighbors, False, heuristic_cost, distance))\n for item in v:\n print(item)\n return len(v)-1\n\n\nif __name__==\"__main__\":\n with open(\"day11.txt\", \"r\") as f:\n inp = f.readlines()\n print(run1(inp))","repo_name":"citiral/aoc","sub_path":"2016/day11-2.py","file_name":"day11-2.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32429616208","text":"import torch\nimport csv\nimport time\nfrom sklearn.metrics import precision_recall_fscore_support, confusion_matrix\n\ndef test(testloader, model, classes, load_model_path, out_path, multi_test_size, device):\n # model load\n if device == \"cuda\":\n model.load_state_dict(torch.load(load_model_path))\n else : \n model.load_state_dict(torch.load(load_model_path, map_location=torch.device(device)))\n\n model.eval()\n\n correct = 0\n total = 0\n correct_pred = {classname: 0 for classname in classes}\n total_pred = {classname: 0 for classname in classes}\n pred = []\n y_true = []\n y_pred = []\n outs = []\n multi_test_len = len(multi_test_size)\n\n start = time.time()\n with torch.no_grad():\n for i, (data, labels, image_name) in enumerate(testloader):\n if device == \"cuda\":\n data = data.cuda()\n outputs = model(data)\n\n outs.append(outputs)\n\n if (i+1) % len(multi_test_size) == 0 :\n tmp = 0\n for k in range(multi_test_len):\n tmp = tmp + outs[k]\n outs = []\n\n _, predicted = torch.max(tmp.data, 1)\n\n y_pred.append(predicted.cpu().tolist())\n y_true.append(labels.tolist())\n\n total += labels.size(0)\n correct += (predicted.cpu() == labels).sum().item()\n pred.append([image_name[0], predicted.cpu().item()])\n\n for label, prediction in zip(labels, predicted.cpu()):\n if label == prediction:\n correct_pred[classes[label]] += 1\n total_pred[classes[label]] += 1\n\n print(\"inference time :\", time.time() - start)\n precision, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, average=None)\n\n # precision, recall, f1score 출력\n print('precision: \\t{}'.format(precision))\n print('recall: \\t{}'.format(recall))\n print('fscore: \\t{}'.format(fscore))\n print('support: \\t{}'.format(support))\n\n # 결과 출력\n print('Accuracy of the all test images: %d %%' % (100 * correct / total))\n # for classname, correct_count in correct_pred.items():\n # accuracy = 100 * float(correct_count) / total_pred[classname]\n # print(\"Accuracy for class {:5s} is: {:.1f} %\".format(classname, accuracy))\n \n # confusion matrix\n cf = confusion_matrix(y_true, y_pred)\n print(\"Confusion Matrix\")\n print(cf)\n\n # prediction csv파일로 저장\n f = open(out_path, 'w', newline='')\n wr = csv.writer(f)\n wr.writerows(pred)\n f.close()\n print(out_path, 'file saved!!')\n","repo_name":"JOOCHANN/image_classification","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73329761107","text":"import logging\nimport odoo\nfrom odoo import SUPERUSER_ID\nfrom odoo.addons.connector.connector import ConnectorUnit\n# from odoo.addons.queue_job.exception import FailedJobError\nfrom odoo.addons.connector.unit.synchronizer import Importer\n\nfrom ..backend import exchange_2010\n\nfrom contextlib import closing, contextmanager\n\n_logger = logging.getLogger(__name__)\n\nRETRY_ON_ADVISORY_LOCK = 1 # seconds\nRETRY_WHEN_CONCURRENT_DETECTED = 1 # seconds\n\n\nRETRY_ON_ADVISORY_LOCK = 1 # seconds\nRETRY_WHEN_CONCURRENT_DETECTED = 1 # seconds\n\n\nclass ExchangeImporter(Importer):\n \"\"\" Exchange Importer \"\"\"\n\n # Name of the field which contains the ID\n _id_field = None # set in sub-classes\n\n def run(self, *args, **kwargs):\n \"\"\" The connectors have to implement the _run method \"\"\"\n return self._run(*args, **kwargs)\n\n def __init__(self, environment):\n \"\"\"\n :param environment: current environment (backend, session, ...)\n :type environment: :py:class:`connector.connector.Environment`\n \"\"\"\n super(ExchangeImporter, self).__init__(environment)\n self.external_id = None\n self.external_record = None\n\n def external_id_from_record(self, record):\n assert self._id_field, \"_id_field must be defined\"\n return record[self._id_field]\n\n def _before_import(self):\n \"\"\" Hook called before the import, when we have the external\n data\"\"\"\n\n def _import_dependency(self, subrecord, binding_model,\n importer_class=None, always=False,\n **kwargs):\n \"\"\" Import a dependency.\n\n The importer class is a class or subclass of\n :class:`ExchangeImporter`. A specific class can be defined.\n\n :param subrecord: subrecord to import\n :param binding_model: name of the binding model for the relation\n :type binding_model: str | unicode\n :param importer_cls: :class:`odoo.addons.connector.\\\n connector.ConnectorUnit`\n class or parent class to use for the export.\n By default: ExchangeImporter\n :type importer_cls: :class:`odoo.addons.connector.\\\n connector.MetaConnectorUnit`\n :param always: if True, the record is updated even if it already\n exists, note that it is still skipped if it has\n not been modified on the backend since the last\n update. When False, it will import it only when\n it does not yet exist.\n :type always: boolean\n :param **kwargs: additional args are propagated to the importer\n \"\"\"\n if importer_class is None:\n importer_class = ExchangeImporter\n importer = self.unit_for(importer_class, model=binding_model)\n external_id = importer.external_id_from_record(subrecord)\n binder = self.binder_for(binding_model)\n if always or not binder.to_openerp(external_id):\n importer.run(subrecord, **kwargs)\n\n def _import_dependencies(self):\n \"\"\" Import the dependencies for the record\n\n Import of dependencies can be done manually or by calling\n :meth:`_import_dependency` for each dependency.\n \"\"\"\n return\n\n def _validate_data(self, data):\n \"\"\" Check if the values to import are correct\n\n Pro-actively check before the ``_create`` or\n ``_update`` if some fields are missing or invalid.\n\n Raise `InvalidDataError`\n \"\"\"\n return\n\n def _must_skip(self):\n \"\"\" Hook called right after we read the data from the backend.\n\n If the method returns a message giving a reason for the\n skipping, the import will be interrupted and the message\n recorded in the job (if the import is called directly by the\n job, not by dependencies).\n\n If it returns None, the import will continue normally.\n\n :returns: None | str | unicode\n \"\"\"\n return\n\n def _get_binding(self):\n \"\"\"Return the binding id from the external id\"\"\"\n return self.binder.to_openerp(self.external_id)\n\n def _skip_create(self, map_record, values):\n \"\"\" Defines if a create import should be skipped\n\n A reason can be returned in string\n \"\"\"\n return\n\n def _create_data(self, map_record, **kwargs):\n return map_record.values(for_create=True, **kwargs)\n\n def _create_context_keys(self, keys=None):\n if keys and 'connector_no_export' in keys:\n context_keys = dict(**keys or {})\n else:\n context_keys = dict(\n connector_no_export=True,\n **keys or {}\n )\n if self.env.user.id == SUPERUSER_ID:\n context_keys['mail_create_nosubscribe'] = True\n\n return context_keys\n\n def _create(self, data, context_keys=None):\n \"\"\" Create the Odoo record \"\"\"\n # special check on data before import\n self._validate_data(data)\n context_keys = self._create_context_keys(keys=context_keys)\n binding = self.model.with_context(**context_keys).create(data)\n\n _logger.debug('%s %d created from %s %s',\n self.model._name, binding.id,\n self.backend_record._name, self.external_id)\n return binding\n\n def _skip_update(self, map_record, values):\n \"\"\" Defines if an update import should be skipped\n\n A reason can be returned in string\n \"\"\"\n return\n\n def _update_data(self, map_record, **kwargs):\n return map_record.values(**kwargs)\n\n def _update_context_keys(self, keys=None):\n context_keys = dict(\n connector_no_export=True,\n __deduplicate_no_name_search=True,\n __changeset_rules_source_model=self.backend_record._name,\n __changeset_rules_source_id=self.backend_record.id)\n\n if keys:\n context_keys.update(keys)\n\n if self.env.user.id == SUPERUSER_ID:\n context_keys['tracking_disable'] = True\n\n return context_keys\n\n def _update(self, binding, data, context_keys=None):\n \"\"\" Update an Odoo record \"\"\"\n # special check on data before import\n self._validate_data(data)\n\n context_keys = self._update_context_keys(keys=context_keys)\n binding.with_context(**context_keys).write(data)\n _logger.debug('%s %d updated from %s %s',\n self.model._name, binding.id,\n self.backend_record._name, self.external_id)\n return\n\n def _after_import(self, binding):\n \"\"\" Hook called at the end of the import \"\"\"\n return\n\n @contextmanager\n def do_in_new_connector_env(self, model_name=None):\n \"\"\" Context manager that yields a new connector environment\n\n Using a new Odoo Environment thus a new PG transaction.\n\n This can be used to make a preemptive check in a new transaction,\n for instance to see if another transaction already made the work.\n \"\"\"\n with odoo.api.Environment.manage():\n registry = odoo.modules.registry.RegistryManager.get(\n self.env.cr.dbname\n )\n with closing(registry.cursor()) as cr:\n try:\n new_env = odoo.api.Environment(cr, self.env.uid,\n self.env.context)\n connector_env = self.connector_env.create_environment(\n self.backend_record.with_env(new_env),\n self.env,\n model_name or self.model._name,\n connector_env=self.connector_env\n )\n yield connector_env\n except Exception as exp:\n cr.rollback()\n raise exp\n else:\n cr.commit()\n\n def _run(self, item_id, user):\n \"\"\" Beginning of the synchronization\n\n The first thing we do is to try to acquire an advisory lock\n on Postgresql. If it can't be acquired it means that another job\n does the same import at the same moment.\n The goal is to prevent 2 jobs to create the same binding because\n they each job is not aware of the other binding.\n It happens easily when 2 jobs import the same dependencies (such\n as partner categories for an import of partners).\n\n :param item_id: item_id\n \"\"\"\n self.openerp_user = user\n self.external_id = item_id\n # lock_name = 'import({}, {}, {}, {})'.format(\n # self.backend_record._name,\n # self.backend_record.id,\n # self.model._name,\n # self.external_id,\n # )\n # Keep a lock on this import until the transaction is committed\n # self.advisory_lock_or_retry(lock_name,\n # retry_seconds=RETRY_ON_ADVISORY_LOCK)\n\n skip = self._must_skip()\n if skip:\n return skip\n\n self._before_import()\n\n # import the missing linked resources\n # self._import_dependencies()\n\n contact_id = self.external_id\n\n data = self._map_data()\n data.update(user_id=self.openerp_user.id,\n backend_id=self.backend_record.id)\n\n # try to find a exchange.res.partner with the same\n # Id/user_id/backend_id\n # if found, update it\n # otherwise, create it\n backend = self.backend_record\n args = [('backend_id', '=', backend.id),\n ('user_id', '=', self.openerp_user.id),\n ('external_id', '=', contact_id)]\n exchange_partners = self.env['exchange.res.partner'].search(args)\n\n partners = self.env['res.partner']\n if data.get('company_name'):\n partners = self.env['res.partner'].search(\n [('name', '=', data['company_name'])])\n del data['company_name']\n\n if not exchange_partners:\n GENERIC = self.env.ref('connector_exchange.res_partner_GENERIC').id\n _logger.debug('does not exist --> CREATE')\n data['active'] = False\n binding = exchange_partners._create(data)\n write_dict = {\n 'active': True,\n 'parent_id': partners and partners[0].id or GENERIC\n }\n binding_rs = exchange_partners.browse(binding)\n self._update(binding_rs, write_dict)\n # self.move_contact(contact_id)\n else:\n # if not self.external_record:\n # _logger.debug('deleted in Exchange')\n # # self.binding.openerp_id.with_context(\n # # connector_no_export=True).unlink()\n # else:\n _logger.debug('exists --> UPDATE')\n binding = exchange_partners[0]\n self._update(binding, data)\n\n def _map_data(self):\n raise NotImplementedError('Must be implemented in subclasses')\n\n # def move_contact(self, contact_id):\n # ews_service = self.backend_adapter.ews\n # ews_service.get_root_folder()\n # contact_folder = ews_service.root_folder.FindFolderByDisplayName(\n # \"Contacts\",\n # types=[FolderClass.Contacts])\n # if contact_folder:\n # contact_folder = contact_folder[0]\n # ews_service.MoveItems(contact_folder.Id, [contact_id])\n # else:\n # raise FailedJobError(\n # _('Unable to find folder \"Contacts\" in Exchange')\n # )\n\n\ndef add_checkpoint(env, model_name, record_id,\n backend_model_name, backend_id):\n checkpoint_model = env['connector.checkpoint']\n return checkpoint_model.create_from_name(model_name, record_id,\n backend_model_name, backend_id)\n\n\n@exchange_2010\nclass AddCheckpoint(ConnectorUnit):\n \"\"\" Add a connector.checkpoint on the underlying model\n (not the exchange.* but the _inherits'ed model) \"\"\"\n\n _model_name = ['exchange.res.partner']\n\n def run(self, openerp_binding_id):\n binding = self.model.browse(openerp_binding_id)\n record = binding.openerp_id\n add_checkpoint(self.env,\n record._model._name,\n record.id,\n self.backend_record.id)\n","repo_name":"camptocamp/connector-exchange","sub_path":"connector_exchange/unit/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":12451,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"16173835914","text":"import os\nimport sys\nimport time\n\nimport umsgpack\n\nfrom uwsgi_asgi.uwsgi_asgi import channel_layer, get_pipe_name\n\nsys.path.append(os.getcwd())\ntry:\n import uwsgi\n log = uwsgi.log\nexcept ImportError:\n log = print\n\n\nclass LayerWrapperWriter:\n def __init__(self):\n self._channels = {}\n # open pipe to get new channels\n self.name = get_pipe_name('reader_mule')\n if os.path.exists(self.name):\n os.remove(self.name)\n os.mkfifo(self.name)\n self.pipeinfd = os.open(self.name, os.O_RDONLY | os.O_NONBLOCK)\n self.pipein = os.fdopen(self.pipeinfd, 'rb')\n\n def close(self):\n os.close(self.pipeinfd)\n if os.path.exists(self.name):\n os.remove(self.name)\n\n def read(self):\n # uwsgi.wait_fd_read(self.pipeinfd)\n # uwsgi.suspend()\n # if self.pipeinfd == uwsgi.ready_fd():\n msgdata = self.pipein.read()\n if msgdata:\n chname = umsgpack.unpackb(msgdata)\n if chname.startswith('-'):\n print('removing channel from reader {} {}'.format(chname, len(self.channels)))\n chname = chname[1:]\n try:\n fd = self._channels.pop(chname)\n os.close(fd)\n except KeyError:\n pass\n else:\n self.remove(chname)\n\n def send(self, chname, message):\n msgdata = umsgpack.packb(message)\n try:\n os.write(self._channels[chname], msgdata)\n except BrokenPipeError:\n self.remove(chname)\n\n def remove(self, chname):\n if chname not in self._channels:\n if not os.path.exists(get_pipe_name(chname)):\n print('pipe does not exists!! {}'.format(get_pipe_name(chname)))\n self._channels[chname] = os.open(get_pipe_name(chname), os.O_WRONLY)\n print('new channel {} {}'.format(chname, len(self.channels)))\n else:\n print('got channel name already in dict {}'.format(chname))\n\n @property\n def channels(self):\n return self._channels.keys()\n\n\ndef reader():\n \"\"\"\n todo: run this should run in a uwsgi programmed mule and send the received messages to the thread using named pipes\n pipe will be named using the reply_channel name eg '/tmp/{}'.format(reply_channel)\n \"\"\"\n layer_wrapper = LayerWrapperWriter()\n while True:\n layer_wrapper.read()\n channel, message = channel_layer.receive(layer_wrapper.channels, block=False)\n if channel:\n # Deal with the message\n try:\n # unknown_message_keys = set(message.keys()) - {\"bytes\", \"text\", \"close\"}\n # if unknown_message_keys:\n # raise ValueError(\n # \"Got invalid WebSocket reply message on %s - contains unknown keys %s\" % (\n # channel,\n # unknown_message_keys,\n # )\n # )\n # print('got message from channel layer {} {} {}'.format(type(message), channel, message))\n try:\n layer_wrapper.send(channel, message)\n # wsipc = ipc.get(message['reply_channel'], IPC(message['reply_channel'], reader=False))\n # wsipc.send_message(message)\n except (ConnectionRefusedError, FileNotFoundError):\n pass # ws closed, ignore message\n except Exception as e:\n log(\"HTTP/WS send decode error: %s\" % e)\n raise\n else:\n # print(len(layer_wrapper.channels), end='', flush=True)\n time.sleep(0.05)\n uwsgi.log('finished reader mule!!!')\n\nif __name__ == '__main__':\n reader()\n","repo_name":"tovmeod/uwsgi-asgi","sub_path":"reader_mule.py","file_name":"reader_mule.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"26158247192","text":"\"\"\"\nRead dsi config. (config.yml or dsi_config.yml)\n\"\"\"\n\nimport os\nimport yaml\n\n\ndef find_config_file():\n # Highest priority first:\n search_paths = [\n os.path.join(os.path.dirname(os.path.dirname(__file__)), \"config.yml\"),\n os.path.expanduser(\"~/.dsi_config.yml\")\n ]\n for path in search_paths:\n if os.path.exists(path):\n return path\n\n raise IOError(\"Did not find config.yml in repo root nor ~/.dsi_config.yml. \"\n \"Please see /example_config.yml for a template.\")\n\n\ndef read_config():\n with open(find_config_file()) as config_file:\n return yaml.load(config_file)\n","repo_name":"mdcallag/dsi","sub_path":"test_lib/dsi_config.py","file_name":"dsi_config.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"31872103017","text":"from tabulate import tabulate\nstudents = [\n {\"sNumber\": 101, \"name\": \"Ali\", \"surname\": \"Ak\", \"phone\": \"12345\",\"wallet\":1000},\n {\"sNumber\": 102, \"name\": \"Ayşe\", \"surname\": \"Al\", \"phone\": \"99999\",\"wallet\":500},\n {\"sNumber\": 103, \"name\": \"Tina\", \"surname\": \"Schwarz\", \"phone\": \"1312345\",\"wallet\":1000},\n {\"sNumber\": 104, \"name\": \"Hans\", \"surname\": \"Wolter\", \"phone\": \"993199\",\"wallet\":500},\n]\n\n\n\n\ndef saveStudent(sNumber, name, surname, sPhone,wallet):\n sNumberlist=[]\n for i in students:\n sNumberlist.append(i[\"sNumber\"])\n\n sNumberlist.sort()\n if sNumber in sNumberlist:\n print(f\"there is a student with student number{sNumber}\")\n print(f\"student could not added to Datebase\")\n print(f\"last student number is:{sNumberlist[-1]}\")\n else:\n students.append({\"sNumber\":sNumber,\"name\":name,\n \"surname\":surname,\"phone\":sPhone,\"wallet\":wallet})\n \n \n \n # students.append({\"sNumber\": sNumber, \"name\": name,\n # \"surname\": surname, \"phone\": sPhone, \"wallet\":wallet})\n\n\ndef listStudents():\n print(tabulate(students,headers=\"keys\"))\n\n\ndef addMoneyToWallet(sNumber, amount):\n for i in students:\n if i[\"sNumber\"] == sNumber:\n i[\"wallet\"]+=amount\n\n\n\n\nprint(\"\\n\")\nprint(\"*\".center(50, \"-\"))\nprint(\"Welcome to our School Database\".center(50, \"-\"))\n\nwhile True:\n print(\"To see student list, type '1': \")\n print(\"To add new student, type '2': \")\n print(\"To add balance to a student wallet type '3': \")\n print(\"To quit, type 'q': \")\n choice = input(\"Type your choice : \")\n if choice == \"q\":\n break\n elif choice == \"1\":\n listStudents()\n elif choice == \"2\":\n sNumber = int(input(\"Student Number : \"))\n name = input(\"Student Name : \")\n surname = input(\"Student Surname: \")\n sPhone = input(\"Student Phone : \")\n wallet = int(input(\"wallet balence:\"))\n saveStudent(sNumber, name, surname, sPhone, wallet)\n elif choice == \"3\":\n sNumber = int(input(\"Student Number :\"))\n amount =int(input(\"Balence to add :\"))\n addMoneyToWallet(sNumber,amount)\n else:\n continue\n\n print(\"\\n \\n \") # new line","repo_name":"python1818/python_principles","sub_path":"school_D.B2..py","file_name":"school_D.B2..py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"45714237696","text":"import re\n\nimport localdatetime\n\nfrom Products.SilvaFind import schema\nfrom Products.SilvaFind.interfaces import IResultField, IQuery, IResultView\nfrom Products.SilvaMetadata.interfaces import IMetadataService\nfrom Products.SilvaMetadata.interfaces import IMetadataElement\nfrom Products.SilvaMetadata.Index import createIndexId\nfrom Products.ZCTextIndex.ParseTree import ParseError\n\nfrom five import grok\nfrom silva.core.interfaces import IVersion, IImage\nfrom silva.core.interfaces.adapters import IIconResolver\nfrom zope.component import getMultiAdapter, getUtility\nfrom zope.interface import Interface\nfrom zope.traversing.browser import absoluteURL\nfrom zope.traversing.browser.interfaces import IAbsoluteURL\n\n\nclass ResultView(grok.MultiAdapter):\n grok.implements(IResultView)\n grok.adapts(IResultField, IQuery, Interface)\n grok.provides(IResultView)\n\n def __init__(self, result, context, request):\n self.context = context\n self.result = result\n self.request = request\n\n def update(self, view):\n pass\n\n def render(self, item):\n value = getattr(item.getObject(), self.result.getName())()\n if not value:\n return\n\n if hasattr(value, 'strftime'):\n # what the hell are these things?,\n # they don't have a decent type\n value = value.strftime('%d %b %Y %H:%M')\n\n value = '%s' % value\n title = '%s' % (\n self.title)\n return '%s%s' % (title, value)\n\n\nclass MetatypeResultView(ResultView):\n grok.adapts(schema.MetatypeResultField, IQuery, Interface)\n\n def update(self, view):\n self.get_icon = IIconResolver(self.request).get_tag\n\n def render(self, item):\n content = item.getObject()\n if IVersion.providedBy(content):\n content = content.get_silva_object()\n return self.get_icon(content)\n\n\nclass RankingResultView(ResultView):\n grok.adapts(schema.RankingResultField, IQuery, Interface)\n\n def __init__(self, *args):\n super(RankingResultView, self).__init__(*args)\n self.rankings = {}\n self.highest = 1.0\n\n def update(self, view):\n query = self.request.form.get('fulltext')\n self.highest = 1.0\n self.rankings = {}\n if query:\n query = unicode(query, 'utf8')\n # XXX should use getUtility\n catalog = self.context.service_catalog\n index = catalog.Indexes['fulltext']\n try:\n max_index = view.results.start + len(view.results) + 1\n rankings = index.query(query, max_index)[0]\n if rankings:\n self.highest = rankings[0][1]/100.0\n self.rankings = dict(rankings)\n except ParseError:\n pass\n\n self.img = '
' % view.static['ranking.gif']()\n\n def render(self, item):\n rid = item.getRID()\n if rid in self.rankings:\n return '%s %.1f%%' % (\n self.img, (self.rankings[rid] / self.highest))\n return None\n\n\nclass TotalResultCountView(ResultView):\n grok.adapts(schema.TotalResultCountField, IQuery, Interface)\n\n def render(self, item):\n # the actual count is calculated in the pagetemplate\n # this is only here, so it can be enabled / disabled\n # in the smi.\n\n # Please note that enabling that showing the total\n # number of search results might be a security risk\n # since it can be figured out that certain objects\n # were ommitted from the search\n return None\n\n\nclass ResultCountView(ResultView):\n grok.adapts(schema.ResultCountField, IQuery, Interface)\n\n def render(self, item):\n # the actual count is calculated in the pagetemplate\n # this is only here, so it can be enabled / disabled\n # in the smi.\n return\n\n\nclass LinkResultView(ResultView):\n grok.adapts(schema.LinkResultField, IQuery, Interface)\n\n def render(self, item):\n content = item.getObject()\n title = content.get_title_or_id()\n if IVersion.providedBy(content):\n url = absoluteURL(content.get_silva_object(), self.request)\n else:\n url = absoluteURL(content, self.request)\n ellipsis = '…'\n if len(title) > 50:\n title = title[:50] + ellipsis\n return '%s' % (url, title)\n\n\nclass DateResultView(ResultView):\n grok.adapts(schema.DateResultField, IQuery, Interface)\n\n def update(self, view):\n self.locale = localdatetime.get_locale_info(self.request)\n\n def render(self, item):\n content = item.getObject()\n # XXX we should use publication_datetime on publishable, but\n # it is currently broken\n date = content.get_modification_datetime()\n datestr = ''\n if date:\n if hasattr(date, 'asdatetime'):\n date = date.asdatetime()\n datestr = localdatetime.get_formatted_date(\n date, size=\"medium\", locale=self.locale)\n\n return '%s' % datestr\n return None\n\n\nclass ThumbnailResultView(ResultView):\n grok.adapts(schema.ThumbnailResultField, IQuery, Interface)\n\n def render(self, item):\n content = item.getObject()\n\n if not IImage.providedBy(content):\n return\n\n if content.thumbnail_image is None:\n return\n\n anchor = '
' % (\n item.getURL(), content.thumbnail_image.get_download_url())\n return '%s
' % anchor\n\n\nclass FullTextResultView(ResultView):\n grok.adapts(schema.FullTextResultField, IQuery, Interface)\n\n def render(self, item):\n ellipsis = '…'\n maxwords = 40\n searchterm = unicode(self.request.form.get('fulltext', ''), 'utf8')\n catalog = self.context.service_catalog\n fulltext = catalog.getIndexDataForRID(item.getRID()).get('fulltext', [])\n\n if not fulltext:\n # no fulltext available, probably an image\n return ''\n\n content = item.getObject()\n\n # since fulltext always starts with id and title, lets remove that\n idstring = content.id\n if IVersion.providedBy(content):\n idstring = content.get_silva_object().id\n skipwords = len(('%s %s' % (idstring, content.get_title())).split(' '))\n fulltext = fulltext[skipwords:]\n fulltextstr = ' '.join(fulltext)\n\n searchterms = searchterm.split()\n\n if not searchterms:\n # searchterm is not specified,\n # return the first 20 words\n text = ' '.join(fulltext[:maxwords])\n if IVersion.providedBy(content) and hasattr(content, 'fulltext'):\n realtext = ' '.join(content.fulltext()[2:])\n # replace multiple whitespace characters with one space\n realtext = re.compile('[\\ \\n\\t\\xa0]+').sub(' ', realtext)\n text = ' '.join(realtext.split()[:maxwords])\n if len(fulltext) > maxwords:\n text += ' ' + ellipsis\n else:\n words = maxwords / len(searchterms)\n text = []\n lowestpos = len(fulltext)\n highestpos = 0\n\n hilite_terms = []\n for searchterm in searchterms:\n term = re.escape(searchterm)\n\n if '?' in term or '*' in term:\n termq = term.replace('\\\\?', '.')\n termq = termq.replace('\\\\*', '.[^\\ ]*')\n term_found = re.compile(termq).findall(fulltextstr)\n if term_found:\n hilite_terms += term_found\n searchterms.remove(searchterm)\n term = term_found[0]\n searchterms.append(term.strip())\n else:\n hilite_terms.append(term)\n else:\n hilite_terms.append(term)\n\n if not term in fulltext:\n # term matched probably something in the title\n # return the first n words:\n line = ' '.join(fulltext[:words])\n text.append(line)\n lowestpos = 0\n highestpos = words\n continue\n\n pos = fulltext.index(term)\n if pos < lowestpos:\n lowestpos = pos\n if pos > highestpos:\n highestpos = pos\n start = pos -(words/2)\n end = pos + (words/2) + 1\n if start < 0 :\n end += -start\n start = 0\n\n pre = ' '.join(fulltext[start:pos])\n post = ' '.join(fulltext[pos+1:end])\n\n if not text and start != 0:\n # we're adding the first (splitted) result\n # and it's not at the beginning of the fulltext\n # lets add an ellipsis\n pre = ellipsis + pre\n\n\n text.append('%s %s %s %s' % (\n pre,\n fulltext[pos],\n post,\n ellipsis)\n )\n # if all the terms that are found are close together,\n # then use this, otherwise, we would end\n # up with the same sentence for each searchterm\n # this code will create a new text result list, which\n # does not have 'split' results.\n if lowestpos < highestpos:\n if highestpos - lowestpos < maxwords:\n padding = (maxwords-(highestpos - lowestpos ))/2\n lowestpos -= padding\n highestpos += padding\n if lowestpos < 0:\n highestpos += -lowestpos\n lowestpos = 0\n\n text = fulltext[lowestpos:highestpos]\n if not lowestpos == 0:\n text[0] = '%s %s' % (ellipsis, text[0])\n if highestpos < len(fulltext)-1:\n text[-1] += ' %s' % ellipsis\n\n # do some hiliting, use original text\n # (with punctuation) if this is a silva document\n text = ' '.join(text)\n if IVersion.providedBy(content) and hasattr(content, 'fulltext'):\n realtext = ' '.join(content.fulltext()[2:])\n # replace multiple whitespace characters with one space\n realtext = re.compile('[\\ \\n\\t\\xa0]+').sub(' ', realtext)\n textparts = text.split(ellipsis)\n new = []\n for textpart in textparts:\n if textpart == '':\n new.append('')\n continue\n textpart = textpart.strip()\n find = textpart.replace(' ', '[^a-zA-Z0-9]+')\n textexpr = re.compile(find, re.IGNORECASE)\n text = textexpr.findall(realtext)\n if text:\n text = text[0]\n else:\n # somehow we can't find a match in original text\n # use the one from the catalog\n text = textpart\n new.append(text)\n text = ellipsis.join(new)\n\n for term in hilite_terms:\n if term.startswith('\"'):\n term = term[1:]\n if term.endswith('\"'):\n term = term[:-1]\n term = re.escape(term)\n text = ' ' + text\n regexp = re.compile(\n '([^a-zA-Z0-9]+)(%s)([^a-zA-Z0-9]+)' % term.lower(),\n re.IGNORECASE)\n sub = ('\\g<1>'\n '\\g<2>\\g<3>')\n text = regexp.sub(sub, text)\n return '%s
' % text.strip()\n\n\nclass BreadcrumbsResultView(ResultView):\n grok.adapts(schema.BreadcrumbsResultField, IQuery, Interface)\n\n def render(self, item):\n content = item.getObject()\n part = []\n breadcrumb = getMultiAdapter((content, self.request), IAbsoluteURL)\n for crumb in breadcrumb.breadcrumbs()[:-1]:\n part.append('%s' % (crumb['url'], crumb['name']))\n part = ' · '.join(part)\n return '%s' % part\n\n\nclass MetadataResultView(ResultView):\n grok.adapts(schema.MetadataResultField, IQuery, Interface)\n\n def update(self, view):\n self.set_name, self.element_name = self.result.getId().split(':')\n service = getUtility(IMetadataService)\n metadata_set = service.getMetadataSet(self.set_name)\n metadata_element = metadata_set.getElement(self.element_name)\n assert IMetadataElement.providedBy(metadata_element),\\\n u\"Unknow metadata element %s\" % self.result.getId()\n self.renderValue = metadata_element.renderView\n\n if metadata_element.metadata_in_catalog_p:\n # If the metadata is available on the brain, directly use it\n metadata_key = createIndexId(metadata_element)\n self.getValue = lambda item: getattr(item, metadata_key)\n else:\n self.getValue = lambda item: service.getMetadataValue(\n item.getObject(), self.set_name, self.element_name)\n\n def render(self, item):\n # self.context should item.getObject()\n result = self.renderValue(self.context, self.getValue(item))\n if not result:\n return\n\n css_class = \"metadata-%s-%s\" % (self.set_name, self.element_name)\n return ''.join(['' % css_class,\n '',\n self.result.getTitle(),\n '',\n '',\n result,\n '',\n ''])\n\n","repo_name":"silvacms/Products.SilvaFind","sub_path":"Products/SilvaFind/results/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":14545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73278393425","text":"#!/usr/bin/env python3\n# Ross Gray\n# ross.gray12@imperial.ac.uk\n#\n# imageanalysis4.py\n#\n\n\"\"\" Script for importing orthomosaic tiff files, conducting individual\ntree crown delineation and analysing the ITC segments for pixel data. \"\"\"\n\n\n#################\n##Loading modules\n#################\n\nimport sys #For module writing\nimport subprocess #For bash and R operations\nimport os #For directory access\nimport glob #For file selection\nimport re #For manipulation of filenames\n\nimport cv2 #For image manipulation\nfrom PIL import Image #For image analysis\nimport tifffile as tiff #For 16-bit tiff files\n\nfrom osgeo import gdal, osr, ogr #For gdal spatial manipulation\n# import fiona #For GIS manipulation\n# import rasterio #For raster manipulaiton\n# from rasterio.mask import mask #For raster manipulaiton\n# from rasterio import Affine # or from affine import Affine\nimport geopandas as gpd #For shapefile manipulation\nfrom shapely.geometry import mapping, point #For changing shapely geometries\n\nimport numpy as np #For image analysis and data manipulation\nimport csv #For saving the results table\nimport pandas as pd #For saving the results table and conversion to tables\nimport statistics #For calculating standard deviation\n\nimport time #For timing processing\nfrom tqdm import tqdm #Produces progress bar\n\n#################\n###Georeferencing\n#################\n\n#~ ###Batch Georeferencing\n#~ #Unable to do so mannaul georeferencing\n#~ for f in glob.glob(\"../Data/Test/\"+\"*.tif\"):\n\n\t#~ ##Filenames\n\t#~ filename = os.path.split(f)[1]\n\t#~ temp = \"../Data/tmp/\" + filename\n\t#~ out_file = \"../Data/Geotest/\" + filename\n\n\t#~ ##Translating original raster to new GCPs\n\t#~ #Command line\n\t#~ cmd1 = [\"gdal_translate\", \"-of\", \"GTiff\",\n\t\t\t#~ #GCPs Source X, Source Y, Dest X, Dest Y\n\t\t\t#~ \"-gcp\", \"566055\", \"-522336\", \"566058\", \"522336\",\n\t\t\t#~ \"-gcp\", \"566074\", \"-522269\", \"566077\", \"522269\",\n\t\t\t#~ \"-gcp\", \"566269\", \"-522273\", \"566271\", \"522273\",\n\t\t\t#~ f, temp]\n\t#~ subprocess.call(cmd1)\n\n\t#~ ##Warping to create new raster\n\t#~ #Command line\n\t#~ cmd2 = [\"gdalwarp\", \"-r\", \"near\", \"-tps\", \"-co\", \"COMPRESS=LZW\", temp, out_file]\n\t#~ subprocess.call(cmd2)\n\n\n#----------------------------------------------------------------------#\n#----------------------------------------------------------------------#\n\n###################################################\n### TREE CROWN IDENTIFICAION AND PIXEL ANALYSIS ###\n###################################################\n\ndef main(argv):\n\n\t###################################################\n\t## RASTERIZING LIDAR POLYGONS TO 16-BIT TEMPLATE ##\n\t###################################################\n\n\t## Timing the program\n\tstarttime = time.time()\n\n\t#Update\n\tprint(\"Starting image analysis.\")\n\n\t##Interate through Orthomosaics\n\tdef image_analysis(f):\n\t# for f in tqdm(glob.glob(\"../Data/LongTermStudy/Orthomosaics/Georeferenced/\"+\"*.tif\")):\n\t## Translating RGB image into 16-bit template\n\t\trgb_img = gdal.Open(f)\n\n\t\t#Getting filenames\n\t\tfullname = os.path.split(f)[1]\n\t\tfilename = os.path.splitext(fullname)[0]\n\n\t\t#Output to new format\n\t\ttemplate = gdal.Translate(\"../Data/LongTermStudy/Templates/NewGeoref/\" + filename + \"_ITC.tif\", rgb_img,\n\t\t\t\t\t\t\t\t format=\"GTiff\", outputType= gdal.GDT_UInt16,\n\t\t\t\t\t\t\t\t creationOptions=['COMPRESS=PACKBITS'])\n\n\t\t#Properly close the datasets to flush to disk\n\t\trgb_img = None\n\t\ttemplate = None\n\n\t\t##Rasterizing tree crown polygons onto template\n\t\t#Open RGB image, raster template and polygons to burn\n\t\tbgr_img = cv2.imread(f, cv2.IMREAD_COLOR)\n\n\t\t#Getting dimension of rgb image\n\t\t[height, width, dim] = bgr_img.shape\n\n\t\t#Burn tree crown polygons onto template\n\t\tif argv[2] == 'Manual':\n\t\t\t#For manual delineation\n\t\t\trasterizeOptions = gdal.RasterizeOptions(format = \"GTiff\", width = width,\n\t\t\t\t\t\t\t\t\t\t\t\t\t height = height, attribute = \"Tree_ID\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t outputType = gdal.GDT_UInt16)\n\t\t\trasterpoly = gdal.Rasterize(\"../Data/LongTermStudy/Templates/NewGeoref/\" + filename + \"_ITC.tif\",\n\t\t\t\t\t\t\t\t \"../Data/LongTermStudy/ITCSegmentation/Manual/ManualDelineation.shp\",\n\t\t\t\t\t\t\t\t options = rasterizeOptions)\n\t\telif argv[2] == 'VTree':\n\t\t\t#For VTrees\n\t\t\trasterizeOptions = gdal.RasterizeOptions(format = \"GTiff\", width = width,\n\t\t\t\t\t\t\t\t\t\t\t\t\t height = height, attribute = \"id\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t outputType = gdal.GDT_UInt16)\n\t\t\trasterpoly = gdal.Rasterize(\"../Data/LongTermStudy/Templates/NewGeoref/\" + filename + \"_ITC.tif\",\n\t\t\t\t\t\t\t\t \"../Data/LongTermStudy/ITCSegmentation/VTreeDelineation.shp\",\n\t\t\t\t\t\t\t\t options = rasterizeOptions)\n\t\telif argv[2] == '2DSFM':\n\t\t\t#For 3D Point Cloud delineation\n\t\t\trasterizeOptions = gdal.RasterizeOptions(format = \"GTiff\", width = width,\n\t\t\t\t\t\t\t\t\t\t\t\t\t height = height, attribute = \"ID\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t outputType = gdal.GDT_UInt16)\n\t\t\trasterpoly = gdal.Rasterize(\"../Data/LongTermStudy/Templates/NewGeoref/\" + filename + \"_ITC.tif\",\n\t\t\t\t\t\t\t\t \"../Data/LongTermStudy/ITCSegmentation/itc2DSFM/2DSFMDelineation.shp\",\n\t\t\t\t\t\t\t\t options = rasterizeOptions)\n\t\telif argv[2] == 'LiDAR':\n\t\t\t#For LiDAR delineation\n\t\t\trasterizeOptions = gdal.RasterizeOptions(format = \"GTiff\", width = width,\n\t\t\t\t\t\t\t\t\t\t\t\t\t height = height, attribute = \"ID\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t outputType = gdal.GDT_UInt16)\n\t\t\trasterpoly = gdal.Rasterize(\"../Data/LongTermStudy/Templates/NewGeoref/\" + filename + \"_ITC.tif\",\n\t\t\t\t\t\t\t\t \"../Data/LongTermStudy/ITCSegmentation/itcLiDAR/LiDARDelineation.shp\",\n\t\t\t\t\t\t\t\t options = rasterizeOptions)\n\n\t\t#Properly close the datasets to flush to disk\n\t\trasterpoly = None\n\t\t#~ rgb_img = None\n\t\tbgr_img = None\n\n\t\tprint(\"Tree crown delineation successful.\")\n\n#----------------------------------------------------------------------#\n#----------------------------------------------------------------------#\n\n\t\t#####################################\n\t\t## EXTRACTING FILENAME INFORMATION ##\n\t\t#####################################\n\n\t\t##Create empty arrays for the data\n\t\tDate = []\n\t\tStart_Time = []\n\t\tFlightNo = []\n\t\tLocation = []\n\t\tSoftware = []\n\t\tLight = []\n\t\tCloud = []\n\t\tWind = []\n\t\tHeight = []\n\n\t\t##Splitting path from filename\n\t\tfullname = os.path.split(f)[1]\n\t\tfilename = os.path.splitext(fullname)[0]\n\n\t\t##When the image was taken\n\t\t#Date\n\t\tfind_date = re.compile(r\"_([0-9]+-[0-9]+-[0-9]+)_\")\n\t\tdate = find_date.search(filename).group(1)\n\t\tDate.append(date)\n\n\t\t#Start Time\n\t\tfind_time = re.compile(r\"_([0-9]+.[0-9]+)_\")\n\t\tstart_time = find_time.search(filename).group(1)\n\t\tstart_time.replace(\".\", \":\")\n\t\tStart_Time.append(start_time)\n\n\t\t##Where the image was taken\n\t\tfind_where = re.compile(r\"^([a-zA-Z]+\\d+){1}_\")\n\t\twhere = find_where.search(filename).group(1)\n\t\twhere = re.split('(\\d+)',where)\n\t\tFlightNo.append(where[1])\n\t\tLocation.append(where[0])\n\n\t\t##Software used to stitch\n\t\tfind_software = re.compile(r\"_([a-z]+)_\")\n\t\tsoftware = find_software.search(filename).group(1)\n\t\tif software[0] == \"o\":\n\t\t\tSoftware.append(\"OpenDroneMap\")\n\t\tif software[0] == \"p\":\n\t\t\tSoftware.append(\"Pix4D\")\n\t\tif software[0] == \"d\":\n\t\t\tSoftware.append(\"DroneDeploy\")\n\n\t\t##Conditions it was taken in\n\t\tfind_cond = re.compile(r\"_([A-Z]+)\")\n\t\tcond = find_cond.search(filename).group(1)\n\t\t#Explaning conditions\n\t\tif cond[0] == \"D\":\n\t\t\tLight.append(\"Dull\")\n\t\tif cond[0] == \"B\":\n\t\t\tLight.append(\"Bright\")\n\t\tif cond[0] == \"S\":\n\t\t\tLight.append(\"Sunny\")\n\t\tif cond[1] == \"N\":\n\t\t\tCloud.append(\"None\")\n\t\tif cond[1] == \"S\":\n\t\t\tCloud.append(\"Some\")\n\t\tif cond[1] == \"C\":\n\t\t\tCloud.append(\"Cloudy\")\n\t\tif cond[1] == \"O\":\n\t\t\tCloud.append(\"Overcast\")\n\t\tif cond[2] == \"N\":\n\t\t\tWind.append(\"None\")\n\t\tif cond[2] == \"L\":\n\t\t\tWind.append(\"Light\")\n\t\tif cond[2] == \"M\":\n\t\t\tWind.append(\"Medium\")\n\t\tif cond[2] == \"H\":\n\t\t\tWind.append(\"High\")\n\n\t\t##() Height of flight in ft or m\n\t\tfind_height = re.compile(r\"_(\\d+[a-zA-Z]+)_\")\n\t\theight = find_height.search(filename).group(1)\n\t\tHeight.append(height)\n\n\t\tprint(\"Variables have been added.\")\n\n#----------------------------------------------------------------------#\n#----------------------------------------------------------------------#\n\t\t####################################\n\t\t## PIXEL ANALYSIS OF ITC SEGMENTS ##\n\t\t####################################\n\n\t\t##Reading 16 bit ITC tiff file and bgr image\n\t\ttreecrowns = tiff.imread(\"../Data/LongTermStudy/Templates/NewGeoref/\" + filename + \"_ITC.tif\")\n\t\timg = cv2.imread(f)\n\t\t#Converting bgr to rgb\n\t\tb,g,r = cv2.split(img)\n\t\timg = cv2.merge([r,g,b])\n\n\t\t##Empty lists to populate\n\t\tTrees = []\n\t\tR_mean = []\n\t\tG_mean = []\n\t\tB_mean = []\n\t\tR_SD = []\n\t\tG_SD = []\n\t\tB_SD = []\n\t\tRCC = []\n\t\tGCC = []\n\t\tBCC = []\n\t\tExG = []\n\n\t\t##Pixel Analysis\n\t\tprint(\"Starting pixel analysis.\")\n\t\tdef pixel_analysis(i):\n\t\t# for i in tqdm(range(1, np.amax(treecrowns+1))):\n\n\t\t\t#Individual Tree Number\n\t\t\tTrees.append(i)\n\n\t\t\t#Finding tree crowns in array\n\t\t\tlocations = np.where(treecrowns == i)\n\n\t\t\t#Extract based on tree crown cells\n\t\t\tvalues = img[locations]\n\t\t\tvalues = np.ma.masked_equal(values, 0)\n\t\t\tvalues_table = pd.DataFrame(values, columns = [\"R\", \"G\", \"B\"]) #Edit to speed up\n\n\t\t\t#Calculating mean for each colour channel\n\t\t\tRmean = values_table[\"R\"].mean()\n\t\t\tGmean = values_table[\"G\"].mean()\n\t\t\tBmean = values_table[\"B\"].mean()\n\n\t\t\t#Calculating standard deviation for each colour channel\n\t\t\tRsd = values_table[\"R\"].std()\n\t\t\tGsd = values_table[\"G\"].std()\n\t\t\tBsd = values_table[\"B\"].std()\n\n\t\t\t#Appending results\n\t\t\tR_mean.append(Rmean)\n\t\t\tG_mean.append(Gmean)\n\t\t\tB_mean.append(Bmean)\n\t\t\tR_SD.append(Rsd)\n\t\t\tG_SD.append(Gsd)\n\t\t\tB_SD.append(Bsd)\n\n\t\t\t#Calculating overall brightness\n\t\t\trgb = (Rmean + Gmean + Bmean)\n\n\t\t\t#Calculating chromatic coordinates for each channel\n\t\t\trcc = Rmean/rgb\n\t\t\tgcc = Gmean/rgb\n\t\t\tbcc = Bmean/rgb\n\t\t\texg = (2*Gmean)/(Rmean+Bmean)\n\n\t\t\t#Appending chromatic coordinates to lists\n\t\t\tGCC.append(gcc)\n\t\t\tRCC.append(rcc)\n\t\t\tBCC.append(bcc)\n\t\t\tExG.append(exg)\n\t\t# *map(pixel_analysis, tqdm(range(1, np.amax(treecrowns+1)))),\n\t\t[pixel_analysis(i) for i in tqdm(range(1, np.amax(treecrowns+1)))]\n\n\t\tprint(\"Pixel Analysis Completed.\")\n#~ #----------------------------------------------------------------------#\n#~ #----------------------------------------------------------------------#\n\n\t\t#########################\n\t\t## CREATING DATAFRAMES ##\n\t\t#########################\n\n\t\t##Converting results table to dataframe\n\t\tpixels_df = pd.DataFrame({\"Tree_Crown_ID\": Trees,\n\t\t\t\t\t\t\t\t \"R_Mean\": R_mean, \"G_Mean\": G_mean, \"B_Mean\": B_mean,\n\t\t\t\t\t\t\t\t \"R_StDev\": R_SD, \"G_StDev\": G_SD, \"B_StDev\": B_SD,\n\t\t\t\t\t\t\t\t \"RCC\": RCC, \"GCC\": GCC, \"BCC\": BCC, \"ExG\": ExG})\n\t\tvariables_df = pd.DataFrame({\"Date\" : Date, \"Start_Time\": Start_Time,\n\t\t\t\t\t\t\t\t\t \"Flight_Number\": FlightNo, \"Location\": Location,\n\t\t\t\t\t\t\t\t\t \"Software\": Software, \"Light\": Light, \"Cloud\": Cloud,\n\t\t\t\t\t\t\t\t\t \"Wind\": Wind, \"Height\": Height})\n\n\t\t##Matching dataframe lenghts\n\t\trepeat_variables_df = pd.concat([variables_df]*len(pixels_df), ignore_index=True)\n\n\t\t##Combining dataframe\n\t\tcombined_df = pd.concat([repeat_variables_df, pixels_df], axis = 1)\n\n\t\t##Rearranging dataframe\n\t\t#~ results_table = results_table[[\"Number\", \"Date\", \"Location\", \"Software\", \"Start_Time\", \"Height\", \"Light\", \"Cloud\", \"Wind\", \"Mean_Green\", \"GCC\", \"Mean_Red\", \"RCC\", \"Mean_Blue\", \"BCC\", \"ExG\"]]\n\n\t\t## Saving dataframe to new csv or existing csv\n\t\t#Command line arguments to name the csv file\n\t\tif not os.path.isfile(argv[1]):\n\t\t\tcombined_df.to_csv(argv[1], index=False)\n\t\telse:\n\t\t\twith open(argv[1], \"a\") as f:\n\t\t\t\tcombined_df.to_csv(f, header = False, index=False)\n\n\t##Using map or list comprehension\n\t# *map(image_analysis, tqdm(glob.glob(\"../Data/LongTermStudy/Orthomosaics/Georeferenced/\"+\"*.tif\"))),\n\t[image_analysis(f) for f in tqdm(glob.glob(\"../Data/LongTermStudy/Orthomosaics/Georeferenced/\"+\"*.tif\"))]\n\n\t##Calculating time elapsed\n\tendtime = time.time()\n\thours, rem = divmod(endtime-starttime, 3600)\n\tminutes, seconds = divmod(rem, 60)\n\tprint(\"{:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds))\n\n#----------------------------------------------------------------------#\n#----------------------------------------------------------------------#\n\nif(__name__ == \"__main__\"):\n\tstatus = main(sys.argv)\n\n#End\n","repo_name":"DroneEcology/tropical-forest-phenology","sub_path":"imageanalysis4.py","file_name":"imageanalysis4.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"40275627976","text":"from django.urls import path\nfrom . import views\n\napp_name = \"authenticator\"\n\nurlpatterns = [\n path('', views.signup, name=\"new_user\"),\n path('login/', views.signin, name=\"existing_user\"),\n path('authenticated/dashboard/', views.dashboard, name=\"my_dashboard\"),\n path('logout/', views.signout, name=\"logout_user\")\n]\n","repo_name":"abdulrahim-uj/school_store","sub_path":"authenticator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72813439507","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 15 11:44:55 2023\n\n@author: 28706664\n\"\"\"\n\n\"\"\"\non prend en entré une une liste de triple (ri,pi,qi) et on retourne un ordonancement\n\n\"\"\"\n#import networkx as nx\nimport copy\nimport random\nimport numpy as np\n#import matplotlib.pyplot as plt\nfrom graphe_from_tache import *\n\n\ndef tri_liste(li):\n ordre=[0]*len(li)\n for i in range(len(li)):\n ordre[i]=i\n #source:https://stackoverflow.com/questions/13979714/heap-sort-how-to-sort\n def swap(i, j): \n li[i], li[j] = li[j], li[i] \n ordre[i], ordre[j] = ordre[j], ordre[i]\n\n def heapify(end,i): \n l=2 * i + 1 \n r=2 * (i + 1) \n max=i \n if l < end and li[i][2] < li[l][2]: \n max = l \n if r < end and li[max][2] < li[r][2]: \n max = r \n if max != i: \n swap(i, max) \n heapify(end, max) \n\n def heap_sort(): \n end = len(li) \n start = end // 2 - 1 # use // instead of /\n for i in range(start, -1, -1): \n heapify(end, i) \n for i in range(end-1, 0, -1): \n swap(i, 0) \n heapify(i, 0)\n \n def sift_down(start, end): \n root = start \n\n while (root * 2 + 1) <= end: \n child = root * 2 + 1 \n temp = root \n if li[temp] < li[child]: \n temp = child+1 \n if temp != root: \n swap(root, temp) \n root = temp \n else: \n return\n heap_sort()\n # print(li)\n return li,ordre\n\ndef ordo_from_tache(li,m :int,G,pri=True):#return un résiduel(graphe de avec des valeurs de flow)\n ordo_fontionnel=True\n \n node_pos_G=copy.deepcopy(nx.get_node_attributes(G,'pos'))\n arc_capacity_G=copy.deepcopy(nx.get_edge_attributes(G,'capacity'))\n if(pri):\n nx.draw_networkx_edges(G, node_pos_G,edge_color= \"black\")\n nx.draw_networkx_edge_labels(G, node_pos_G, edge_labels=arc_capacity_G,label_pos=0.7)\n\n li_trie,ordre=tri_liste(li)#trie nos tâches par ordre croissant des deadlines\n R = G # résiduel retourné\n arretes_de_bases=list(G.edges) #on save les arrêtes du début\n capacity_de_bases=list(G.edges.data('capacity'))\n usage=len(capacity_de_bases)*[0] #controlleur de surcharge sur les dernières arretes\n nb_nodes=R.number_of_nodes()\n for noeud1 in range(len(li_trie)): #on enlève juste les arrêtes qui sont au millieu\n for noeud2 in range(nb_nodes-2-len(li_trie)):\n if((noeud1+1, noeud2+1+len(li_trie)) in list(G.edges)):\n R.remove_edge(noeud1+1, noeud2+1+len(li_trie))\n for i in range(len(li_trie)): #on parcours toutes les tâches\n for j in range(len(li_trie)): #on parcours toutes les tâches\n if(ordre[j]==i): #si je suis sur le noeud que je dois traiter (par ordre de priorite sur la deadline)\n qt_flot=G[0][j+1][\"capacity\"]\n for intervalle in range(nb_nodes-2-len(li_trie)): #on parcours les intervalles\n intervalle_pos=intervalle+len(li_trie)+1\n arrete_dest=0\n for a in range(len(capacity_de_bases)): #on cherche l'arrete qui est derrière\n if((capacity_de_bases[a][0]==intervalle_pos and capacity_de_bases[a][1]==nb_nodes-1)or(capacity_de_bases[a][1]==intervalle_pos and capacity_de_bases[a][0]==nb_nodes-1)):\n arrete_dest=a\n if(((j+1,intervalle_pos) in arretes_de_bases) and qt_flot>0 and capacity_de_bases[arrete_dest][2]-usage[arrete_dest]>0): #si l'arrête existe et que il nous reste du flot et que 'arrête suivante n'est pas déjà surchargée\n for arrete_act in capacity_de_bases:\n if(j+1==arrete_act[0] and intervalle_pos==arrete_act[1]): #on cherche ici à obtenir la capacity de notre arrete\n new_cap=min(arrete_act[2], qt_flot, capacity_de_bases[arrete_dest][2]-usage[arrete_dest]) #on calcule alors le flot qui va aller dans cette arrete\n usage[arrete_dest]+=new_cap #on met à jour le controlleur\n if(pri):\n print(j+1,intervalle_pos,\" : \",new_cap)\n R.add_edge(j+1, intervalle_pos, capacity=new_cap) #on envoi tt la capacité ou le flot qu'il nous reste ou suffisament pour remplir l'arrête suivante\n qt_flot-=new_cap #on met à jour notre flot restant\n if (qt_flot>0): #si on à pas réussi à vider le flot alors ça ne fonctionne pas et on rajoute une croix rouge\n # source : https://www.includehelp.com/python/cross-x-scatter-marker-in-matplotlib.aspx\n x,y=node_pos_G[j+1]\n ss = 200\n c = 1\n plt.scatter(x/2,y/2, s=ss, c=c, marker='X', cmap='Reds_r')\n ordo_fontionnel=False\n\n if(pri):\n if(ordo_fontionnel):\n print(\"ordo fonctionnel\")\n else:\n print(\"ordo non fonctionnel\")\n\n #source : http://avinashu.com/tutorial/pythontutorialnew/NetworkXBasics.html\n node_pos=nx.get_node_attributes(R,'pos')\n # The edge capacitys of each arcs are stored in a dictionary\n arc_capacity=nx.get_edge_attributes(R,'capacity')\n # If the nodes is in the shortest path, set it to red, else set it to white color\n node_col = 'white'\n # If the edge is in the shortest path set it to red, else set it to white color\n edge_col = 'blue'\n # Draw the nodes\n nx.draw_networkx(R, node_pos,node_color= node_col, node_size=450)\n # Draw the edges\n nx.draw_networkx_edges(R, node_pos,edge_color= edge_col)\n # Draw the edge labels\n nx.draw_networkx_edge_labels(R, node_pos, edge_labels=arc_capacity,label_pos=0.7,font_color='red')\n # Remove the axis\n plt.axis('off')\n # Show the plot\n plt.show()\n return R\n# lt=[(0,4,4),(0,3,8),(0,5,9)]\n#lt=[(3, 2, 7), (3, 2, 5), (1, 3, 5)]\n#lt=random_values(3,10)\n#G,s,t=graphe_from_taches(lt,2)\n#ordo_from_tache(lt,2,G)","repo_name":"MalenferFrncs/projet_de_recherche_2023_ordonancement","sub_path":"ordo_from_tache.py","file_name":"ordo_from_tache.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"40419354743","text":"from game import Game\n\ndef test_saveScore(tmpdir):\n data_in = \"13213\"\n fpath = f\"{tmpdir}/test.txt\"\n Game.saveScore(fpath,data_in)\n\n with open(fpath) as file_out:\n data_out = file_out.read()\n assert data_in == 'data_out'","repo_name":"sonhm3029/Mediapipe-Game-collections","sub_path":"dragFigure/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"18467577400","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nimport math\nimport random\nimport Network as nt\n\n\ndef accuracy(array,array_labels, weights1,weights2_1,weights2_2,weights2_3,weights3,biases2_1,biases2_2,biases2_3,biases3):\n count=0\n for i in range(array.shape[0]):\n output_softmax=nt.forward_propagation(array[i,:], weights1,weights2_1,weights2_2,weights2_3,weights3,biases2_1,biases2_2,biases2_3,biases3)\n if(np.argmax(output_softmax)==array_labels[i]):\n count=count+1\n return count/array.shape[0]\n \ntrain=np.load(\"data/data/train_inputs.npy\")\ntrain_labels=np.load(\"data/data/train_targets.npy\")\n\nvalid=np.load(\"data/data/valid_inputs.npy\")\nvalid_labels=np.load(\"data/data/valid_targets.npy\")\n\n\ntrain_zip = list(zip(train, train_labels))\nrandom.shuffle(train_zip)\ntrain, train_labels = zip(*train_zip)\ntrain=np.asarray(train)\ntrain_labels=np.asarray(train_labels)\n\n\nval_zip = list(zip(valid, valid_labels))\nrandom.shuffle(val_zip)\nvalid, valid_labels = zip(*val_zip)\nvalid=np.asarray(valid)\nvalid_labels=np.asarray(valid_labels)\n\nweights1, weights2_1,biases2_1,weights2_2,biases2_2,weights2_3,biases2_3,weights3,biases3=nt.initialization()\n\nlearning_rate=0.01\nepoch=10\nfor epoch in range(epoch):\n count=0\n for j in range(train.shape[0]): \n weights1,weights2_1,weights2_2,weights2_3,weights3,biases2_1,biases2_2,biases2_3,biases3=nt.forward_backward_propagation(train[j,:],train_labels[j],learning_rate,weights1,weights2_1,weights2_2,weights2_3,weights3,biases2_1,biases2_2,biases2_3,biases3)\n print(\"Epoch:\"+ str(epoch+1)+\"\\n\")\n print(\" Training Accuracy:\"+str(accuracy(train,train_labels, weights1,weights2_1,weights2_2,weights2_3,weights3,biases2_1,biases2_2,biases2_3,biases3)))\n print(\" Validation Accuracy:\"+str(accuracy(valid,valid_labels, weights1,weights2_1,weights2_2,weights2_3,weights3,biases2_1,biases2_2,biases2_3,biases3))) \n \nnp.savez('model.npz', model_1=weights1,model_2=weights2_1,model_3=weights2_2,model_4=weights2_3,model_5=weights3,model_6=biases2_1,model_7=biases2_2,model_8=biases2_3,model_9=biases3)\n \n\n","repo_name":"barisbb/Neural-Language-Model","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36841334572","text":"# TODO: efficiency of runtime\n# defaultdict?\n\n\nclass Solution(object):\n def dfs_search(self, eqmap, dag, visited, sfrom, sto):\n #print(sfrom)\n #if (sfrom, sto) not in eqmap:\n # return -1.0, False\n \n #if sto == sfrom: # arrived the end of one query path\n # return 1.0, True\n # keep searching, mark sfrom node as visited \n #visited[sfrom] = 1\n # traverse the children node of sfrom\n #print(visited)\n if sfrom not in visited:\n return -1.0, False\n if sfrom == sto:\n return 1.0, True\n visited[sfrom] = 1\n #print(sfrom)\n for it in dag[sfrom]:\n #print(dag[sfrom])\n #print(sfrom, it)\n #print(visited)\n if (sfrom, it) in eqmap and visited[it] == 0:\n #visited[it] = 1\n #print((sfrom, it))\n res, flag = self.dfs_search(eqmap, dag, visited, it, sto)\n if flag == True: # if one path is obtain\n return res*eqmap[(sfrom, it)], True\n # there is no path seen from sfrom, return false and unmark visited of sfrom\n visited[sfrom] = 0\n return -1.0, False\n\n \n def calcEquation(self, equations, values, queries):\n ret = []\n eqmap = dict()\n dag = dict()\n visited = dict()\n sz = len(values)\n for it in range(0, sz):\n (sfrom, sto) = equations[it]\n if sfrom not in dag:\n dag[sfrom] = []\n dag[sfrom].append(sto)\n visited[sfrom] = 0\n eqmap[(sfrom, sto)] = values[it]\n if sto not in dag:\n dag[sto] = []\n dag[sto].append(sfrom)\n visited[sto] = 0\n eqmap[(sto, sfrom)] = 1.0/values[it]\n \n #print(eqmap)\n\n #print(dag)\n \n for it in queries:\n (sfrom, sto) = it\n res, flag = self.dfs_search( eqmap, dag, visited, sfrom, sto)\n for it in visited:\n visited[it]=0\n \n #print(visited)\n if flag == False:\n ret.append(-1.0)\n else:\n ret.append(res)\n return ret\n\n\n\n def calcEquationFloydWarshall(self, equations, values, queries):\n mm = collections.defaultdict(dict)\n for it in range(0, len(values)):\n (sfrom, sto) = equations[it]\n mm[sfrom][sto] = values[it]\n mm[sto][sfrom] = 1.0/values[it]\n mm[sfrom][sfrom] = 1\n mm[sto][sto] = 1\n \n #for it in queries:\n # (sfrom, sto) = it \n for k in mm:\n for i in mm[k]:\n for j in mm[k]:\n mm[i][j] = mm[i][k] * mm[k][j]\n \n return [mm[sfrom].get(sto, -1.0) for sfrom, sto in queries ]\n \n\n \n def calcEquationDFS(self, equations, values, queries):\n import collections\n Adj = collections.defaultdict(set)\n weight = {}\n for (x, y), z in zip(equations, values):\n weight[(x,y)]= z\n weight[(y,x)] = 1.00/z\n Adj[x].add(y)\n Adj[y].add(x)\n\n\n def DFS(u, v, product = 1.0, visited=set()):\n if u == v and Adj[u] :\n return product\n p = None\n visited.add(u)\n for x in Adj[u]:\n if x not in visited:\n p = DFS(x, v, product*weight[(u, x)], visited)\n if p:\n break\n visited.remove(u)\n return p\n\n ret = []\n for s, t in queries:\n p = DFS(s,t)\n ret.append(p if p else -1.0)\n return ret\n\n \"\"\"\n Adj = collections.defaultdict(list)\n weights = {} \n for (t, s), v in zip(equations, values):\n Adj[s] += t,\n Adj[t] += s,\n weights[(s, t)] = v\n weights[(t, s)] = 1. / v\n\n def DFS_visit(u, t, product=1., visited=set()):\n if u == t and Adj[u]: \n return product\n \n visited.add(u)\n p = None\n for v in Adj[u]:\n if v not in visited:\n p = DFS_visit(v, t, product * weights[(u, v)], visited)\n \n # If any search reaches t, then we are done. Otherwise, try others.\n if p:\n break\n visited.remove(u)\n return p\n\n result = []\n for t, s in queries:\n p = DFS_visit(s, t) \n result.append(p if p else -1.0)\n \n return result \n \"\"\"\n \n# https://leetcode.com/problems/evaluate-division\n\n\na = Solution()\nequations = [ [\"a\", \"b\"], [\"b\", \"c\"] ] \nvalues = [2.0, 3.0]\nqueries = [ [\"a\", \"c\"], [\"b\", \"a\"], [\"a\", \"e\"], [\"a\", \"a\"], [\"x\", \"x\"] ]\n\nprint(equations)\nprint(values)\nprint(queries)\nprint(a.calcEquationDFS(equations, values, queries))\nequations = [[\"a\",\"b\"],[\"b\",\"c\"],[\"bc\",\"cd\"]]\nvalues=[1.5,2.5,5.0]\nqueries = [[\"a\",\"c\"],[\"c\",\"b\"],[\"bc\",\"cd\"],[\"cd\",\"bc\"]]\nprint(a.calcEquationDFS(equations, values, queries))\n","repo_name":"zhuangh/OJ","sub_path":"leetcode/py/calcEquation.py","file_name":"calcEquation.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"43843423545","text":"import tkinter\r\nimport random \r\nX_MAX, Y_MAX = 800, 600\r\ncanvas=tkinter.Canvas(width=X_MAX,height=Y_MAX, bg = \"white\")\r\ncanvas.pack()\r\n\r\nr = 20\r\n\r\nglobal farba, hrubka\r\nfarba = \"black\"\r\nhrubka = 1\r\n\r\ndef paleta(x, y, r):\r\n canvas.create_rectangle(x-r, y-r, x+r, y+r, fill=\"red\")\r\n canvas.create_rectangle(x+r, y-r, x+3*r, y+r, fill=\"green\")\r\n canvas.create_rectangle(x+3*r, y-r, x+5*r, y+r, fill=\"blue\")\r\n canvas.create_rectangle(x-3*r, y-r, x-r, y+r)\r\n canvas.create_oval(x-3*r+5, y-r+5, x-r-5, y+r-5, fill=\"black\")\r\n canvas.create_rectangle(x-5*r, y-r, x-3*r, y+r)\r\n canvas.create_oval(x-5*r+10, y-r+10, x-3*r-10, y+r-10, fill=\"black\")\r\n canvas.create_rectangle(x-7*r, y-r, x-5*r, y+r)\r\n canvas.create_oval(x-7*r+15, y-r+15, x-5*r-15, y+r-15, fill=\"black\")\r\n canvas.create_rectangle(x-9*r, y-r, x-7*r, y+r)\r\n canvas.create_text(x-8*r, y, font = (\"Arial\", 7), text = \"erase all\")\r\n canvas.create_rectangle(x+5*r, y-r, x+7*r, y+r, fill = \"gray\")\r\n canvas.create_rectangle(x+5*r+10, y-r+7, x+7*r-10, y+r-7, fill = \"white\")\r\n canvas.create_text(x+6*r, y, font = (\"Arial\", 7), text = \"guma\")\r\n\r\ndef kreslenie(event):\r\n global a, b, farba, hrubka\r\n xx, yy = event.x, event.y\r\n canvas.create_line(a, b, xx, yy, fill = farba, width = hrubka)\r\n a, b = xx, yy\r\n \r\ndef klik(event):\r\n global a, b, farba, hrubka\r\n xx, yy = event.x, event.y\r\n a, b = xx, yy\r\n if X_MAX/2-r < xx < X_MAX/2+r and Y_MAX-r-r < yy < Y_MAX:\r\n farba = \"red\"\r\n\r\n elif X_MAX/2+r < xx < X_MAX/2+3*r and Y_MAX-r-r < yy < Y_MAX:\r\n farba = \"green\"\r\n\r\n elif X_MAX/2+3*r < xx < X_MAX/2+5*r and Y_MAX-r-r < yy < Y_MAX:\r\n farba = \"blue\"\r\n\r\n elif X_MAX/2-3*r < xx < X_MAX/2-r and Y_MAX-r-r < yy < Y_MAX:\r\n hrubka = 10\r\n\r\n elif X_MAX/2-5*r < xx < X_MAX/2-3*r and Y_MAX-r-r < yy < Y_MAX:\r\n hrubka = 5\r\n\r\n elif X_MAX/2-7*r < xx < X_MAX/2-5*r and Y_MAX-r-r < yy < Y_MAX:\r\n hrubka = 1\r\n\r\n elif X_MAX/2-9*r < xx < X_MAX/2-7*r and Y_MAX-r-r < yy < Y_MAX:\r\n canvas.delete(\"all\")\r\n paleta(X_MAX/2, Y_MAX-r, r)\r\n\r\n elif X_MAX/2+5*r < xx < X_MAX/2+7*r and Y_MAX-r-r < yy < Y_MAX:\r\n farba = \"white\"\r\n hrubka = 10\r\n\r\n# else:\r\n# print('snezi')\r\n \r\ndef pusti(event):\r\n global a, b\r\n a,b=0,0\r\n \r\npaleta(X_MAX/2, Y_MAX-r, r)\r\n\r\ncanvas.bind('', klik)\r\ncanvas.bind('', kreslenie)\r\ncanvas.bind('', pusti)\r\n\r\n","repo_name":"ZPistovcakova/kvinta-mesiac-inf","sub_path":"skicar.py","file_name":"skicar.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"32992270061","text":"import sys\r\ninput = sys.stdin.readline\r\ndef main(): \r\n l = []\r\n dic = {}\r\n N, M = map(int,input().split())\r\n for _ in range(N):\r\n word = input().strip()\r\n if len(word) >= M:\r\n l.append(word)\r\n l.sort()\r\n l.sort(key=len,reverse=True)\r\n for i in l:\r\n if i in dic:\r\n dic[i] +=1\r\n else:\r\n dic[i] = 1\r\n dic = dict(sorted(dic.items(), key=lambda x: x[1], reverse=True))\r\n for y in dic.keys():\r\n print(y)\r\nmain()","repo_name":"kimdahee7/CodingTest_Python","sub_path":"백준/Silver/20920. 영단어 암기는 괴로워/영단어 암기는 괴로워.py","file_name":"영단어 암기는 괴로워.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"38930980432","text":"\nimport re\nfh = open('raven.txt')\nx = raw_input (\"enter word\")\n\nif re-search (r\"[x]\",fh,re.M):\n\n print(x + \"has been found\")\nelse:\n print (\"sorry \" +x+ \"not found\")\n","repo_name":"mwongeraE/python","sub_path":"spellChecker/wordserch.py","file_name":"wordserch.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30526226154","text":"\"\"\"\nA simple SAT solver in Python.\n\"\"\"\n\n# loom:start(classes)\nclass Expr:\n pass\n\n\nclass FalseExpr(Expr):\n pass\n\n\nclass TrueExpr(Expr):\n pass\n\n\nclass Var(Expr):\n def __init__(self, name: str):\n self.name = name\n\n\nclass Not(Expr):\n def __init__(self, expr: Expr):\n self.expr = expr\n\n\nclass And(Expr):\n def __init__(self, exprs: list[Expr]):\n self.exprs = exprs\n\n\nclass Or(Expr):\n def __init__(self, exprs: list[Expr]):\n self.exprs = exprs\n\n\nclass Impl(Expr):\n def __init__(self, p: Expr, q: Expr):\n self.p = p\n self.q = q\n\n\n# loom:end(classes)\n\n# loom:start(replace)\ndef replace(e: Expr, name: str, value: bool) -> Expr:\n if isinstance(e, FalseExpr):\n return FalseExpr()\n elif isinstance(e, TrueExpr):\n return TrueExpr()\n elif isinstance(e, Var):\n if e.name == name:\n return TrueExpr() if value else FalseExpr()\n else:\n return Var(e.name)\n elif isinstance(e, Not):\n return Not(replace(e.expr, name, value))\n elif isinstance(e, And):\n return And([replace(expr, name, value) for expr in e.exprs])\n elif isinstance(e, Or):\n return Or([replace(expr, name, value) for expr in e.exprs])\n elif isinstance(e, Impl):\n return Impl(replace(e.p, name, value), replace(e.q, name, value))\n else:\n raise TypeError(\"Invalid expression type\")\n\n\n# loom:end(replace)\n\n# loom:start(eval)\ndef eval_expr(e: Expr) -> bool:\n if isinstance(e, FalseExpr):\n return False\n elif isinstance(e, TrueExpr):\n return True\n elif isinstance(e, Var):\n raise ValueError(f\"eval: the variable {e.name} has not been replaced.\")\n elif isinstance(e, Not):\n return not eval_expr(e.expr)\n elif isinstance(e, And):\n return all(eval_expr(expr) for expr in e.exprs)\n elif isinstance(e, Or):\n return any(eval_expr(expr) for expr in e.exprs)\n elif isinstance(e, Impl):\n return (not eval_expr(e.p)) or eval_expr(e.q)\n else:\n raise TypeError(\"Invalid expression type\")\n\n\n# loom:end(eval)\n\n# loom:start(free)\ndef free(e: Expr) -> set[str]:\n if isinstance(e, FalseExpr) or isinstance(e, TrueExpr):\n return set()\n elif isinstance(e, Var):\n return {e.name}\n elif isinstance(e, Not):\n return free(e.expr)\n elif isinstance(e, And):\n return set().union(*[free(expr) for expr in e.exprs])\n elif isinstance(e, Or):\n return set().union(*[free(expr) for expr in e.exprs])\n elif isinstance(e, Impl):\n return free(e.p).union(free(e.q))\n else:\n raise TypeError(\"Invalid expression type\")\n\n\ndef any_var(e: Expr) -> str | None:\n variables: list[str] = sorted(list(free(e)))\n if len(variables) == 0:\n return None\n else:\n return variables[0]\n\n\n# loom:end(free)\n\n# loom:start(solver)\nBindings = dict[str, bool]\n\n\ndef solve(e: Expr) -> Bindings | None:\n return solver(e, {})\n\n\ndef solver(e: Expr, bs: Bindings) -> Bindings | None:\n free_var = any_var(e)\n if free_var is None:\n if eval_expr(e):\n return bs\n else:\n return None\n else:\n # Replace with True.\n t: Expr = replace(e, free_var, True)\n t_bs: Bindings = dict(bs)\n t_bs[free_var] = True\n # Replace with False.\n f: Expr = replace(e, free_var, False)\n f_bs: Bindings = dict(bs)\n f_bs[free_var] = False\n # Solve both branches, and return the first one that works.\n return solver(t, t_bs) or solver(f, f_bs)\n\n\n# loom:end(solver)\n\n\ndef string_of_expr(e: Expr) -> str:\n if isinstance(e, FalseExpr):\n return r\"\\mathbf{F}\"\n elif isinstance(e, TrueExpr):\n return r\"\\mathbf{T}\"\n elif isinstance(e, Var):\n return r\"\\text{\" + e.name + \"}\"\n elif isinstance(e, Not):\n return r\"\\neg\" + string_of_expr(e.expr)\n elif isinstance(e, And):\n return \"(\" + r\" \\land \".join(string_of_expr(expr) for expr in e.exprs) + \")\"\n elif isinstance(e, Or):\n return \"(\" + r\" \\lor \".join(string_of_expr(expr) for expr in e.exprs) + \")\"\n elif isinstance(e, Impl):\n return \"(\" + string_of_expr(e.p) + r\"\\implies\" + string_of_expr(e.q) + \")\"\n else:\n raise TypeError(\"Invalid expression type\")\n\n\n#\n# Example\n#\n\n# loom:start(deps)\nfrom dataclasses import dataclass\n\n\n@dataclass(frozen=True)\nclass Dependency:\n name: str\n minimum: int\n maximum: int\n\n\n@dataclass(frozen=True)\nclass Package:\n name: str\n version: int\n depends_on: list[Dependency]\n\n\npackages: list[Package] = [\n Package(\n \"app\",\n 0,\n [\n Dependency(\"sql\", 2, 2),\n Dependency(\"threads\", 2, 2),\n Dependency(\"http\", 3, 4),\n Dependency(\"stdlib\", 4, 4),\n ],\n ),\n Package(\"sql\", 0, []),\n Package(\"sql\", 1, [Dependency(\"stdlib\", 1, 4), Dependency(\"threads\", 1, 1)]),\n Package(\"sql\", 2, [Dependency(\"stdlib\", 2, 4), Dependency(\"threads\", 1, 2)]),\n Package(\"threads\", 0, [Dependency(\"stdlib\", 2, 4)]),\n Package(\"threads\", 1, [Dependency(\"stdlib\", 2, 4)]),\n Package(\"threads\", 2, [Dependency(\"stdlib\", 3, 4)]),\n Package(\"http\", 0, [Dependency(\"stdlib\", 0, 3)]),\n Package(\"http\", 1, [Dependency(\"stdlib\", 0, 3)]),\n Package(\"http\", 2, [Dependency(\"stdlib\", 1, 4)]),\n Package(\"http\", 3, [Dependency(\"stdlib\", 2, 4)]),\n Package(\"http\", 4, [Dependency(\"stdlib\", 3, 4)]),\n]\n# loom:end(deps)\n\n# loom:start(convert)\nfrom itertools import combinations\n\n\ndef convert(root: str, packages: list[Package]) -> Expr:\n \"\"\"\n Given a package-version to use as the root of the build DAG, and a list of\n package dependency constraints, convert them into a logical expression.\n \"\"\"\n # First things first: we need the root package to be part of the assignment.\n terms: list[Expr] = [Var(root)]\n # Add implications.\n for p in packages:\n # Package versions imply their dependencies.\n for dep in p.depends_on:\n versions: list[int] = list(range(dep.minimum, dep.maximum + 1))\n deps: list[Expr] = [package_var(dep.name, v) for v in versions]\n impl: Impl = Impl(package_var(p.name, p.version), Or(deps))\n terms.append(impl)\n # Exclude every pair of versions. We do this by taking the set.of free\n # variables in the expression built up so far, and for each package depended\n # upon, we find all the versions mentioned for it and pairwise exclude them.\n variables: set[str] = free(And(terms))\n varnames: set[str] = set([var_name(v) for v in variables])\n for name in varnames:\n vers: set[int] = {var_version(v) for v in variables if var_name(v) == name}\n for a, b in all_combinations(vers):\n terms.append(Not(And([package_var(name, a), package_var(name, b)])))\n # Finally, return the built up expression as a conjunction.\n return And(terms)\n\n\ndef package_var(name: str, version: int) -> Var:\n return Var(f\"{name}-v{version}\")\n\n\ndef var_name(var: str) -> str:\n return var.split(\"-v\")[0]\n\n\ndef var_version(var: str) -> int:\n return int(var.split(\"-v\")[1])\n\n\ndef all_combinations(lst: set[int]) -> list[tuple[int, int]]:\n return list(combinations(lst, 2))\n\n\n# loom:end(convert)\n\n# loom:start(conversion)\nformula: Expr = convert(\"app-v0\", packages)\n# loom:end(conversion)\n\n\ndef pretty_print(expr: And):\n print(\"$$\")\n print(r\"\\begin{align*}\")\n for e in expr.exprs:\n print(f\"\\\\land ~~ &{string_of_expr(e)} \\\\\\\\\")\n print(r\"\\end{align*}\")\n print(\"$$\")\n\n\npretty_print(formula)\n\n# loom:start(solution)\nbs: Bindings | None = solve(formula)\nif bs is not None:\n for k, v in sorted(bs.items(), key=lambda p: p[0]):\n print(k, v)\n# loom:end(solution)\n\nif bs is not None:\n print(\"| Variable | Value |\")\n print(\"| -------- | ----- |\")\n for k, v in sorted(bs.items(), key=lambda p: p[0]):\n b: str = r\"\\true\" if v else r\"\\false\"\n print(f\"| `{k}` | ${b}$ |\")\n\nif bs is not None:\n print(\"| Package | Version |\")\n print(\"| ------- | ------- |\")\n for k, v in sorted(bs.items(), key=lambda p: p[0]):\n if v:\n print(f\"| `{var_name(k)}` | {var_version(k)} |\")\n\n# loom:start(any_var_latest)\ndef any_var_latest(e: Expr) -> str | None:\n # Sort variables alphabetically, for determinism.\n variables: list[str] = sorted(list(free(e)))\n if len(variables) == 0:\n return None\n else:\n # Return the last one, the highest version number for the\n # (alphabetically) last package.\n return variables[-1]\n# loom:end(any_var_latest)\n\n# loom:start(solver_latest)\nBindings = dict[str, bool]\n\n\ndef solve_latest(e: Expr) -> Bindings | None:\n return solver_latest(e, {})\n\n\ndef solver_latest(e: Expr, bs: Bindings) -> Bindings | None:\n free_var = any_var_latest(e)\n if free_var is None:\n if eval_expr(e):\n return bs\n else:\n return None\n else:\n # Replace with True.\n t: Expr = replace(e, free_var, True)\n t_bs: Bindings = dict(bs)\n t_bs[free_var] = True\n # Replace with False.\n f: Expr = replace(e, free_var, False)\n f_bs: Bindings = dict(bs)\n f_bs[free_var] = False\n # Solve both branches, and return the first one that works.\n return solver_latest(t, t_bs) or solver_latest(f, f_bs)\n# loom:end(solver_latest)\n\nlatest_bs: Bindings | None = solve_latest(formula)\nif latest_bs is not None:\n print(\"| Package | Version |\")\n print(\"| ------- | ------- |\")\n for k, v in sorted(latest_bs.items(), key=lambda p: p[0]):\n if v:\n print(f\"| `{var_name(k)}` | {var_version(k)} |\")\n","repo_name":"eudoxia0/eudoxia0.github.io","sub_path":"assets/content/dependency-resolution-made-simple/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9666,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"}
+{"seq_id":"33946964653","text":"from server.helper.node import Node\r\nfrom math import sqrt\r\n# Program membaca file input\r\n\r\n# menghitung jarak heuristik dengan euclidean\r\ndef euclidean(node1, node2):\r\n coord1 = node1.getCoord()\r\n coord2 = node2.getCoord()\r\n x = (coord1[0] - coord2[0])**2\r\n y = (coord1[1] - coord2[1])**2\r\n return round(sqrt(x + y), 2)\r\n\r\ndef read(filename):\r\n file1 = open(filename, 'r')\r\n\r\n # bikin array of node kosong\r\n graf = []\r\n # baca baris pertama file yaitu jumlah node\r\n jmlNode = int(file1.readline())\r\n # loop sebanyak jumlah node, masukin ke array of node\r\n for i in range(jmlNode):\r\n line = file1.readline().rstrip(\"\\n\").split(\" \")\r\n x = float(line[0])\r\n y = float(line[1])\r\n name = line[2]\r\n graf.append(Node(name, (x, y), None))\r\n\r\n # mengisi atribut connected\r\n for i in range(jmlNode):\r\n # menghasilkan array line dengan len = jmlNode\r\n line = file1.readline().rstrip(\"\\n\").split(\" \")\r\n for j in range(len(line)):\r\n if line[j] != '0':\r\n # masukkan ke connected, contoh format (A,2)\r\n name = graf[j].getName()\r\n bobot = euclidean(graf[i],graf[j])\r\n graf[i].appendToconnected((name, bobot))\r\n\r\n return graf\r\n","repo_name":"ahanprojects/StimaTucil3","sub_path":"src/server/helper/fileinput.py","file_name":"fileinput.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"22353527333","text":"#!/usr/bin/env python3\n\nfrom __future__ import annotations\nimport enum\nfrom typing import Dict, NamedTuple, Optional, Tuple\n\n\nclass Board(NamedTuple):\n rows: int\n cols: int\n east: Set[Tuple[int, int]]\n south: Set[Tuple[int, int]]\n\n def _coords(self, row: int, col: int) -> Tuple[int, int]:\n return (\n row % self.rows,\n col % self.cols,\n )\n\n def nextgen(self) -> Board:\n neast = set()\n nsouth = set()\n # First, east facing.\n for (row, col) in self.east:\n nxt = self._coords(row, col + 1)\n if nxt in self.east or nxt in self.south:\n neast.add((row, col))\n else:\n neast.add(nxt)\n # Then, south facing\n for (row, col) in self.south:\n nxt = self._coords(row + 1, col)\n if nxt in self.south or nxt in neast:\n nsouth.add((row, col))\n else:\n nsouth.add(nxt)\n return Board(self.rows, self.cols, neast, nsouth)\n\n @classmethod\n def parse(cls, data: str) -> Board:\n lines = data.split(\"\\n\")[:-1]\n east = set()\n south = set()\n for row, line in enumerate(lines):\n for col, char in enumerate(line):\n if char == \">\":\n east.add((row, col))\n elif char == \"v\":\n south.add((row, col))\n return cls(len(lines), len(lines[0]), east, south)\n\n def __repr__(self) -> str:\n res = \"\"\n for row in range(self.rows):\n for col in range(self.cols):\n if (row, col) in self.east:\n res += \">\"\n elif (row, col) in self.south:\n res += \"v\"\n else:\n res += \".\"\n res += \"\\n\"\n return res\n\n\n\n\nwith open(\"data/25.txt\") as f:\n data = f.read()\n\nboard = Board.parse(data)\nnxt = board.nextgen()\ni = 1\nwhile board != nxt:\n board = nxt\n nxt = board.nextgen()\n i += 1\nprint(i)\n","repo_name":"kdungs/adventofcode","sub_path":"2021/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14511055605","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.spatial import distance\nimport cv2\n\n\ndef FuzzyCMeans(data, k=2, f=2):\n '''\n Fuzzy C Means Clustering Algorithm\n Original source: https://www.youtube.com/watch?v=FA-hJBu5Bkc by The Academician\n\n Inputs:\n - data: numpy array of dimension n * d\n - n: number of datapoints\n - d: number of dimensions of a datapoint (x, y, z, ...)\n - k: (int, default=2) number of clusters\n - f: (int, default=2) \"fuzziness\" or softness of clustering\n - As f -> infinity, more cluster \"sharing\" (datapoints being in many clusters) occurs\n\n Outputs:\n - centers (numpy array): positions of the clusters\n - datapoint_clusters: datapoints and their corresponding clusters\n - numpy array of dimension n * (d+1) as the extra dimension holds the number of the cluster\n '''\n\n num_datapoints = len(data)\n num_dimensions = len(data[0]) # x, y, z, ...\n mem_values = np.random.dirichlet(np.ones(k), num_datapoints) #dirichlet --> make random mem_values that add to 1 basically\n centers = np.zeros((k, num_dimensions)) # initalize k centers with same dimensions as datapoints, all 0s\n\n # Calculate centroids\n for j in range(k):\n # Sum of all membership values (from datapoints) for a cluster j\n mem_sum = sum(np.power(mem_values[:,j], f)) \n data_mem_sum = 0\n for i in range(num_datapoints):\n # Multiplying the membership values of the datapoint by the datapoint's x, y values\n dp_sum = np.multiply(np.power(mem_values[i, j], f), data[i, :])\n data_mem_sum += dp_sum\n centroid_pos = data_mem_sum / mem_sum\n centers[j] = np.reshape(centroid_pos, num_dimensions) # Update centers positions\n # Recalculate the membership values\n for i in range(num_datapoints):\n # Calculate the total distance to ALL clusters (using Euclidean distance)\n total_dist = 0\n for j in range(k):\n total_dist += np.power(1/distance.euclidean(centers[j, 0:num_dimensions], data[i, 0:num_dimensions]), 1/(f-1))\n # New membership value is equal to the euclidean distance from a datapoint i to cluster j\n # divided by the total distance to all clusters from the same datapoint\n for j in range(k):\n new_weight = np.power((1/distance.euclidean(centers[j, 0:num_dimensions], data[i, 0:num_dimensions])), 1/(f-1)) / total_dist\n mem_values[i,j] = new_weight\n # Decide on a datapoint's primary cluster based on these updated values\n addZeros = np.zeros((num_datapoints, 1))\n datapoint_clusters = np.append(data, addZeros, axis=1)\n for i in range(num_datapoints):\n cluster_num = np.where(mem_values[i] == np.amax(mem_values[i]))\n datapoint_clusters[i, num_dimensions-1] = cluster_num[0]\n return centers, datapoint_clusters\n\n","repo_name":"linschris/clustering-algorithms","sub_path":"code/examples/implementations/FuzzyCMeans.py","file_name":"FuzzyCMeans.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29415309953","text":"import os\nimport time\n\nimport cv2\n\n# from PIL import Image\nts = time.time()\nts = time.time()\n\n# cut_frame=\"/home/mayank_sati/Desktop/crop_image/\"\n# cut_frame=cut_frame+\"image_\"+str(st)+\".jpg\"\n# cv2.imwrite(cut_frame, frame)\n\nsaved_path = \"/home/mayank_sati/Desktop/sorting_light/complete_image_with_diff_name/black/\"\ninput_folder = root = \"/home/mayank_sati/Desktop/sorting_light/complte_data/black\"\nfor root, _, filenames in os.walk(input_folder):\n if (len(filenames) == 0):\n print(\"Input folder is empty\")\n # return 1\n # time_start = time.time()\n for filename in filenames:\n image_path = os.path.join(root, filename)\n image_scale = cv2.imread(image_path, 1)\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%d_%m_%Y_%H_%M_%S_%f')\n cut_frame = saved_path + \"image_\" + str(st) + \".jpg\"\n cv2.imwrite(cut_frame, image_scale)\n","repo_name":"mayanks888/AI","sub_path":"Python/python_code/changefile_name.py","file_name":"changefile_name.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"72207054226","text":"import sys\n\nif __name__ == '__main__':\n N = int(sys.stdin.readline())\n res = 0\n tmp = N - len(str(N)) * 9\n if tmp < 0:\n tmp = 1\n\n for i in range(tmp, N + 1):\n num_list = list(map(int, str(i)))\n res = i + sum(num_list)\n if res == N:\n print(i)\n exit(0)\n\n if i == N:\n print(0)\n","repo_name":"camp5803/data_structure_c_py","sub_path":"boj/class_2/2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73111429906","text":"def print_formatted(number):\r\n # your code goes here\r\n w = len (bin(number)[2:])\r\n for i in range(1, number + 1):\r\n decimal = str(i)\r\n octal = oct(i)[2:]\r\n hexadecimal = hex(i)[2:].upper()\r\n binary = bin(i)[2:]\r\n \r\n print(decimal.rjust(w), octal.rjust(w), hexadecimal.rjust(w), binary.rjust(w))\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n print_formatted(n)","repo_name":"Rmn0Fz/HackerRank_Practice","sub_path":"Hacker_Rank/python-string-formatting.py","file_name":"python-string-formatting.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70206817107","text":"'''open .bin files in subfolders, with imv\n\n** for single-band files only!'''\nimport os\nimport sys\nsep = os.path.sep\nfrom misc import run, args\n\nprint('python3 bin.py')\n\ncmd = 'find ./ -name \"*.bin\"'\nX = [x.strip() for x in os.popen(cmd).readlines()]\nX.sort()\n\nfor x in X:\n fn = x\n cmd = 'imv ' + fn\n run(cmd)\n","repo_name":"bcgov/wps-research","sub_path":"py/bin.py","file_name":"bin.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"70162676946","text":"import sys \nsys.path.append(\"../Controlador\")\nfrom Camaras import Camaras \nfrom conexion import Conexion\nimport pandas as pd\nSELECCIONAR='SELECT * FROM Camaras'\nELIMINAR='DELETE FROM Camaras WHERE id_camara=?'\nINSERTAR='INSERT INTO Camaras(id_camara,id_parqueadero,numero,area,longitud) VALUES(?,?,?,?,?)'\nMODIFICAR=\"UPDATE Camaras set numero=?, area=?, longitud=? where id_camara=?\"\n\nclass Camaras_D:\n\tdef INSERTAR(Camaras):\n\t\tconexion=Conexion.ObtenerConexion()\n\t\tcursor= Conexion.ObtenerCursor(conexion)\n\t\tvalor=[Camaras.id_camara,Camaras.id_parqueadero,Camaras.numero,Camaras.area,Camaras.longitud]\n\t\tcursor.execute(INSERTAR,valor)\n\t\tconexion.commit()\n\t\tprint(\"el registro se guardo\")\n\t\tconexion.close()\n\tdef MODIFICAR(dato):\n\t\tconexion=Conexion.ObtenerConexion()\n\t\tcursor= Conexion.ObtenerCursor(conexion)\n\t\tcursor.execute(MODIFICAR,dato)\n\t\tconexion.commit()\n\t\tprint(\"el registro se actualizo\")\n\t\tconexion.close()\n\tdef ELIMINAR(dato):\n\t\tconexion=Conexion.ObtenerConexion()\n\t\tcursor= Conexion.ObtenerCursor(conexion)\n\t\tcursor.execute(ELIMINAR,dato)\n\t\tconexion.commit()\n\t\tprint(\"el registro se ELIMINO\")\n\t\tconexion.close()\n\tdef consultar():\n\t\tconexion=Conexion.ObtenerConexion()\n\t\tcursor= Conexion.ObtenerCursor(conexion)\n\t\tcursor.execute(SELECCIONAR)\n\t\tdato=cursor.fetchall()\n\t\tconexion.commit()\n\t\tconexion.close()\n\t\treturn dato\t\n\nif __name__ == '__main__':\n\t#Cam1=Camaras(2,5,9,93,56)\n\t#Camaras_D.INSERTAR(Cam1)\n\n\t#dato=(100,50,25,1)\n\t#Camaras_D.MODIFICAR(dato)\n\n\t#dato=(2,)\n\t#Camaras_D.ELIMINAR(dato)\n\n\n\tdatoz=Camaras_D.consultar()\n\tdf=pd.DataFrame(datoz,columns=[\"ID Camaras\",\"ID Parqueadero\",\"Numero\",\"Area\",\"Longitud\"])\n\tprint(df)","repo_name":"santiagoHernandezM/Proyecto-parqueadero-python","sub_path":"Modelo/Camaras_D.py","file_name":"Camaras_D.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"38900244570","text":"import numpy as np\nimport dask\nimport dask.array as dsa\nfrom dask.base import tokenize, normalize_token\nimport xarray as xr\nimport warnings\n\nfrom .duck_array_ops import concatenate\nfrom .shrunk_index import all_index_data\n\ndef _get_var_metadata():\n # The LLC run data comes with zero metadata. So we import metadata from\n # the xmitgcm package.\n from ..variables import state_variables, package_state_variables\n from ..utils import parse_available_diagnostics\n from ..default_diagnostics import diagnostics\n from io import StringIO\n\n diag_file = StringIO(diagnostics)\n available_diags = parse_available_diagnostics(diag_file)\n var_metadata = state_variables\n var_metadata.update(package_state_variables)\n var_metadata.update(available_diags)\n\n # even the file names from the LLC data differ from standard MITgcm output\n aliases = {'Eta': 'ETAN', 'PhiBot': 'PHIBOT', 'Salt': 'SALT',\n 'Theta': 'THETA'}\n for a, b in aliases.items():\n var_metadata[a] = var_metadata[b]\n\n return var_metadata\n\n_VAR_METADATA = _get_var_metadata()\n\ndef _get_variable_point(vname):\n dims = _VAR_METADATA[vname]['dims']\n if 'i' in dims and 'j' in dims:\n point = 'c'\n elif 'i_g' in dims and 'j' in dims:\n point = 'w'\n elif 'i' in dims and 'j_g' in dims:\n point = 's'\n elif 'i_g' in dims and 'j_g' in dims:\n raise ValueError(\"Don't have masks for corner points!\")\n else:\n raise ValueError(\"Variable `%s` is not a horizontal variable.\" % vname)\n return point\n\ndef _get_scalars_and_vectors(varnames, type):\n\n for vname in varnames:\n if vname not in _VAR_METADATA:\n raise ValueError(\"Varname `%s` not found in metadata.\" % vname)\n\n if type != 'latlon':\n return varnames, []\n\n scalars = []\n vector_pairs = []\n for vname in varnames:\n meta = _VAR_METADATA[vname]\n try:\n mate = meta['attrs']['mate']\n if mate not in varnames:\n raise ValueError(\"Vector pairs are required to create \"\n \"latlon type datasets. Varname `%s` is \"\n \"missing its vector mate `%s`\"\n % vname, mate)\n vector_pairs.append((vname, mate))\n varnames.remove(mate)\n except KeyError:\n scalars.append(vname)\n\ndef _decompress(data, mask, dtype):\n data_blank = np.full_like(mask, np.nan, dtype=dtype)\n data_blank[mask] = data\n data_blank.shape = mask.shape\n return data_blank\n\n\n\n_facet_strides = ((0,3), (3,6), (6,7), (7,10), (10,13))\n# whether to reshape each face\n_facet_reshape = (False, False, False, True, True)\n_nfaces = 13\n_nfacets = 5\n\ndef _uncompressed_facet_index(nfacet, nside):\n face_size = nside**2\n start = _facet_strides[nfacet][0] * face_size\n end = _facet_strides[nfacet][1] * face_size\n return start, end\n\ndef _facet_shape(nfacet, nside):\n facet_length = _facet_strides[nfacet][1] - _facet_strides[nfacet][0]\n if _facet_reshape[nfacet]:\n facet_shape = (1, nside, facet_length*nside)\n else:\n facet_shape = (1, facet_length*nside, nside)\n return facet_shape\n\ndef _facet_to_faces(data, nfacet):\n shape = data.shape\n # facet dimension\n nf, ny, nx = shape[-3:]\n other_dims = shape[:-3]\n assert nf == 1\n facet_length = _facet_strides[nfacet][1] - _facet_strides[nfacet][0]\n if _facet_reshape[nfacet]:\n new_shape = other_dims + (ny, facet_length, nx / facet_length)\n data_rs = data.reshape(new_shape)\n data_rs = np.moveaxis(data_rs, -2, -3) # dask-safe\n else:\n new_shape = other_dims + (facet_length, ny / facet_length, nx)\n data_rs = data.reshape(new_shape)\n return data_rs\n\ndef _facets_to_faces(facets):\n all_faces = []\n for nfacet, data_facet in enumerate(facets):\n data_rs = _facet_to_faces(data_facet, nfacet)\n all_faces.append(data_rs)\n return concatenate(all_faces, axis=-3)\n\ndef _faces_to_facets(data, facedim=-3):\n assert data.shape[facedim] == _nfaces\n facets = []\n for nfacet, (strides, reshape) in enumerate(zip(_facet_strides, _facet_reshape)):\n face_data = [data[(...,) + (slice(nface, nface+1), slice(None), slice(None))]\n for nface in range(*strides)]\n if reshape:\n concat_axis = facedim + 2\n else:\n concat_axis = facedim + 1\n # todo: use duck typing for concat\n facet_data = concatenate(face_data, axis=concat_axis)\n facets.append(facet_data)\n return facets\n\n\ndef _rotate_scalar_facet(facet):\n facet_transposed = np.moveaxis(facet, -1, -2)\n facet_rotated = np.flip(facet_transposed, -2)\n return facet_rotated\n\n\ndef _facets_to_latlon_scalar(all_facets):\n rotated = (all_facets[:2]\n + [_rotate_scalar_facet(facet) for facet in all_facets[-2:]])\n # drop facet dimension\n rotated = [r[..., 0, :, :] for r in rotated]\n return concatenate(rotated, axis=-1)\n\n\ndef _faces_to_latlon_scalar(data):\n data_facets = _faces_to_facets(data)\n return _facets_to_latlon_scalar(data_facets)\n\n\n# dask's pad function doesn't work\n# it does weird things to non-pad dimensions\n# need to roll our own\ndef shift_and_pad(a):\n a_shifted = a[..., 1:]\n pad_array = dsa.zeros_like(a[..., -2:-1])\n return concatenate([a_shifted, pad_array], axis=-1)\n\ndef transform_v_to_u(facet):\n return _rotate_scalar_facet(facet)\n\ndef transform_u_to_v(facet, metric=False):\n # \"shift\" u component by 1 pixel\n pad_width = (facet.ndim - 1) * (None,) + ((1, 0),)\n #facet_padded = dsa.pad(facet[..., 1:], pad_width, 'constant')\n facet_padded = shift_and_pad(facet)\n assert facet.shape == facet_padded.shape\n facet_rotated = _rotate_scalar_facet(facet_padded)\n if not metric:\n facet_rotated = -facet_rotated\n return facet_rotated\n\ndef _facets_to_latlon_vector(facets_u, facets_v, metric=False):\n # need to pad the rotated v values\n ndim = facets_u[0].ndim\n # second-to-last axis is the one to pad, plus a facet axis\n assert ndim >= 3\n\n # drop facet dimension\n facets_u_drop = [f[..., 0, :, :] for f in facets_u]\n facets_v_drop = [f[..., 0, :, :] for f in facets_v]\n\n u_rot = (facets_u_drop[:2]\n + [transform_v_to_u(facet) for facet in facets_v_drop[-2:]])\n v_rot = (facets_v_drop[:2]\n + [transform_u_to_v(facet, metric) for facet in facets_u_drop[-2:]])\n\n u = concatenate(u_rot, axis=-1)\n v = concatenate(v_rot, axis=-1)\n return u, v\n\ndef _faces_to_latlon_vector(u_faces, v_faces, metric=False):\n u_facets = _faces_to_facets(u_faces)\n v_facets = _faces_to_facets(v_faces)\n u, v = _facets_to_latlon_vector(u_facets, v_facets, metric=metric)\n return u, v\n\ndef _drop_facedim(dims):\n dims = list(dims)\n dims.remove('face')\n return dims\n\ndef _add_face_to_dims(dims):\n new_dims = dims.copy()\n if 'j' in dims:\n j_dim = dims.index('j')\n new_dims.insert(j_dim, 'face')\n elif 'j_g' in dims:\n j_dim = dims.index('j_g')\n new_dims.insert(j_dim, 'face')\n return new_dims\n\ndef _faces_coords_to_latlon(ds):\n coords = ds.reset_coords().coords.to_dataset()\n ifac = 4\n jfac = 3\n dim_coords = {}\n for vname in coords.coords:\n if vname[0] == 'i':\n data = np.arange(ifac * coords.dims[vname])\n elif vname[0] == 'j':\n data = np.arange(jfac * coords.dims[vname])\n else:\n data = coords[vname].data\n var = xr.Variable(ds[vname].dims, data, ds[vname].attrs)\n dim_coords[vname] = var\n return xr.Dataset(dim_coords)\n\ndef faces_dataset_to_latlon(ds, metric_vector_pairs=[('dxC', 'dyC'), ('dyG', 'dxG')]):\n \"\"\"Transform a 13-face LLC xarray Dataset into a rectancular grid,\n discarding the Arctic.\n\n Parameters\n ----------\n ds : xarray.Dataset\n A 13-face LLC dataset\n metric_vector_pairs : list, optional\n Pairs of variables that are positive-definite metrics located at grid\n edges.\n\n Returns\n -------\n out : xarray.Dataset\n Transformed rectangular dataset\n \"\"\"\n\n coord_vars = list(ds.coords)\n ds_new = _faces_coords_to_latlon(ds)\n\n vector_pairs = []\n scalars = []\n vnames = list(ds.reset_coords().variables)\n for vname in vnames:\n try:\n mate = ds[vname].attrs['mate']\n vector_pairs.append((vname, mate))\n vnames.remove(mate)\n except KeyError:\n pass\n\n all_vector_components = [inner for outer in (vector_pairs + metric_vector_pairs)\n for inner in outer]\n scalars = [vname for vname in vnames if vname not in all_vector_components]\n data_vars = {}\n\n for vname in scalars:\n if vname=='face' or vname in ds_new:\n continue\n if 'face' in ds[vname].dims:\n data = _faces_to_latlon_scalar(ds[vname].data)\n dims = _drop_facedim(ds[vname].dims)\n else:\n data = ds[vname].data\n dims = ds[vname].dims\n data_vars[vname] = xr.Variable(dims, data, ds[vname].attrs)\n\n for vname_u, vname_v in vector_pairs:\n data_u, data_v = _faces_to_latlon_vector(ds[vname_u].data, ds[vname_v].data)\n data_vars[vname_u] = xr.Variable(_drop_facedim(ds[vname_u].dims), data_u, ds[vname_u].attrs)\n data_vars[vname_v] = xr.Variable(_drop_facedim(ds[vname_v].dims), data_v, ds[vname_v].attrs)\n for vname_u, vname_v in metric_vector_pairs:\n data_u, data_v = _faces_to_latlon_vector(ds[vname_u].data, ds[vname_v].data, metric=True)\n data_vars[vname_u] = xr.Variable(_drop_facedim(ds[vname_u].dims), data_u, ds[vname_u].attrs)\n data_vars[vname_v] = xr.Variable(_drop_facedim(ds[vname_v].dims), data_v, ds[vname_v].attrs)\n\n\n ds_new = ds_new.update(data_vars)\n ds_new = ds_new.set_coords([c for c in coord_vars if c in ds_new])\n return ds_new\n\n\n# below are data transformers\n\ndef _all_facets_to_faces(data_facets, meta):\n return {vname: _facets_to_faces(data)\n for vname, data in data_facets.items()}\n\n\ndef _all_facets_to_latlon(data_facets, meta):\n\n vector_pairs = []\n scalars = []\n vnames = list(data_facets)\n for vname in vnames:\n try:\n mate = meta[vname]['attrs']['mate']\n vector_pairs.append((vname, mate))\n vnames.remove(mate)\n except KeyError:\n pass\n\n all_vector_components = [inner for outer in vector_pairs for inner in outer]\n scalars = [vname for vname in vnames if vname not in all_vector_components]\n\n data = {}\n for vname in scalars:\n data[vname] = _facets_to_latlon_scalar(data_facets[vname])\n\n for vname_u, vname_v in vector_pairs:\n data_u, data_v = _facets_to_latlon_vector(data_facets[vname_u],\n data_facets[vname_v])\n data[vname_u] = data_u\n data[vname_v] = data_v\n\n return data\n\ndef _chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef _get_facet_chunk(store, varname, iternum, nfacet, klevels, nx, nz, dtype):\n fs, path = store.get_fs_and_full_path(varname, iternum)\n file = fs.open(path)\n\n assert (nfacet >= 0) & (nfacet < _nfacets)\n\n try:\n # workaround for ecco data portal\n file = fs.open(path, size_policy='get')\n except TypeError:\n file = fs.open(path)\n\n # insert singleton axis for time and k level\n facet_shape = (1, 1,) + _facet_shape(nfacet, nx)\n\n level_data = []\n\n # TODO: get index\n # the store tells us whether we need a mask or not\n point = _get_variable_point(varname)\n if store.shrunk:\n index = all_index_data[nx][point]\n zgroup = store.open_mask_group()\n mask = zgroup['mask_' + point].astype('bool')\n else:\n index = None\n mask = None\n\n for k in klevels:\n assert (k >= 0) & (k < nz)\n\n # figure out where in the file we have to read to get the data\n # for this level and facet\n if index:\n i = np.ravel_multi_index((k, nfacet), (nz, _nfacets))\n start = index[i]\n end = index[i+1]\n else:\n level_start = k * nx**2 * _nfaces\n facet_start, facet_end = _uncompressed_facet_index(nfacet, nx)\n start = level_start + facet_start\n end = level_start + facet_end\n\n read_offset = start * dtype.itemsize # in bytes\n read_length = (end - start) * dtype.itemsize # in bytes\n file.seek(read_offset)\n buffer = file.read(read_length)\n data = np.frombuffer(buffer, dtype=dtype)\n assert len(data) == (end - start)\n\n if mask:\n mask_level = mask[k]\n mask_facets = _faces_to_facets(mask_level)\n this_mask = mask_facets[nfacet]\n data = _decompress(data, this_mask, dtype)\n\n # this is the shape this facet is supposed to have\n data.shape = facet_shape\n level_data.append(data)\n\n return np.concatenate(level_data, axis=1)\n\n\nclass BaseLLCModel:\n \"\"\"Class representing an LLC Model Dataset.\n\n Parameters\n ----------\n store : llcreader.BaseStore\n The store object where the data can be found\n mask_ds : zarr.Group\n Must contain variables `mask_c`, `masc_w`, `mask_s`\n\n Attributes\n ----------\n dtype : numpy.dtype\n Datatype of the data in the dataset\n nx : int\n Number of gridpoints per face (e.g. 90, 1080, 4320, etc.)\n nz : int\n Number of vertical gridpoints\n delta_t : float\n Numerical timestep\n time_units : str\n Date unit string, e.g 'seconds since 1948-01-01 12:00:00'\n iter_start : int\n First model iteration number (inclusive; follows python range conventions)\n iter_stop : int\n Final model iteration number (exclusive; follows python range conventions)\n iter_step : int\n Spacing between iterations\n varnames : list\n List of variable names contained in the dataset\n \"\"\"\n\n nface = 13\n dtype = np.dtype('>f4')\n # should be implemented by child classes\n nx = None\n nz = None\n delta_t = None\n time_units = None\n iter_start = None\n iter_stop = None\n iter_step = None\n varnames = []\n\n def __init__(self, store):\n \"\"\"Initialize model\n\n Parameters\n ----------\n store : llcreader.BaseStore\n mask_ds : zarr.Group\n Must contain variables `mask_c`, `masc_w`, `mask_s`\n \"\"\"\n self.store = store\n self.shape = (self.nz, self.nface, self.nx, self.nx)\n if self.store.shrunk:\n self.masks = self._get_masks()\n from .shrunk_index import all_index_data\n self.indexes = all_index_data[self.nx]\n else:\n self.masks = None\n self.indexes = None\n\n\n def _get_masks(self):\n masks = {}\n zgroup = self.store.open_mask_group()\n for point in ['c', 'w', 's']:\n mask_faces = dsa.from_zarr(zgroup['mask_' + point]).astype('bool')\n masks[point] = _faces_to_facets(mask_faces)\n return masks\n\n\n def _make_coords_faces(self, all_iters):\n time = self.delta_t * all_iters\n time_attrs = {'units': self.time_units,\n 'calendar': self.calendar}\n coords = {'face': ('face', np.arange(self.nface)),\n 'i': ('i', np.arange(self.nx)),\n 'i_g': ('i_g', np.arange(self.nx)),\n 'j': ('j', np.arange(self.nx)),\n 'j_g': ('j_g', np.arange(self.nx)),\n 'k': ('k', np.arange(self.nz)),\n 'k_u': ('k_u', np.arange(self.nz)),\n 'k_l': ('k_l', np.arange(self.nz)),\n 'k_p1': ('k_p1', np.arange(self.nz + 1)),\n 'niter': ('time', all_iters),\n 'time': ('time', time, time_attrs)\n }\n return xr.decode_cf(xr.Dataset(coords=coords))\n\n\n def _make_coords_latlon():\n ds = self._make_coords_faces(self)\n return _faces_coords_to_latlon(ds)\n\n\n def _get_mask_and_index_for_variable(self, vname):\n if self.masks is None:\n return None, None\n\n dims = _VAR_METADATA[vname]['dims']\n if 'i' in dims and 'j' in dims:\n point = 'c'\n elif 'i_g' in dims and 'j' in dims:\n point = 'w'\n elif 'i' in dims and 'j_g' in dims:\n point = 's'\n elif 'i_g' in dims and 'j_g' in dims:\n raise ValueError(\"Don't have masks for corner points!\")\n else:\n # this is not a 2D variable\n return None, None\n\n mask = self.masks[point]\n index = self.indexes[point]\n return mask, index\n\n\n def _dask_array(self, nfacet, varname, iters, klevels, k_chunksize):\n # return a dask array for a single facet\n facet_shape = _facet_shape(nfacet, self.nx)\n time_chunks = (len(iters) * (1,),)\n k_chunks = (tuple([len(c)\n for c in _chunks(klevels, k_chunksize)]),)\n chunks = time_chunks + k_chunks + tuple([(s,) for s in facet_shape])\n\n # manually build dask graph\n dsk = {}\n token = tokenize(varname, self.store, nfacet)\n name = '-'.join([varname, token])\n for n_iter, iternum in enumerate(iters):\n for n_k, these_klevels in enumerate(_chunks(klevels, k_chunksize)):\n key = name, n_iter, n_k, 0, 0, 0\n task = (_get_facet_chunk, self.store, varname, iternum,\n nfacet, these_klevels, self.nx, self.nz, self.dtype)\n dsk[key] = task\n\n return dsa.Array(dsk, name, chunks, self.dtype)\n\n\n def _get_facet_data(self, varname, iters, klevels, k_chunksize):\n mask, index = self._get_mask_and_index_for_variable(varname)\n # needs facets to be outer index of nested lists\n dims = _VAR_METADATA[varname]['dims']\n\n if len(dims)==2:\n klevels = [0,]\n\n data_facets = [self._dask_array(nfacet, varname, iters, klevels, k_chunksize)\n for nfacet in range(5)]\n\n if len(dims)==2:\n # squeeze depth dimension out of 2D variable\n data_facets = [facet[..., 0, :, :, :] for facet in data_facets]\n\n return data_facets\n\n\n def get_dataset(self, varnames=None, iter_start=None, iter_stop=None,\n iter_step=None, k_levels=None, k_chunksize=1,\n type='faces'):\n \"\"\"\n Create an xarray Dataset object for this model.\n\n Parameters\n ----------\n *varnames : list of strings, optional\n The variables to include, e.g. ``['Salt', 'Theta']``. Otherwise\n include all known variables.\n iter_start : int, optional\n Starting iteration number. Otherwise use model default.\n Follows standard `range` conventions. (inclusive)\n iter_start : int, optional\n Stopping iteration number. Otherwise use model default.\n Follows standard `range` conventions. (exclusive)\n iter_step : int, optional\n Iteration number stepsize. Otherwise use model default.\n k_levels : list of ints, optional\n Vertical levels to extract. Default is to get them all\n k_chunksize : int, optional\n How many vertical levels per Dask chunk.\n type : {'faces', 'latlon'}, optional\n What type of dataset to create\n\n Returns\n -------\n ds : xarray.Dataset\n \"\"\"\n\n def _if_not_none(a, b):\n if a is None:\n return b\n else:\n return a\n\n iter_start = _if_not_none(iter_start, self.iter_start)\n iter_stop = _if_not_none(iter_stop, self.iter_stop)\n iter_step = _if_not_none(iter_step, self.iter_step)\n iter_params = [iter_start, iter_stop, iter_step]\n if any([a is None for a in iter_params]):\n raise ValueError(\"The parameters `iter_start`, `iter_stop` \"\n \"and `iter_step` must be defined either by the \"\n \"model class or as argument. Instead got %r \"\n % iter_params)\n iters = np.arange(*iter_params)\n\n varnames = varnames or self.varnames\n\n ds = self._make_coords_faces(iters)\n if type=='latlon':\n ds = _faces_coords_to_latlon(ds)\n\n k_levels = k_levels or np.arange(self.nz)\n ds = ds.sel(k=k_levels, k_l=k_levels, k_u=k_levels, k_p1=k_levels)\n\n # get the data in facet form\n data_facets = {vname:\n self._get_facet_data(vname, iters, k_levels, k_chunksize)\n for vname in varnames}\n\n # transform it into faces or latlon\n data_transformers = {'faces': _all_facets_to_faces,\n 'latlon': _all_facets_to_latlon}\n\n transformer = data_transformers[type]\n data = transformer(data_facets, _VAR_METADATA)\n\n variables = {}\n for vname in varnames:\n meta = _VAR_METADATA[vname]\n dims = meta['dims']\n if type=='faces':\n dims = _add_face_to_dims(dims)\n dims = ['time',] + dims\n attrs = meta['attrs']\n variables[vname] = xr.Variable(dims, data[vname], attrs)\n\n ds = ds.update(variables)\n return ds\n","repo_name":"rowhit/xmitgcm","sub_path":"xmitgcm/llcreader/llcmodel.py","file_name":"llcmodel.py","file_ext":"py","file_size_in_byte":21544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"25554175337","text":"import random\n\nimport wx\nfrom wxAlgorithm.constants import PALLETES\n\nclass ArrayPanel(wx.Panel):\n \"\"\"array를 받아서 그림을 그려주는 panel.\"\"\"\n def __init__(self, parent, array, colors):\n super().__init__(parent, wx.ID_ANY)\n self.set(array, colors)\n\n self.Bind(wx.EVT_SIZE, self.on_size)\n self.Bind(wx.EVT_PAINT, self.on_paint)\n\n def set(self, array, colors):\n self.array = array\n for color in colors:\n if color not in PALLETES.keys():\n raise ValueError(\"{} are not in PALLETES\".format(color))\n self.colors = colors\n self.Refresh()\n\n def on_size(self, event):\n event.Skip()\n self.Refresh()\n\n def on_paint(self, event):\n canvas_w, canvas_h = self.GetClientSize()\n\n dc = wx.PaintDC(self)\n dc.Clear()\n\n array = self.array\n colors = self.colors\n\n max_val = max(array)\n el_width = canvas_w / len(array)\n\n for i, (el, color) in enumerate(zip(array, colors)):\n el_height = (el / max_val * canvas_h)\n dc.SetBrush(wx.Brush(PALLETES[color]))\n\n dc.DrawRectangle(i * el_width,\n canvas_h - el_height,\n el_width,\n el_height)\n\n\nclass StepPanel(wx.Panel):\n \"\"\"Array view를 가지며 step button, reset button, slide를 추가한 panel\"\"\"\n def __init__(self, parent, num_elements=5):\n super().__init__(parent, wx.ID_ANY)\n bSizer = wx.BoxSizer(wx.VERTICAL)\n self.panel = ArrayPanel(self, [], [])\n bSizer.Add(self.panel, 1, wx.EXPAND| wx.ALL)\n\n hSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.reset = wx.Button(self, wx.ID_ANY, \"reset\")\n self.next = wx.Button(self, wx.ID_ANY, \"next\")\n hSizer.Add(self.next, 1, wx.EXPAND)\n hSizer.Add(self.reset, 1, wx.EXPAND)\n bSizer.Add(hSizer, 0, wx.EXPAND|wx.ALL)\n\n hSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n self.statictext = wx.StaticText(self, wx.ID_ANY, \"#elements: 5\", style=wx.ALIGN_CENTER)\n hSizer2.Add(self.statictext, 1, wx.EXPAND|wx.ALL)\n slider = wx.Slider(self, wx.ID_ANY, 5, 2, 10)\n hSizer2.Add(slider, 3, wx.EXPAND|wx.ALL)\n bSizer.Add(hSizer2, 0, wx.EXPAND|wx.ALL)\n\n self.SetSizer(bSizer)\n self.Layout()\n\n self.reset.Bind(wx.EVT_BUTTON, self.on_reset_clicked)\n self.next.Bind(wx.EVT_BUTTON, self.on_next_clicked)\n slider.Bind(wx.EVT_SLIDER, self.on_slide)\n \n # not GUI\n self.num_elements = num_elements\n self.reset_data()\n\n def on_slide(self, event):\n obj = event.GetEventObject()\n value = obj.GetValue()\n if self.num_elements != value:\n self.set_num_elements(value)\n self.statictext.SetLabel(\"#elements: {}\".format(value))\n self.Layout()\n\n def set_num_elements(self, num_elements):\n self.num_elements = num_elements\n self.reset_data()\n\n def reset_data(self):\n self.array = [i + 1 for i in range(self.num_elements)]\n random.shuffle(self.array)\n\n self.step_gen = self.step_generator()\n self.colors = ['default' for _ in self.array]\n self.panel.set(self.array, self.colors)\n self.next.Enable()\n\n def step_generator(self):\n \"\"\"self.array, self.colors를 바꾸는 함수. \n \n yield 후 self.colors가 초기화되기 때문에, 바꾸고싶은 color만 바꾸면 된다.\n\n \"\"\"\n raise NotImplementedError\n \n def on_reset_clicked(self, event):\n self.reset_data()\n\n def on_next_clicked(self, event):\n \"\"\"array의 모든 color를 default로 초기화 시키고, step_gen을 한번 호출한다.\n\n \"\"\"\n try:\n self.colors = ['default' for _ in self.array]\n next(self.step_gen)\n self.panel.set(self.array, self.colors)\n except StopIteration:\n self.panel.set(self.array, ['disabled' for _ in self.array])\n self.next.Disable()\n","repo_name":"Hulk89/wxAlgorithm","sub_path":"wxAlgorithm/sorts/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"22044411282","text":"\"\"\"Test the `WatcherManager` class.\n\"\"\"\n\nfrom tests.helper_functions import TestCaseWithFakeFiles\nfrom tests.helper_functions import create_file\nfrom tests.helper_functions import remove_file\n\nfrom watch_do import WatcherManager\nfrom watch_do import GlobManager\nfrom watch_do.watchers import Watcher\nfrom watch_do.watchers.hash import MD5\n\n\nclass TestWatcherManager(TestCaseWithFakeFiles):\n \"\"\"Test the `WatcherManager` class.\n \"\"\"\n def setUp(self):\n super(TestWatcherManager, self).setUp()\n\n self.glob_manager = GlobManager(['*'])\n self.watcher_manager = WatcherManager(\n MD5, self.glob_manager, True, True)\n\n def test___init__(self):\n \"\"\"Check that all passed in properties are being stored correctly.\n \"\"\"\n self.assertTrue(issubclass(self.watcher_manager.watcher, Watcher))\n self.assertIsInstance(self.watcher_manager.glob_manager, GlobManager)\n self.assertTrue(self.watcher_manager.reglob)\n self.assertTrue(self.watcher_manager.changed_on_remove)\n self.assertEqual(self.watcher_manager.files, set())\n\n def test_get_changed_files(self):\n \"\"\"Chack that new, removed and changed files are being reported.\n \"\"\"\n # No changed files to start with\n self.assertEqual(self.watcher_manager.get_changed_files(), set(''))\n\n # Check we have successfully globbed some files\n self.assertEqual(self.watcher_manager.files,\n {\n 'dave.txt',\n 'bob.py',\n 'jim.py.txt',\n 'fred.txt.py',\n 'rob.txt',\n 'geoff.py'\n })\n\n # New file\n create_file('something_random.jpeg')\n self.assertEqual(self.watcher_manager.get_changed_files(),\n {'something_random.jpeg'})\n\n # Removed file (as `changed_on_remove` is True)\n remove_file('something_random.jpeg')\n self.assertEqual(self.watcher_manager.get_changed_files(),\n {'something_random.jpeg'})\n\n # Change file\n create_file('dave.txt', 'Hello World')\n self.assertEqual(self.watcher_manager.get_changed_files(),\n {'dave.txt'})\n\n # Disable changed_on_remove\n self.watcher_manager._changed_on_remove = False\n remove_file('dave.txt')\n self.assertEqual(self.watcher_manager.get_changed_files(),\n set())\n\n # New file with reglob disabled\n self.watcher_manager._reglob = False\n create_file('dave.txt')\n self.assertEqual(self.watcher_manager.get_changed_files(),\n set())\n\n # Removed file with reglob disabled\n remove_file('bob.py')\n self.assertEqual(self.watcher_manager.get_changed_files(),\n set())\n","repo_name":"vimist/watch-do","sub_path":"tests/watcher_manager.py","file_name":"watcher_manager.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"12585717691","text":"import numpy as np \nimport functools\nimport operator\nfrom typing import Union, List, Tuple, Optional, Dict\nfrom collections import defaultdict\nfrom scipy.special import expit, logsumexp\nfrom gym import spaces\n\n\ndef make_hashable(x: Union[int, float, np.ndarray, list, tuple]) -> tuple:\n \"\"\"Converts the input into a hashable for use as a key\n\n Args:\n x (Union[int, float, np.ndarray, list, tuple]): Raw input to be converted.\n\n Raises:\n NotImplementedError: If the input is not a supported type.\n\n Returns:\n tuple: Hashable version of the input.\n \"\"\"\n if isinstance(x, np.ndarray):\n return tuple(x.flatten())\n elif isinstance(x, list):\n return tuple(functools.reduce(operator.concat, x))\n elif isinstance(x, tuple):\n return x\n else:\n raise NotImplementedError\n\nclass IntraOptionQTable:\n def __init__(self, discount: float, lr: float, level: int) -> None:\n self.lr = lr\n self.discount = discount\n self.level = level\n self.table = defaultdict(float) # Input will be obs and option_chain and option to get q value\n \n\n def _preprocess_obs(self, obs: Union[int, float, np.ndarray, list, tuple]) -> tuple:\n \"\"\"\n Converts the input into a hashable for use as a key in the `weights` dictionary. \n Assumes single observation is given.\n Args:\n obs (Union[np.ndarray, list, tuple]): Observation from environment.\n\n Returns:\n tuple: Outputs a flat tuple \n \"\"\"\n return make_hashable(obs)\n\n def get_q_value(\n self, \n obs: Union[int, np.ndarray, list, tuple], \n option_chain: Tuple[int],\n option: int\n ) -> Union[int, float, np.ndarray]:\n \"\"\"Main API for calling the QTable. Note that for tabular methods, we assume single inputs.\n\n Args:\n obs (Union[int, np.ndarray, list, tuple]): Single observation.\n option_chain (Tuple[int], optional): Tuple of options executing above this critic's level. Length = self.level-2\n\n Returns:\n Union[int, float]: Q(s, o^{1:l}) or Q(s, :) if no action is given. Shape should be at least 2d\n \"\"\"\n obs_ = self._preprocess_obs(obs)\n \n q = self.weights[(obs_, option_chain, option)]\n return np.atleast_2d(q)\n\n def set_q_value(\n self, \n obs: Union[int, np.ndarray, list, tuple], \n option_chain: Tuple[int], \n option: int,\n target: Union[float, int]) -> None:\n obs_ = self._preprocess_obs(obs)\n self.weights[(obs_, option_chain, option)] = target\n\n def update(self, transition: Dict[str, Union[int, float, bool]]) -> None:\n obs = transition[\"obs\"]\n action = transition[\"actions\"]\n reward = transition[\"task_rewards\"]\n done = transition[\"dones\"]\n next_obs = transition[\"next_obs\"]\n\n # One-step update target\n next_obs_action_vals = self.get_q_value(next_obs)\n next_obs_val = np.max(next_obs_action_vals, axis=1)\n update_target = reward + (1-done) * self.discount * next_obs_val\n new_q = self.get_q_value(obs, action) * \\\n (1 - self.lr) + self.lr * update_target\n self.set_q_value(obs, action, new_q)\n\nclass OptionQTable:\n pass\n\nclass OptionActionQTable: # Q(s, o, a)\n def __init__(self, discount, lr, num_obs, num_actions, num_options, reward_type):\n self.lr = lr\n self.discount = discount\n self.weights = np.zeros((num_obs, num_options, num_actions), dtype=np.float32)\n self.reward_type = reward_type\n self.num_obs = num_obs\n self.num_actions = num_actions\n self.num_options = num_options\n\n def get_q_value(self, obs, option, action=None):\n \n if isinstance(obs, float) or isinstance(option, float) or isinstance(action, float):\n raise ValueError(\"Obs, options and actions must be integers or arrays of integers.\") \n if not isinstance(obs, int):\n obs = np.array(obs, dtype=int).reshape(-1,)\n if not isinstance(option, int):\n option = np.array(option, dtype=int).reshape(-1,)\n \n if action is None:\n q = self.weights[obs, option].reshape(-1, self.num_actions)\n else:\n # If it's a float array, a float, or a list or tuple.\n if not isinstance(action, int):\n action = np.array(action, dtype=int).reshape(-1,)\n\n q = self.weights[obs, option, action].reshape(-1, 1)\n return q # (batch_size, 1 or num_actions)\n\n def set_q_value(\n self, \n obs: Union[int, List, np.ndarray],\n option: Union[int, List, np.ndarray],\n action: Union[int, List, np.ndarray],\n target: Union[float, List, np.ndarray]\n ):\n \"\"\"Setting Q values. This works for single or sequences of (state, action, target) \"\"\"\n if isinstance(obs, float) or isinstance(option, float) or isinstance(action, float):\n raise ValueError(\"Obs, options and actions should be integers or sequences of integers.\")\n \n if not isinstance(obs, int):\n obs = np.array(obs, dtype=int).reshape(-1,)\n \n if not isinstance(option, int):\n option = np.array(option, dtype=int).reshape(-1,)\n if not isinstance(action, int):\n action = np.array(action, dtype=int).reshape(-1,)\n \n if not isinstance(target, (int, float)):\n target = np.array(target).reshape(-1,)\n \n if len(obs) != len(option) or len(obs) != len(action):\n raise ValueError(\"The number of observations must equal the number of options and actions\")\n elif len(obs) != len(target):\n raise ValueError(\"The number of observations must equal the number of targets\")\n \n self.weights[obs, option, action] = target\n return target\n\n @staticmethod\n def compute_vua(\n option, # (batch_size, 1)\n next_obs_val, # (batch_size, 1)\n next_obs_option_vals, # (batch_size, num_options)\n next_obs_option_beta, # (batch_size, 1) but how to compute this? I would need beta_r(s')[option] \n ):\n option_vals = next_obs_option_vals[range(next_obs_option_vals.shape[0]), option.reshape(-1,)].reshape(-1, 1)\n value_upon_arrival = (1.-next_obs_option_beta) * option_vals + next_obs_option_beta * next_obs_val \n \n return value_upon_arrival # (batch_size, 1)\n\n def update(\n self, \n batch,\n next_obs_val, # (batch_size, 1)\n next_obs_option_vals, # (batch_size, num_options)\n next_obs_option_beta # batch_size, 1\n ):\n\n obs = batch[\"obs\"]\n option = batch[\"options\"]\n action = batch[\"actions\"]\n reward = batch[self.reward_type + \"_rewards\"]\n done = batch[\"dones\"]\n\n # One-step update target\n vua = self.compute_vua(option, next_obs_val, next_obs_option_vals, next_obs_option_beta)\n update_target = reward + (1-done) * self.discount * vua\n\n # Update values upon arrival if desired\n old_q = self.get_q_value(obs, option, action)\n new_q = old_q + self.lr * (update_target - old_q)\n self.set_q_value(obs, option, action, new_q)\n\n return update_target - old_q\n\n\nclass OptionQTable:\n def __init__(self, discount, lr, num_obs, num_options, reward_type):\n self.lr = lr\n self.discount = discount\n self.weights = np.zeros((num_obs, num_options), dtype=np.float32)\n self.reward_type = reward_type\n self.num_obs = num_obs\n self.num_options = num_options\n\n def get_q_value(self, obs, option=None):\n if isinstance(obs, float) or isinstance(option, float):\n raise ValueError(\"Obs or options given are floats, they should be integers or arrays of integers.\") \n\n if not isinstance(obs, int):\n obs = np.array(obs, dtype=int).reshape(-1,)\n \n if option is None:\n q = self.weights[obs].reshape(-1, self.num_options)\n else:\n # If it's a float array, a float, or a list or tuple.\n if not isinstance(option, int):\n option = np.array(option, dtype=int).reshape(-1,)\n\n q = self.weights[obs, option].reshape(-1, 1)\n return q\n\n def set_q_value(\n self, \n obs: Union[int, List, np.ndarray],\n option: Union[int, List, np.ndarray],\n target: Union[float, List, np.ndarray]\n ):\n \"\"\"Setting Q values. This works for single or sequences of (state, option, target) \"\"\"\n if isinstance(obs, float) or isinstance(option, float):\n raise ValueError(\"Obs and option are floats and should be integers.\") \n \n if not isinstance(obs, int):\n obs = np.array(obs, dtype=int).reshape(-1,)\n\n if not isinstance(option, int):\n option = np.array(option, dtype=int).reshape(-1,)\n \n if not isinstance(target, (int, float)):\n target = np.array(target).reshape(-1,)\n \n # Dealing with sequences\n if not isinstance(obs, int):\n if len(obs) != len(option):\n raise ValueError(\"The number of observations must equal the number of options\")\n elif len(obs) != len(target):\n raise ValueError(\"The number of observations must equal the number of targets\")\n \n self.weights[obs, option] = target\n return target\n\n \n\nclass EgreedyPolicy:\n def __init__(self, rng, critic, epsilon, option_id):\n \"\"\"Action Selecion implementation. Created to work with a Q function.\n\n Args:\n rng (np.random.Generator): _description_\n critic (QTable): (Should work with option or OptionAction Q table)\n epsilon (float): \n id (int): _Index of Corresponding Option (either meta or policy over primitives)\n \"\"\"\n self.rng = rng\n self.epsilon = epsilon\n self.critic = critic\n self.num_actions = critic.weights.shape[-1]\n self.reward_type = critic.reward_type\n self.option_id = option_id\n\n def sample(self, obs, q_vals, deterministic=False):\n \"\"\"Does not work for batches.\n\n Args:\n obs (_type_): _description_\n q_vals (np.ndarray): (batch_size, num_actions)\n deterministic (bool, optional): _description_. Defaults to False.\n\n Returns:\n _type_: _description_\n \"\"\"\n if deterministic or self.rng.uniform() > self.epsilon:\n action = self.rng.choice(np.flatnonzero(q_vals == q_vals.max(axis=-1)))\n else:\n action = self.rng.integers(0, q_vals.shape[-1])\n return action\n\n def get_prob(self, obs, q_vals, action=None):\n \"\"\"Return prob for target policy.\"\"\"\n q_max = np.max(q_vals, axis=-1).reshape(-1, 1)\n greedy_mask = (q_vals == q_max)\n out = np.zeros_like(q_vals)\n \n out[greedy_mask] = 1 # TODO:If this is used to mask as importance sampling weight, it will be wrong.\n out /= out.sum(axis=1).reshape(-1, 1)\n return out\n\nclass SoftmaxPolicy:\n def __init__(self, rng, critic, option_id, temp=1.):\n self.rng = rng\n self.critic = critic # Make sure this is the correct level of critic!\n self.temp = temp\n self.num_actions = critic.weights.shape[-1]\n self.option_id = option_id\n \n def get_prob(self, obs, option_chain, q_vals):\n q_max = q_vals.max(axis=-1).reshape(-1, 1)\n v = np.exp(q_vals - q_max)\n prob = v / v.sum(axis=-1).reshape(-1, 1)\n return np.array(prob).reshape(-1, self.num_actions)\n\n def sample(self, obs, q_vals, deterministic=False):\n prob = self.prob(obs, q_vals)\n if deterministic:\n return self.rng.choice(np.where(prob == prob.max(axis=-1)[1]))\n else:\n return self.rng.choice(self.num_actions, p=prob.squeeze())\n\nclass SigmoidTermination:\n \"\"\" \n This class implements a level-wide sigmoid termination function. \n One per level in the option hierarchy.\n \"\"\"\n def __init__(self, rng, lr, discount, level):\n self.rng = rng\n self.weights = defaultdict(float(0.5)) # Input should be (obs, option_chain[:level], option) \n self.level = level\n self.lr = lr\n self.discount = discount\n\n def get_prob(self, obs, option_chain: Tuple[int], option:int):\n \"\"\" Get the prob of termination for the given option.\n Args:\n obs (Union[int, float, np.ndarray]): Observation from environment.\n option_chain (Tuple[int]): Options currently executing.\n option (int): Option at current level, for which to query termination.\n\n Returns:\n prob (float): Probability of termination.\n \"\"\"\n if isinstance(obs, (int, float)):\n obs = np.array(obs).reshape(1, 1)\n prob = expit(self.weights[(np.rint(obs).astype(int), option_chain, option)])\n return prob\n \n def sample(self, obs, option_chain, option, training_mode=True):\n \"\"\"Query whether the option should terminate.\n\n Args:\n obs (Union[int, float, np.ndarray]): Observation from environment.\n option_chain (Tuple[int]): Options currently executing.\n option (int): Option at current level, for which to query termination.\n training_mode (bool, optional): Determines whether to sample probabilistically. Defaults to True.\n\n Returns:\n term (int): 1 if option should terminate, 0 otherwise.\n prob (float): Probability of termination.\n \"\"\"\n prob = self.get_prob(obs, option_chain, option)\n if not training_mode:\n term = int(prob > 0.5)\n else:\n term = int(self.rng.uniform() < prob)\n\n return term, prob\n \n # def get_grad(self, obs, option_chain, option):\n # t_prob = self.prob(obs)\n # grad = t_prob * (1 - t_prob)\n # return grad\n\n\nclass TabularHOCAgent:\n def __init__(self, \n num_options_per_level: Union[Tuple[int], List[int]],\n rng: np.random.Generator\n ) -> None:\n # Number of option levels in the hierarchy including root but not primitives.\n self.num_o_levels = 1 + len(num_options_per_level) \n \n # List of lists of policies. Each list is a level in the hierarchy. Each level is a list of option policies.\n self.policies_by_level = [] \n\n # One termination function per level in the hierarchy, apart from root option, which never terminates and primitives.\n self.termination_fns = [None] \n \n # One critic per level in the hierarchy including one for primitives.\n self.critics = []\n self.rng = rng\n\n def choose_options(self, obs: Union[int, float, np.ndarray], option_chain: Tuple[int], lowest_level:int) -> Tuple[int]:\n \"\"\"\n Main method for option selection. Called at each timestep to determine which option to execute. \n The method does not always change the option chain.\n Args:\n obs (Union[int, float, np.ndarray]): Raw observation from environment.\n option_chain (Tuple[int]): Options currently being executed. length should be (self.num_o_levels - 1). \n lowest_level (int): The levels above this will remain unchanged. lowest_level option and those below will be sampled.\n\n Returns:\n Tuple[int]: Next options to execute. This may be the same as the current option chain.\n \"\"\" \n if lowest_level == self.num_o_levels:\n return option_chain\n else:\n next_option_chain = list(option_chain)\n level = lowest_level\n while level < self.num_o_levels:\n policy = self.policies_by_level[level - 1]\n option = policy.sample(obs, option_chain[:level], self.training_mode) # DOes the input need to be the options from all levels above?\n next_option_chain[level] = option\n\n self.curr_option_chain = next_option_chain\n return next_option_chain\n \n def choose_action(self, obs:Union[int, float, np.ndarray], option_chain: Tuple[int]) -> int:\n return self.policies_by_level[-1][option_chain[-1]].sample(obs, option_chain[:-1], self.training_mode)\n \n def get_lowest_termination(self, obs, option_chain:Tuple[int]):\n \"\"\"Queries the termination function at each level of the hierarchy to get the probabilities of termination.\n Args:\n obs (Union[int, float, np.ndarray]): Raw observation from environment.\n option_chain (Tuple[int]): Options currently executing, for which we query terminations.\n\n Returns:\n lowest_term (int): Furthest level from primitives, in the hierarchy, where the option has just terminated.\n term_probs (Tuple[float]): Probability of termination at each level of the hierarchy. Recorded for updating.\n \"\"\"\n\n lowest_term = self.num_o_levels\n term_probs = np.zeros(self.num_o_levels, dtype=np.float32)\n\n for level in range(1, self.num_o_levels): # Skip root option\n term, term_prob = self.termination_fns[level].sample(\n obs,\n option_chain[:level],\n option_chain[level],\n self.training_mode,\n return_prob=True\n )\n term_probs.append(term_prob)\n if term:\n lowest_term = level \n \n return lowest_term, term_probs\n \n def process_transition(self, transition, term_probs):\n pass\n\n def update_critics(self, transition, term_probs):\n pass\n\n def update_policies(self, transition, term_probs):\n pass\n \n def update_terminations(self, transition, term_probs):\n pass\n","repo_name":"akshilpatel/hoc","sub_path":"hoc/agent/hoc.py","file_name":"hoc.py","file_ext":"py","file_size_in_byte":18414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4221385881","text":"# -*- coding: utf-8 -*-\r\n\r\n############################################################### Import Libraries\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\n############################################################# Setting Parameters\r\n\r\nN = 10000 ## total number of rounds (customers connecting to website)\r\nd = 9 ## number of strategies\r\n\r\n\r\n############################################################ Creating Simulation\r\n\r\nconversion_rates = [0.05, 0.13, 0.09, 0.16, 0.11, 0.04, 0.20, 0.08, 0.01] ## 9 strategies and conversion rates unknown to AI\r\nX = np.array(np.zeros([N,d])) ## initiate array of 10000 rows and 9 columns with zeros \r\n\r\n\r\n## Update succesfull plan with \"1\"\r\nfor i in range(N):\r\n for j in range(d): ## Bernoulli distribution\r\n if np.random.rand() <= conversion_rates[j]:\r\n X[i,j] = 1\r\n \r\n \r\n############################### Implementing Random Strategy vs Thomson Sampling\r\n\r\n## For each strategy i take a random draw from the following distribution\r\n\r\nstrategies_selected_rs = []\r\nstrategies_selected_ts = []\r\ntotal_reward_rs = 0\r\ntotal_reward_ts = 0\r\nnumbers_of_rewards_1 = [0] * d\r\nnumbers_of_rewards_0 = [0] * d\r\n\r\nfor n in range(0, N): ## for each round\r\n # Random Strategy\r\n strategy_rs = random.randrange(d) ## select random 0-8 strategy\r\n strategies_selected_rs.append(strategy_rs) ## append to list of random strategies\r\n reward_rs = X[n, strategy_rs] ## compare selected action with \"real life simulation\" X and get assigned reward\r\n total_reward_rs += reward_rs ## get total reward\r\n \r\n # Thomson Sampling\r\n strategy_ts = 0\r\n max_random = 0\r\n for i in range(0, d): ## for each strategy\r\n ## compare how many times till now that strategy recieved 1 or 0 to get the Random Draw\r\n random_beta = random.betavariate(numbers_of_rewards_1[i] +1, numbers_of_rewards_0[i] +1)\r\n # update random beta for each strategy\r\n if random_beta > max_random: \r\n max_random = random_beta\r\n strategy_ts = i \r\n \r\n reward_ts = X[n, strategy_ts] ## compare selected action with \"real life simulation\" X and get assigned reward \r\n # update number of rewards\r\n if reward_ts == 1:\r\n numbers_of_rewards_1[strategy_ts] += 1\r\n else:\r\n numbers_of_rewards_0[strategy_ts] += 1\r\n ## append to list of ts strategies \r\n strategies_selected_ts.append(strategy_ts)\r\n ## accumulate total ts rewards\r\n total_reward_ts += reward_ts\r\n \r\n \r\n####################################### Compute the Absolute and Relative Return \r\n \r\nabsolute_return = (total_reward_ts - total_reward_rs)*100 ## each customer converion = 100 USD\r\nrelative_return = (total_reward_ts - total_reward_rs) / total_reward_rs * 100\r\n \r\nprint(\"Absolute Return: {:.0f} $\".format(absolute_return)) \r\nprint(\"Relative Return: {:.0f} %\".format(relative_return)) \r\n \r\n \r\n \r\n########################################### Plotting the Histogram of Selections\r\n\r\nplt.hist(strategies_selected_ts) \r\nplt.title(\"Histogram of Selections\")\r\nplt.xlabel('Strategy')\r\nplt.ylabel('Number of times the strategy was aselected')\r\nplt.show()\r\n \r\n \r\n","repo_name":"LukaszMalucha/AI-Concepts","sub_path":"thomson_sampling/thomson_sampling.py","file_name":"thomson_sampling.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"39592346454","text":"from pyAudioAnalysis import audioSegmentation\nimport scipy.io.wavfile as wav\nimport uuid\nimport os\n\nfilename = \"../audio/myrec.wav\"\n(rate, sig) = wav.read(filename)\n\n# Machine Learning algorithm which provided by pyAudioAnalysis\nsegments = audioSegmentation.silence_removal(sig, rate, 0.020, 0.020, smooth_window=0.5, weight=0.5)\n\n# Get true segmentations of recorded audio file\ntrue_segments = []\nfor i in range(len(segments)):\n if segments[i][1] - segments[i][0] > 5:\n start = int(segments[i][0] * rate)\n end = int(segments[i][1] * rate)\n true_segments.append([start, end])\n\n# Output the segmentation as .wav file\nfilePath = str(uuid.uuid1())\nos.mkdir('../audio/' + filePath)\nfor j in range(len(true_segments)):\n per_segmentation = sig[true_segments[j][0]: true_segments[j][1]]\n wav.write('../audio/' + filePath + '/segmentation' + str(j) + '.wav', rate, per_segmentation)","repo_name":"Teiyui/AudioProject","sub_path":"project/audible_part_segmentation.py","file_name":"audible_part_segmentation.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26528525612","text":"from django.conf import settings\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.conf.urls import url\n\n# from main_site.views.view_feedback import FeedbackCreateView, FeedbackListView, FeedbackDetailView\nfrom main_site.views.view_feedback import FeedbackCreateView, FeedbackListView, FeedbackDetailView\nfrom main_site.views.view_maintenences import MaintenanceCreateView, MaintenanceDetailView, MaintenanceEndView, \\\n MaintenanceListView, MaintenanceUpdateView\nfrom main_site.views.view_schedules import ScheduleDetailView, ScheduleUpdateView\nfrom main_site.views.views import UserHomeView, LoginView, LogoutView, StaffHomeView, FareCalculatorView, PlayTripView, \\\n PlayView\nfrom main_site.views.view_announcements import AnnouncementCreateView, AnnouncementUpdateView, AnnouncementListView, \\\n AnnouncementDeleteView, AnnouncementDetailView\nfrom main_site.views.view_bills import BillDetailView, BillCreateView\nfrom main_site.views.view_drivers import DriverCreateView, DriverDetailView, DriverUpdateView, DriverDeleteView, \\\n DriverListView\nfrom main_site.views.view_requests import RequestDetailView, RequestListView, MyRequestsView, RequestCreateView, RequestCancelView\nfrom main_site.views.view_trips import TripCreateView, TripDetailView, TripCancelView, TripListView, UserTripListView\nfrom main_site.views.view_vehicles import VehicleCreateView, VehicleDetailView, VehicleUpdateView, VehicleDeleteView, \\\n VehicleListView\n\nurlpatterns=[\n url(r'^$', UserHomeView.as_view(), name='user-home'),\n url(r'^login',LoginView.as_view(),name='login'),\n url(r'^logout', LogoutView.as_view(), name='logout'),\n\n url(r'^staff$', StaffHomeView.as_view(),name='staff-home'),\n\n url(r'^requests/new', RequestCreateView.as_view(), name='new-request'),\n url(r'^requests/(?P\\d+)$', RequestDetailView.as_view(), name='view-request'),\n url(r'^requests/(?P\\d+)/cancel$', RequestCancelView.as_view(), name='cancel-request'),\n url(r'^requests[/]$', RequestListView.as_view(), name='list-requests'),\n url(r'^myrequests[/]$',MyRequestsView.as_view(),name='my-requests'),\n\n url(r'^drivers/new[/]$', DriverCreateView.as_view(), name='new-driver'),\n url(r'^drivers/(?P\\d+)$', DriverDetailView.as_view(), name='view-driver'),\n url(r'^drivers/(?P\\d+)/edit[/]$', DriverUpdateView.as_view(), name='update-driver'),\n url(r'^drivers/(?P\\d+)/delete[/]$', DriverDeleteView.as_view(), name='delete-driver'),\n url(r'^drivers[/]$', DriverListView.as_view(), name='list-drivers'),\n\n url(r'^vehicles/new$', VehicleCreateView.as_view(), name='new-vehicle'),\n url(r'^vehicles/(?P\\d+)$', VehicleDetailView.as_view(), name='view-vehicle'),\n url(r'^vehicles/(?P\\d+)/edit$', VehicleUpdateView.as_view(), name='update-vehicle'),\n url(r'^vehicles/(?P\\d+)/delete$', VehicleDeleteView.as_view(), name='delete-vehicle'),\n url(r'^vehicles', VehicleListView.as_view(), name='list-vehicles'),\n\n url(r'^requests/(?P\\d+)/trips/new', TripCreateView.as_view(), name='new-trip'),\n url(r'^trips/(?P\\d+)$', TripDetailView.as_view(), name='view-trip'),\n url(r'^trips/(?P\\d+)/cancel$', TripCancelView.as_view(), name='cancel-trip'),\n url(r'^requests/(?P\\d+)/trips', TripListView.as_view(), name='list-trips'),\n url(r'^myrequests/(?P\\d+)/trips', UserTripListView.as_view(), name='list-user-trips'),\n\n url(r'^requests/(?P\\d+)/billing$', BillCreateView.as_view(), name='new-bill'),\n url(r'^requests/(?P\\d+)/bill$', BillDetailView.as_view(), name='view-bill'),\n\n url(r'^announcements/new$', AnnouncementCreateView.as_view(), name='new-announcement'),\n url(r'^announcements/(?P\\d+)$', AnnouncementDetailView.as_view(), name='view-announcement'),\n url(r'^announcements/(?P\\d+)/edit$', AnnouncementUpdateView.as_view(), name='update-announcement'),\n url(r'^announcements/(?P\\d+)/delete$', AnnouncementDeleteView.as_view(), name='delete-announcement'),\n url(r'^announcements/$', AnnouncementListView.as_view(), name='list-announcements'),\n\n url(r'^maintenances/new$', MaintenanceCreateView.as_view(), name='new-maintenance'),\n url(r'^maintenances[/]$', MaintenanceListView.as_view(), name='list-maintenances'),\n url(r'^maintenances/(?P\\d+)$', MaintenanceDetailView.as_view(), name='view-maintenance'),\n url(r'^maintenances/(?P\\d+)/end$', MaintenanceEndView.as_view(), name='end-maintenance'),\n url(r'^maintenances/(?P\\d+)/update$', MaintenanceUpdateView.as_view(), name='update-maintenance'),\n\n url(r'^schedule[/]$', ScheduleDetailView.as_view(), name='view-schedule'),\n url(r'^schedule/update[/]$', ScheduleUpdateView.as_view(), name='update-schedule'),\n\n url(r'^fare_calculator[/]$', FareCalculatorView.as_view(), name='fare-calculator'),\n\n\n url(r'^feedbacks/new$', FeedbackCreateView.as_view(), name='new-feedback'),\n url(r'^feedbacks[/]$', FeedbackListView.as_view(), name='list-feedbacks'),\n url(r'^feedbacks/(?P\\d+)$', FeedbackDetailView.as_view(), name='view-feedback'),\n\n #playing trips\n url(r'^play_trips$', PlayTripView.as_view(), name='play-trips'),\n url(r'^play/(?P\\w+).mp3$', PlayView.as_view(), name='play'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"myashok/transportSystem","sub_path":"main_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14268804965","text":"# Example 5-25. Demo of methodcaller: second test shows the binding of extra arguments\n\nfrom operator import methodcaller\n\ns = 'The time has come'\nupcase = methodcaller('upper')\n\nprint(upcase(s))\n\nhiphenate = methodcaller('replace', ' ', '-')\n\nprint(hiphenate(s))","repo_name":"rajeevdodda/Python-Practice","sub_path":"FluentPython/Chapter 5/Example25.py","file_name":"Example25.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"40198110309","text":"import random\n\nfrom SQL import musicqueue, loops, skipped\nfrom Music import playvideo\n\n\nasync def shuffle(ctx, type):\n serverid = ctx.guild.id\n voice_state = ctx.author.voice\n replacement = []\n musiclist = await musicqueue.read(serverid)\n song = await loops.read(\"song\", serverid)\n queue = await loops.read(\"queue\", serverid)\n length = len(musiclist)\n if len(musiclist) > 1:\n number = random.randrange(0, (len(musiclist) - 1))\n else:\n number = 0\n if ctx.voice_client:\n if voice_state and ctx.author.voice.channel == ctx.voice_client.channel:\n if queue == 1:\n if type == 'normal':\n await ctx.reply(\"You can't shuffle when the queue is looped.\")\n elif type == 'slash':\n await ctx.respond(\"You can't shuffle when the queue is looped.\")\n elif song == 1:\n if type == 'normal':\n await ctx.reply(\"You can't shuffle when a song is looped.\")\n elif type == 'slash':\n await ctx.respond(\"You can't shuffle when a song is looped.\")\n else:\n ctx.voice_client.stop()\n for i in range(length):\n replacement.append({\"url\": musiclist[number][\"url\"], \"title\": musiclist[number][\"title\"], \"duration\": musiclist[number][\"duration\"], \"time\": musiclist[number][\"time\"]})\n del musiclist[number]\n if length > 1:\n length -= 1\n number = random.randrange(0, length)\n else:\n number = 0\n await musicqueue.empty(serverid)\n for i in range(len(replacement)):\n await musicqueue.write(replacement[i][\"url\"], replacement[i][\"title\"], replacement[i][\"duration\"], serverid)\n if type == 'normal':\n await ctx.reply('Queue has been shuffled.')\n elif type == 'slash':\n await ctx.respond('Queue has been shuffled.')\n await skipped.update(1, serverid)\n await playvideo.playvideo(ctx)\n elif voice_state is None:\n if type == 'normal':\n await ctx.reply(str(ctx.author.name) + \" is not in a channel.\")\n elif type == 'slash':\n await ctx.respond(str(ctx.author.name) + \" is not in a channel.\")\n else:\n if type == 'normal':\n await ctx.reply(str(ctx.author.name) + \" is not in the same channel.\")\n elif type == 'slash':\n await ctx.respond(str(ctx.author.name) + \" is not in the same channel.\")\n else:\n if type == 'normal':\n await ctx.reply('Bot is not connected to a voice channel')\n elif type == 'slash':\n await ctx.respond('Bot is not connected to a voice channel')","repo_name":"YorbenJoosen/Gerbinbot_3000","sub_path":"Music/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73631589264","text":"#Devasvi\nimport time \n\ndef countdown(t):\n \n while t:\n mins, secs = divmod(t, 60)\n timer = '{:02d}:{:02d}'.format(mins, secs)\n print(timer, end=\"\\r\")\n time.sleep(1)\n t -= 1\n \n print('Timer end')\n \nprint(\"Enter S to set\",\"Enter P to pause and resume\")\nprint(\"gy\")\nt = input(\"Enter the time in seconds: \")\ncountdown(int(t))\n","repo_name":"DevasviZ/Assignment_1","sub_path":"Minor_Project.py","file_name":"Minor_Project.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19376705935","text":"import os\n\nimport abkhazia.utils as utils\nfrom abkhazia.acoustic.abstract_acoustic_model import (\n AbstractAcousticModel)\nimport abkhazia.kaldi as kaldi\n\n\nclass TriphoneSpeakerAdaptive(AbstractAcousticModel):\n \"\"\"Wrapper on Kaldi egs/wsj/s5/steps/{align_fmllr, train_sat}.sh\n\n The parameter `tri-dir` is the path to the computed triphone\n speaker independent acoustic model. It must contains the files\n 'ali.1.gz' and 'final.mdl', else an OSError is raised.\n\n Other parameters are the same as in AbstractAcousticModel.\n\n The following options are not forwarded from Kaldi to Abkhazia:\n train_tree=true, tree_stats_opts, context_opts, power,\n cluster_thresh, cluster_phones_opts, phone_map,\n compile_questions_opts.\n\n \"\"\"\n model_type = 'tri-sa'\n\n options = {k: v for k, v in (\n kaldi.options.make_option(\n 'transition-scale', default=1.0, type=float,\n help='Transition-probability scale (relative to acoustics)'),\n kaldi.options.make_option(\n 'self-loop-scale', default=0.1, type=float,\n help=('Scale of self-loop versus non-self-loop log probs '\n '(relative to acoustics)')),\n kaldi.options.make_option(\n 'acoustic-scale', default=0.1, type=float,\n help='Scaling factor for acoustic likelihoods'),\n kaldi.options.make_option(\n 'beam', default=10, type=int,\n help='Decoding beam used in alignment'),\n kaldi.options.make_option(\n 'retry-beam', default=40, type=int,\n help='Decoding beam for second try at alignment'),\n kaldi.options.make_option(\n 'careful', default=False, type=bool,\n help=('If true, do careful alignment, which is better at '\n 'detecting alignment failure (involves loop to start '\n 'of decoding graph)')),\n kaldi.options.make_option(\n 'boost-silence', default=1.0, type=float,\n help=('Factor by which to boost silence likelihoods '\n 'in alignment')),\n kaldi.options.make_option(\n 'fmllr-update-type', default='full', type=str,\n help='Update type for FMLLR (full|diag|offset|none)'),\n kaldi.options.make_option(\n 'realign-iterations', type=list, default=[10, 20, 30],\n help='Iterations on which to align features on the model'),\n kaldi.options.make_option(\n 'fmllr-iterations', type=list, default=[2, 4, 6, 12],\n help='Iterations on which to align features on the model'),\n kaldi.options.make_option(\n 'num-iterations', default=35, type=int,\n help='Number of iterations for training'),\n kaldi.options.make_option(\n 'max-iteration-increase', default=25, type=int,\n help='Last iteration to increase number of Gaussians on'),\n kaldi.options.make_option(\n 'silence-weight', default=0.0, type=float,\n help='Weight on silence in fMLLR estimation'),\n kaldi.options.make_option(\n 'num-leaves', default=2500, type=int,\n help='Maximum number of leaves to be used in tree-buliding'),\n kaldi.options.make_option(\n 'total-gaussians', default=15000, type=int,\n help='Target number of Gaussians at the end of training'),\n )}\n\n def __init__(self, corpus, feats_dir, tri_dir,\n output_dir, lang_args, log=utils.logger.null_logger()):\n super(TriphoneSpeakerAdaptive, self).__init__(\n corpus, feats_dir, output_dir, lang_args, log=log)\n\n self.tri_dir = os.path.abspath(tri_dir)\n utils.check_directory(\n self.tri_dir, ['final.mdl', 'ali.1.gz'])\n\n def run(self):\n align_dir = os.path.join(self.recipe_dir, 'exp', 'tri_ali_fmllr')\n self._align_fmllr(align_dir)\n self._train_sat(align_dir)\n\n def _align_fmllr(self, align_dir):\n \"\"\"Wrapper on steps/align_fmllr.sh\n\n Computes training alignments; assumes features are (LDA+MLLT\n or delta+delta-delta) + fMLLR (probably with SAT models). It\n first computes an alignment with the final.alimdl (or the\n final.mdl if final.alimdl is not present), then does 2\n iterations of fMLLR estimation.\n\n \"\"\"\n message = 'forced-aligning triphone model'\n\n command = (\n 'steps/align_fmllr.sh --nj {njobs} --cmd \"{cmd}\" '\n '--scale-opts \"--transition-scale={transition} '\n '--acoustic-scale={acoustic} --self-loop-scale={selfloop}\" '\n '--beam {beam} --retry-beam {retrybeam} '\n '--careful {careful} --boost-silence {boost} '\n '--fmllr-update-type {fmllr} '\n '{data} {lang} {origin} {target}'\n .format(\n njobs=self.njobs,\n cmd=utils.config.get('kaldi', 'train-cmd'),\n transition=self._opt('transition-scale'),\n acoustic=self._opt('acoustic-scale'),\n selfloop=self._opt('self-loop-scale'),\n beam=self._opt('beam'),\n retrybeam=self._opt('retry-beam'),\n careful=self._opt('careful'),\n boost=self._opt('boost-silence'),\n fmllr=self._opt('fmllr-update-type'),\n data=self.data_dir,\n lang=self.lang_dir,\n origin=self.tri_dir,\n target=align_dir))\n self._run_am_command(command, align_dir, message)\n\n def _train_sat(self, ali_dir):\n \"\"\"Wrapper on steps/train_sat.shallow\n\n This does Speaker Adapted Training (SAT), i.e. train on\n fMLLR-adapted features. It can be done on top of either\n LDA+MLLT, or delta and delta-delta features. If there are no\n transforms supplied in the alignment directory, it will\n estimate transforms itself before building the tree (and in\n any case, it estimates transforms a number of times during\n training).\n\n \"\"\"\n message = 'training speaker-adaptive triphone model'\n target = os.path.join(self.recipe_dir, 'exp', self.model_type)\n\n if not os.path.isdir(ali_dir):\n raise RuntimeError(\n 'unexisting directory: {}, please provide alignments '\n 'using align_fmllr'.format(ali_dir))\n\n command = (\n 'steps/train_sat.sh --cmd \"{cmd}\" '\n '--scale-opts \"--transition-scale={transition} '\n '--acoustic-scale={acoustic} --self-loop-scale={selfloop}\" '\n '--realign-iters {realign} --num-iters {niters} '\n '--careful {careful} --boost-silence {boost} '\n '--fmllr-update-type {fmllr} --silence-weight {silence} '\n '--fmllr-iters {fmllriters} '\n '--max-iter-inc {maxiter} --beam {beam} --retry-beam {retrybeam} '\n '{numleaves} {totgauss} {data} {lang} {origin} {target}'\n .format(\n cmd=utils.config.get('kaldi', 'train-cmd'),\n transition=self._opt('transition-scale'),\n acoustic=self._opt('acoustic-scale'),\n selfloop=self._opt('self-loop-scale'),\n beam=self._opt('beam'),\n retrybeam=self._opt('retry-beam'),\n careful=self._opt('careful'),\n boost=self._opt('boost-silence'),\n maxiter=self._opt('max-iteration-increase'),\n realign=self._opt('realign-iterations'),\n niters=self._opt('num-iterations'),\n numleaves=self._opt('num-leaves'),\n totgauss=self._opt('total-gaussians'),\n fmllr=self._opt('fmllr-update-type'),\n silence=self._opt('silence-weight'),\n fmllriters=self._opt('fmllr-iterations'),\n data=self.data_dir,\n lang=self.lang_dir,\n origin=ali_dir,\n target=target))\n self._run_am_command(command, target, message)\n","repo_name":"bootphon/abkhazia","sub_path":"abkhazia/acoustic/triphone_speaker_adaptive.py","file_name":"triphone_speaker_adaptive.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"}
+{"seq_id":"22974788067","text":"import json\r\nimport sys\r\nimport os\r\nimport firebase_admin\r\n\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import firestore\r\n\r\nservice_key = \"yourservicekey.json\"\r\n\r\ndef importData(directory):\r\n\ttry:\r\n\t\tcred_obj = credentials.Certificate(service_key)\r\n\t\tfirebase_admin.initialize_app(cred_obj)\r\n\r\n\t\tdb = firestore.client()\r\n\r\n\t\tfor filename in os.listdir(directory):\r\n\t\t\tfile = os.path.join(directory,filename)\r\n\t\t\tif os.path.isfile(file):\r\n\t\t\t\tdocument = os.path.splitext(filename)[0]\r\n\t\t\t\t#collection.lstrip(directory + \"\\\\\")\r\n\t\t\t\tprint(document)\r\n\t\t\t\tdata = dataJSON(file)\r\n\r\n\t\t\t\tdocumentPtr = db.collection('recipes').document(document).collection('all')\r\n\t\t\t\tfor element in data:\r\n\t\t\t\t\tif element:\r\n\t\t\t\t\t\tdocumentPtr.add(element)\r\n\texcept Exception as error:\r\n\t\tprint(\"ERROR: {}\".format(str(error)))\r\n\telse:\r\n\t\tprint(\"completed\")\r\n\r\n\r\ndef dataJSON(datafile):\r\n\twith open(datafile, 'r',encoding= 'utf-8') as file:\r\n\t\treturn json.load(file)\r\n\r\nUSAGE = \"Please enter the directory for the data.\\n COMMAND: python firebase_add_dir.py directory\\n\"\r\n\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\tif len(sys.argv) == 2:\r\n\t\t\tdirectory = sys.argv[1]\r\n\r\n\t\telse:\r\n\t\t\tprint(USAGE)\r\n\t\t\texit()\r\n\r\n\t\timportData(directory)\r\n\r\n\texcept KeyboardInterrupt as keyboard_err:\r\n\t\tprint(\"Process Interrupted\\n\")\r\n\tfinally:\r\n\t\tprint(\"Ended!\")\r\n","repo_name":"ericcai9907/DIETISE","sub_path":"Database/Firebase/firebase_add_dir.py","file_name":"firebase_add_dir.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"21223993560","text":"# import cProfile\nimport sys\nimport json\nfrom .settings import *\nfrom .graph import *\nfrom contextlib import suppress\n\n\nclass GraphInteraction():\n def __init__(self, mainWin):\n self.initKeymap()\n self.graph = TreeDecomposition(Graph(True))\n self.hoverVertex = None\n self.hoverEdge = None\n self.selectedVertices = [] # List with vertex ids\n self.mainWin = mainWin\n self.isTreeDecomposition = type(self.graph) == TreeDecomposition\n\n # LOL, this is actually nescessary for (DP on) some graphs (500 vertices)\n sys.setrecursionlimit(2000)\n\n def redraw(self):\n self.mainWin.redraw()\n\n def initKeymap(self):\n self.keymap = {\n 'LMB': self.selectVertex,\n 'RMB': self.selectVertex,\n 'a': self.selectAll,\n 'Esc': self.deselect,\n 'v': self.addVertex,\n 'b': self.addBag,\n 'd': self.removeVertices,\n 'c': self.cliqueify,\n 'p': self.pathify,\n 't': self.treeify,\n '1': self.toggleDrawText,\n '2': self.toggleDrawSize,\n '-': self.zoomOut,\n '+': self.zoomIn,\n '=': self.resetZoom,\n 'g': self.gridAdjust,\n 'q': self.tspDP,\n 'w': self.tikz,\n 'Ctrl-s': self.saveAs,\n 'Ctrl-o': self.openFile,\n 'Ctrl-c': self.quit,\n 'Ctrl-d': self.quit\n }\n\n #\n # Graph editing tools\n #\n def selectVertex(self):\n \"\"\"(De)select a vertex\"\"\"\n if not self.hoverVertex:\n return False\n if self.hoverVertex in self.selectedVertices:\n with suppress(ValueError):\n self.selectedVertices.remove(self.hoverVertex)\n else:\n self.selectedVertices.append(self.hoverVertex)\n return True\n\n def selectAll(self):\n \"\"\"(De)select all vertices\"\"\"\n if self.selectedVertices == self.graph.vertices and self.selectedVertices != []:\n self.selectedVertices = []\n elif self.graph.originalGraph:\n if self.selectedVertices == self.graph.originalGraph.vertices:\n self.selectedVertices = list(self.graph.vertices)\n else:\n self.selectedVertices = list(self.graph.originalGraph.vertices)\n else:\n self.selectedVertices = list(self.graph.vertices)\n self.redraw()\n\n def deselect(self):\n \"\"\"Deselect all vertices\"\"\"\n self.selectedVertices = []\n self.redraw()\n\n def addVertex(self):\n \"\"\"Add a vertex at the mouse position\"\"\"\n workGraph = self.graph.originalGraph if self.isTreeDecomposition else self.graph\n if self.hoverVertex != None or self.hoverEdge != None:\n return False\n if not workGraph.addVertex(Vertex(workGraph, len(workGraph.vertices), self.mainWin.mousePos)):\n return False\n self.hoverVertex = workGraph.vertices[-1]\n self.redraw()\n\n def addBag(self):\n \"\"\"Add a bag at the mouse position\"\"\"\n if not self.isTreeDecomposition:\n return False\n if self.hoverVertex != None or self.hoverEdge != None:\n return False\n if not self.graph.addVertex(Bag(self.graph, len(self.graph.vertices), self.mainWin.mousePos)):\n return False\n self.hoverVertex = self.graph.vertices[-1]\n self.redraw()\n\n def removeVertices(self):\n \"\"\"Remove the selected vertices\"\"\"\n for v in self.selectedVertices:\n if type(v) == Bag:\n self.graph.removeVertex(v)\n else:\n self.graph.originalGraph.removeVertex(v)\n self.selectedVertices = []\n self.redraw()\n\n def cliqueify(self):\n \"\"\"Add or remove edges between all selected vertices\"\"\"\n if len(self.selectedVertices) < 2:\n return\n result = False\n workGraph = self.graph\n if self.isTreeDecomposition and type(self.selectedVertices[0]) != Bag:\n workGraph = self.graph.originalGraph\n # Add clique edges\n for a in self.selectedVertices:\n for b in self.selectedVertices:\n if a != b:\n if workGraph.addEdge(a.vid, b.vid):\n result = True\n # If no edges were added, remove all edges\n if not result:\n for a in self.selectedVertices:\n for b in self.selectedVertices:\n if a.vid < b.vid:\n workGraph.removeEdge(a.vid, b.vid)\n self.redraw()\n\n def pathify(self):\n \"\"\"Create a path, a tour or remove all edges between consecutive vertices\"\"\"\n if len(self.selectedVertices) < 2:\n return\n result = False\n workGraph = self.graph\n sv = self.selectedVertices\n if self.isTreeDecomposition and type(self.selectedVertices[0]) != Bag:\n workGraph = self.graph.originalGraph\n # Add path edges\n for i in range(len(sv) - 1):\n if workGraph.addEdge(sv[i].vid, sv[i + 1].vid):\n result = True\n # Add tour edge\n if not result:\n result = workGraph.addEdge(sv[0].vid, sv[-1].vid)\n # If no edges were added, remove all edges\n if not result:\n for i in range(len(sv) - 1):\n workGraph.removeEdge(sv[i].vid, sv[i + 1].vid)\n if len(sv) > 2:\n workGraph.removeEdge(sv[0].vid, sv[-1].vid)\n self.redraw()\n\n def treeify(self):\n \"\"\"Connect or remove the first vertex to all others\"\"\"\n if len(self.selectedVertices) < 2:\n return\n result = False\n workGraph = self.graph\n sv = self.selectedVertices\n if self.isTreeDecomposition and type(self.selectedVertices[0]) != Bag:\n workGraph = self.graph.originalGraph\n # Add path edges\n r = sv[0]\n for v in sv[1:]:\n if workGraph.addEdge(r.vid, v.vid):\n result = True\n # If no edges were added, remove all edges\n if not result:\n for v in sv[1:]:\n workGraph.removeEdge(r.vid, v.vid)\n self.redraw()\n\n def toggleDrawText(self):\n \"\"\"Toggle drawtext settings\"\"\"\n self.mainWin.settings.drawtext = not self.mainWin.settings.drawtext\n self.redraw()\n def toggleDrawSize(self):\n \"\"\"Toggle draw size settings\"\"\"\n self.mainWin.settings.drawsize = (self.mainWin.settings.drawsize + 2) % 3\n self.redraw()\n\n def zoomOut(self):\n \"\"\"Zoom out\"\"\"\n self.mainWin.scaleFactor /= 2\n self.redraw()\n def zoomIn(self):\n \"\"\"Zoom in\"\"\"\n self.mainWin.scaleFactor *= 2\n self.redraw()\n def resetZoom(self):\n \"\"\"Reset zoom\"\"\"\n self.mainWin.scaleFactor = 1\n self.redraw()\n\n def gridAdjust(self):\n \"\"\"Adjust vertices that are almost horizontal or vertical\"\"\"\n difference = 5\n for u in self.selectedVertices:\n for v in self.selectedVertices:\n if u == v:\n continue\n if abs(u.pos.x - v.pos.x) <= difference:\n u.pos.x = (u.pos.x + v.pos.x) // 2\n v.pos.x = u.pos.x\n if abs(u.pos.y - v.pos.y) <= difference:\n u.pos.y = (u.pos.y + v.pos.y) // 2\n v.pos.y = u.pos.y\n self.redraw()\n\n #\n # Parse to tikz\n #\n def tikz(self):\n \"\"\"Output the LaTeX tikz-code that draws the current graph (and TD)\"\"\"\n # Some configuration that might (or might not) be usefull to have in the LaTeX document.\n # \\tikzstyle{vertex2} = [circle,fill=black!25,minimum size=18pt,align=center,font=\\tiny]\n # \\tikzstyle{vertex1} = [circle,fill=black!25,minimum size=8pt,align=center,font=\\tiny]\n # \\tikzstyle{vertex0} = [circle,minimum size=1pt]\n # \\tikzstyle{bag} = [circle,fill=black!25,minimum size=35pt,align=center,text width=35pt,font=\\tiny]\n # \\tikzstyle{edge} = [draw,-]\n # \\tikzstyle{arc} = [draw,->-]\n # \\tikzstyle{weight} = [font=\\small]\n z = 1 / 80\n print(r\"\\begin{figure}\")\n print(r\"\\centering\")\n print(r\"\\begin{tikzpicture}[auto,swap]\")\n\n # Tree decomposition first, so it appears in the back if overlapping\n for b in self.graph.vertices:\n print(r\"\\node[bag] (b-{}) at ({:.2f}, {:.2f}) {{{}: {}}};\".format(\n b.vid, b.pos.x * z, -b.pos.y * z, b.vid, str([v.vid for v in b.vertices])[1:-1]\n ))\n\n for b in self.graph.vertices:\n for e in b.edges:\n if b.vid > e.other(b).vid:\n continue\n print(r\"\\path[edge] (b-{}) to (b-{});\".format(b.vid, e.other(b).vid))\n\n # Then the normal graph\n for v in self.graph.originalGraph.vertices:\n print(r\"\\node[vertex{}] ({}) at ({:.2f}, {:.2f}) {{{}}};\".format(\n self.mainWin.settings.drawsize, v.vid, v.pos.x * z, -v.pos.y * z, v.vid\n ))\n\n for v in self.graph.originalGraph.vertices:\n for e in v.edges:\n if v.vid > e.other(v).vid:\n continue\n print(r\"\\path[edge] ({}) to ({});\".format(v.vid, e.other(v).vid))\n\n print(r\"\\end{tikzpicture}\")\n print(r\"\\caption{TODO}\")\n print(r\"\\label{fig:TODO}\")\n print(r\"\\end{figure}\")\n print()\n\n #\n # Dynamic Programming Algorithm\n #\n def tspDP(self):\n \"\"\"Temp tsp\"\"\"\n # cProfile.runctx('self.temptemptemp()', globals(), locals())\n self.temptemptemp()\n\n def temptemptemp(self):\n \"\"\"Compute the smallest tour using DP on a tree decomposition\"\"\"\n if not self.isTreeDecomposition or len(self.graph.vertices) < 1:\n return\n Xroot = self.createRoot()\n S = self.fromDegreesEndpoints([2] * len(Xroot.vertices), [])\n value = self.tspTable(S, Xroot)\n print(\"TSP cost: {}\".format(value))\n for nr, table in enumerate([bag.a for bag in self.graph.vertices]):\n print('X{}'.format(nr))\n for key, val in table.items():\n print(' {}: {}'.format(key, val))\n if value < sys.maxsize:\n tour = list(set(self.tspReconstruct(S, Xroot)))\n print('\\nDP-TSP:\\n Length: {}\\n Tour: {}\\n'.format(value, tour))\n\n def tspTable(self, S, Xi):\n # The smallest value such that all vertices below Xi have degree 2 and vertices in Xi have degrees defined by S\n debug = False\n if debug: print(\"A({} {}, X{}): {}\".format(self.toDegrees(S), self.toEndpoints(S), Xi.vid, \"?\"))\n if S in Xi.a:\n if debug: print('lookup return: {}'.format(Xi.a[S]))\n return Xi.a[S]\n # We don't know this value yet, so we compute it.\n edges = []\n for v in Xi.vertices:\n for e in v.edges:\n if e.other(v) not in Xi.vertices:\n continue\n if v.vid < e.other(v).vid:\n edges.append(e)\n edges.sort(key=lambda e: e.cost)\n degrees = self.toDegrees(S)\n endpoints = self.toEndpoints(S)\n childEndpoints = [[] for _ in Xi.edges]\n childDegrees = [[0] * len(degrees) for _ in Xi.edges]\n Xi.a[S] = self.tspRecurse(Xi, edges, 0, 0, degrees, childDegrees, endpoints, childEndpoints,\n self.tspChildEvaluation, min, sys.maxsize)\n if debug: print('calculation return: {}'.format(Xi.a[S]))\n return Xi.a[S]\n\n def tspChildEvaluation(self, Xi, edges, targetDegrees, childDegrees, endpoints, childEndpoints, resultingEdgeList = None):\n # This method is the base case for the calculate tsp recurse method.\n # If we analyzed the degrees of all vertices (i.e. we have a complete combination),\n # return the sum of B values of all children.\n debug = False\n # Check: all bags (except the root) are not allowed to be a cycle.\n if not endpoints and Xi.parent:\n if debug: print('{}All bags should be a cycle - no endpoints given'.format(' ' * len(Xi.vertices)))\n return sys.maxsize\n # Base cost: the edges needed inside this Xi to account for the (target) degrees we didn't pass on to our children.\n allChildEndpoints = sum(childEndpoints, []) # Flatten the list\n val = self.tspEdgeSelect(sys.maxsize, 0, Xi, edges, targetDegrees, endpoints, allChildEndpoints, resultingEdgeList)\n if 0 <= val < sys.maxsize:\n if debug: print('{}Local edge selection cost: {}, edges: {}, degrees: {}, endpoints: {}, edgeList: {}'.format(\n ' ' * len(Xi.vertices), val, edges, targetDegrees, endpoints, resultingEdgeList))\n for k, cds in enumerate(childDegrees):\n Xkid = Xi.edges[k].other(Xi)\n if Xi.parent != Xkid:\n # Strip off the vertices not in Xkid and add degrees 2 for vertices not in Xi\n kidDegrees = [2] * len(Xkid.vertices)\n for p, v in enumerate(Xkid.vertices):\n for q, w in enumerate(Xi.vertices):\n if v == w:\n kidDegrees[p] = cds[q]\n S = self.fromDegreesEndpoints(kidDegrees, childEndpoints[k])\n if debug: print('{}child A: {}, cds: {}, degrees: {}, endpoints: {}'.format(' ' * len(Xi.vertices),\n val, cds, kidDegrees, childEndpoints[k]))\n # Add to that base cost the cost of hamiltonian paths nescessary to satisfy the degrees.\n val += self.tspTable(S, Xkid)\n if debug: print('{}Min cost for X{} with these child-degrees: {}'.format(' ' * len(Xi.vertices), Xi.vid, val))\n else:\n if debug: print('{}No local edge selection found'.format(' ' * len(Xi.vertices)))\n return val\n\n def tspReconstruct(self, S, Xi):\n # Reconstruct the tsp tour (get a list of all edges)\n edges = []\n for v in Xi.vertices:\n for e in v.edges:\n if e.other(v) not in Xi.vertices:\n continue\n if v.vid < e.other(v).vid:\n edges.append(e)\n edges.sort(key=lambda e: e.cost)\n degrees = self.toDegrees(S)\n endpoints = self.toEndpoints(S)\n childEndpoints = [[] for _ in Xi.edges]\n childDegrees = [[0] * len(degrees) for _ in Xi.edges]\n mergeF = lambda a, b: a + b\n return self.tspRecurse(Xi, edges, 0, 0, degrees, childDegrees, endpoints, childEndpoints, self.tspLookback, mergeF, [])\n\n def tspLookback(self, Xi, edges, targetDegrees, childDegrees, endpoints, childEndpoints):\n # This method is the base case for the reconstruct tsp recurse method.\n debug = False\n resultingEdgeList = [] # This list will be filled with the edges used in Xi\n totalDegrees = targetDegrees.copy()\n for cds in childDegrees:\n for i, d in enumerate(cds):\n totalDegrees[i] += d\n val = Xi.a[self.fromDegreesEndpoints(totalDegrees, endpoints)]\n if val == None:\n return []\n if val != self.tspChildEvaluation(Xi, edges, targetDegrees, childDegrees, endpoints, childEndpoints, resultingEdgeList):\n return [] # Side effect above intended to fill the edge list\n if debug: print('X{} edgelist 1: {}'.format(Xi.vid, resultingEdgeList))\n # So these are indeed the child degrees that we are looking for\n for k, cds in enumerate(childDegrees):\n Xkid = Xi.edges[k].other(Xi)\n if Xi.parent != Xkid:\n # Strip off the vertices not in Xkid and add degrees 2 for vertices not in Xi\n kidDegrees = [2] * len(Xkid.vertices)\n for p, v in enumerate(Xkid.vertices):\n for q, w in enumerate(Xi.vertices):\n if v == w:\n kidDegrees[p] = cds[q]\n S = self.fromDegreesEndpoints(kidDegrees, childEndpoints[k])\n # We already got the resultingEdgeList for Xi, now add the REL for all the children\n resultingEdgeList += self.tspReconstruct(S, Xkid)\n # print('test 2 edgelist: {}'.format(resultingEdgeList))\n if debug: print('X{} edgelist 3: {}'.format(Xi.vid, resultingEdgeList))\n return resultingEdgeList\n\n def tspRecurse(self, Xi, edges, i, j, targetDegrees, childDegrees, endpoints, childEndpoints, baseF, mergeF, defaultVal):\n # Select all possible mixes of degrees for all vertices and evaluate them\n # i = the vertex we currently analyze, j = the child we currently analyze\n # targetDegrees goes from full to empty, childDegrees from empty to full, endpoints are the endpoints for each child path\n debug = False and isinstance(defaultVal, int)\n if debug: print('{}{}{} (X{}: {}, {}) {}|{}'.format(' ' * i, childDegrees, ' ' * (len(Xi.vertices) + 8 - i), Xi.vid, i, j, targetDegrees, endpoints))\n # Final base case.\n if i >= len(Xi.vertices):\n return baseF(Xi, edges, targetDegrees, childDegrees, endpoints, childEndpoints)\n # Base case: if we can't or didn't want to 'spend' this degree, move on\n if targetDegrees[i] == 0 or j >= len(Xi.edges):\n return self.tspRecurse(Xi, edges, i + 1, 0, targetDegrees, childDegrees, endpoints, childEndpoints,\n baseF, mergeF, defaultVal)\n Xj = Xi.edges[j].other(Xi)\n # Base case: if the current bag (must be child) does not contain the vertex to analyze, try the next (child) bag\n if Xi.parent == Xi.edges[j].other(Xi) or Xi.vertices[i] not in Xj.vertices:\n return self.tspRecurse(Xi, edges, i, j + 1, targetDegrees, childDegrees, endpoints, childEndpoints,\n baseF, mergeF, defaultVal)\n\n # If the current degree is 2, try letting the child manage it\n result = defaultVal\n if targetDegrees[i] == 2 and childDegrees[j][i] == 0:\n td, cds = targetDegrees.copy(), [d.copy() for d in childDegrees]\n td[i] = 0\n cds[j][i] = 2\n result = self.tspRecurse(Xi, edges, i + 1, 0, td, cds, endpoints, childEndpoints, baseF, mergeF, defaultVal)\n # If the current degree is at least 1 (which it is if we get here),\n # try to combine it (for all other vertices) in a hamiltonian path\n for k in range(i + 1, len(Xi.vertices)):\n # Stay in {0, 1, 2}\n if targetDegrees[k] < 1 or childDegrees[j][k] > 1 or Xi.vertices[k] not in Xj.vertices:\n continue\n # Don't add edges twice\n if self.inEndpoints(childEndpoints[j], Xi.vertices[i].vid, Xi.vertices[k].vid):\n continue\n td, cds, eps = targetDegrees.copy(), [d.copy() for d in childDegrees], [ep.copy() for ep in childEndpoints]\n td[i] -= 1\n cds[j][i] += 1\n td[k] -= 1\n cds[j][k] += 1\n eps[j].extend([Xi.vertices[nr].vid for nr in [i, k]])\n\n\n # DEBUG DEBUG DEBUG\n for test1 in range(len(eps[j]) - 1):\n for test2 in range(test1 + 1, len(eps[j])):\n if eps[j][test1] == eps[j][test2]:\n print(\"NOOOOOOOOOOOOOOOO! - some endpoints are occuring twice in the eps list: {}\".format(eps[j]));\n\n\n # We may have to try to analyze the same vertex again if it's degree is higher than 1\n result = mergeF(result, self.tspRecurse(Xi, edges, i, j, td, cds, endpoints, eps, baseF, mergeF, defaultVal))\n # Also, try not assigning this degree to anyone, we (maybe) can solve it inside Xi\n result = mergeF(result, self.tspRecurse(Xi, edges, i, j + 1, targetDegrees, childDegrees,\n endpoints, childEndpoints, baseF, mergeF, defaultVal))\n return result\n\n # Todo: use the minimum to abort early??? (is possible for leaf case, but perhaps not for normal bag case\n def tspEdgeSelect(self, minimum, index, Xi, edges, degrees, endpoints, allChildEndpoints, edgeList = None):\n # Calculate the smallest cost to satisfy the degrees target using only using edges >= the index\n debug = False\n # Base case 1: the degrees are all zero, so we succeeded as we don't need to add any more edges\n satisfied = True\n for d in degrees:\n if d != 0:\n satisfied = False\n break\n if satisfied:\n # So we have chosen all our edges and satisfied the targets - now make sure there is no cycle (unless root)\n if not self.cycleCheck(endpoints, edgeList, allChildEndpoints):\n if debug: print('Edge select ({}): edges contain a cycle'.format(index))\n return sys.maxsize\n if debug: print('Edge select ({}): no need to add edges, min value: 0'.format(index))\n return 0\n # Base case 2: we have not succeeded yet, but there are no more edges to add, so we failed\n if index >= len(edges):\n if debug: print('Edge select ({}): no more edges to add'.format(index))\n return sys.maxsize\n # Base case 3: one of the degrees is < 1, so we added too many vertices, so we failed [with side effect]\n edge = edges[index]\n deg = degrees.copy()\n assertCounter = 0\n for i, d in enumerate(deg):\n if Xi.vertices[i] == edge.a or Xi.vertices[i] == edge.b:\n if d < 0: # If it's negative it will tell us later\n # - can't return right now, as we need to evaluete not taking this edge as well.\n if debug: print('Edge select ({}): too many edges added'.format(index))\n return sys.maxsize\n # While checking this base case, also compute the new degree list for the first recursion\n deg[i] -= 1\n assertCounter += 1\n assert assertCounter in {0, 2}\n\n # Try both to take the edge and not to take the edge\n if debug: print('Edge select ({}), degrees: {}'.format(index, degrees))\n tempEL = [] if edgeList == None else edgeList.copy()\n tempEL1, tempEL2 = tempEL + [edge], tempEL.copy()\n minimum = min(minimum, edge.cost + self.tspEdgeSelect(minimum - edge.cost, index + 1, Xi, edges,\n deg, endpoints, allChildEndpoints, tempEL1))\n val = self.tspEdgeSelect(minimum, index + 1, Xi, edges, degrees, endpoints, allChildEndpoints, tempEL2)\n if val < minimum:\n minimum = val\n # So without edge is better - Append the second edge list\n if edgeList != None:\n for e in tempEL2:\n edgeList.append(e)\n # So without edge is not better - Append the first edge list\n elif edgeList != None:\n for e in tempEL1:\n edgeList.append(e)\n if debug: print('Edge select ({}): min value: {}, edges: {}'.format(index, minimum, edgeList))\n return minimum\n\n def toDegrees(self, S):\n # From a string representation to a list of degrees\n return json.loads(S.split('|')[0])\n\n def toEndpoints(self, S):\n # From a string representation to a list of edges\n return json.loads(S.split('|')[1])\n\n def fromDegreesEndpoints(self, degrees, endpoints):\n # From a list of degrees and endpoints to a string representation\n return json.dumps(degrees) + '|' + json.dumps(endpoints)\n\n def createRoot(self, rootBag=None):\n \"\"\"Make the tree decomposition a true tree, by choosing a root and setting all parent pointers correctly\"\"\"\n # Choose the first bag as root if none is given\n if rootBag == None:\n rootBag = self.graph.vertices[0]\n # Define a local function that sets the parent of a bag recursively\n def setParentRecursive(bag, parent):\n bag.parent = parent\n bag.a = {}\n for e in bag.edges:\n child = e.other(bag)\n if not parent or bag.parent != child:\n setParentRecursive(child, bag)\n # Set the parent for all bags\n setParentRecursive(rootBag, None)\n return rootBag\n\n def cycleCheck(self, endpoints, edgeList, allChildEndpoints):\n # This method returns whether or not the given edge list and all child endpoints provide a set of paths\n # satisfying the endpoints and sorts the edge list in place.\n debug = False\n progressCounter, edgeCounter, endpsCounter, v = -2, 0, 0, None\n if edgeList == None: edgeList = []\n\n # Special case: the root bag.\n if endpoints == []:\n if len(allChildEndpoints) > 0:\n endpoints = allChildEndpoints[:2]\n endpsCounter += 2\n elif len(edgeList) > 0:\n endpoints = [edgeList[0].a.vid, edgeList[0].b.vid]\n edgeCounter += 1\n else:\n if debug: print('ERROR: cycle check root bag has both no edges to add, nor any child endpoints')\n return False\n\n # Normal case\n while True:\n # Dump the state\n if debug:\n print('cycle check dump 1:')\n print(' endpoints: {}'.format(endpoints))\n print(' edgeList: {} - {}'.format(edgeCounter, edgeList))\n print(' kid endpoints: {} - {}'.format(endpsCounter, allChildEndpoints))\n print(' progress: {} - v: {}\\n'.format(progressCounter, -1 if not v else v.vid))\n\n # If we completed the path\n if v == None or v.vid == endpoints[progressCounter + 1]:\n progressCounter += 2\n if progressCounter >= len(endpoints):\n if edgeCounter == len(edgeList) and endpsCounter == len(allChildEndpoints):\n return True\n else:\n if debug: print('ERROR: all endpoints are satisfied, but there are edges or endpoints left')\n return False\n v = self.graph.originalGraph.vertices[endpoints[progressCounter]]\n\n # Dump the state\n if debug:\n print('cycle check dump 2:')\n print(' endpoints: {}'.format(endpoints))\n print(' edgeList: {} - {}'.format(edgeCounter, edgeList))\n print(' kid endpoints: {} - {}'.format(endpsCounter, allChildEndpoints))\n print(' progress: {} - v: {}\\n'.format(progressCounter, -1 if not v else v.vid))\n\n # Find the next vertex\n for i in range(endpsCounter, len(allChildEndpoints), 2):\n if v.vid in allChildEndpoints[i : i + 2]:\n v = self.graph.originalGraph.vertices[allChildEndpoints[i + 1 if v.vid == allChildEndpoints[i] else i]]\n allChildEndpoints[endpsCounter : endpsCounter + 2], allChildEndpoints[i : i + 2] = allChildEndpoints[\n i : i + 2], allChildEndpoints[endpsCounter : endpsCounter + 2]\n endpsCounter += 2\n break\n else:\n for i in range(edgeCounter, len(edgeList)):\n if v in edgeList[i]:\n v = edgeList[i].other(v)\n edgeList[edgeCounter], edgeList[i] = edgeList[i], edgeList[edgeCounter]\n edgeCounter += 1\n break\n else:\n if debug: print('eps: {}, edgelist: {}, all kid eps: {}'.format(endpoints, edgeList, allChildEndpoints))\n if debug: print('ERROR, no more endpoints or edges found according to specs')\n return False\n if debug: print('ERROR: The code should not come here')\n return False\n\n def inEndpoints(self, endpoints, start, end):\n # Return whether or not this combination of endpoints (or reversed order) is already in the endpoints list\n for j in range(0, len(endpoints), 2):\n if (endpoints[j] == start and endpoints[j + 1] == end) or (endpoints[j + 1] == start and endpoints[j] == end):\n return True\n return False\n\n #\n # Misc\n #\n def quit(self):\n \"\"\"Quit\"\"\"\n self.mainWin.quit()\n\n def saveAs(self):\n \"\"\"Save the graph to file\"\"\"\n origGraph = self.graph.originalGraph if self.isTreeDecomposition else self.graph\n vidStart = self.mainWin.settings.vidStart\n s = \"\"\n s += \"DIMENSION : {}\\n\".format(len(origGraph.vertices))\n if origGraph.isEuclidean:\n s += \"EDGE_WEIGHT_TYPE : EUC_2D\\n\"\n s += \"NODE_COORD_SECTION\\n\"\n for v in origGraph.vertices:\n s += \"{} {} {}\\n\".format(v.vid + vidStart, int(v.pos.x), int(v.pos.y))\n s += \"EDGE_SECTION\\n\"\n for v in origGraph.vertices:\n for e in v.edges:\n if v.vid < e.other(v).vid:\n s += \"{} {} {}\\n\".format(e.a.vid + vidStart, e.b.vid + vidStart, int(e.cost))\n if self.isTreeDecomposition:\n s += \"BAG_COORD_SECTION\\n\"\n for b in self.graph.vertices:\n s += \"{} {} {}\".format(b.vid + vidStart, int(b.pos.x), int(b.pos.y))\n for v in b.vertices:\n s += \" \" + str(v.vid + vidStart)\n s += \"\\n\"\n s += \"BAG_EDGE_SECTION\\n\"\n for b in self.graph.vertices:\n for e in b.edges:\n if e.a.vid < e.b.vid:\n s += \"{} {}\\n\".format(e.a.vid + vidStart, e.b.vid + vidStart)\n self.mainWin.app.broSave(s, True)\n\n def openFile(self):\n \"\"\"Open a file\"\"\"\n path = self.mainWin.app.broOpen()\n self.openFileWithPath(path)\n\n def openFileWithPath(self, path):\n if path == \"\":\n return\n with open(path) as f:\n # Looks like the file opening went right. Good, now first create the new graph.\n self.graph = TreeDecomposition(Graph(False))\n origGraph = self.graph.originalGraph if self.isTreeDecomposition else self.graph\n self.mainWin.app.setTitle()\n comp = lambda line, s: line[0:len(s)] == s\n state = 0 # 0=nothing, 1=vertices, 2=edges, 3=bags, 4=bag edges\n vidStart = self.mainWin.settings.vidStart\n\n # And lets now fill the graph with some sensible stuff.\n for line in f:\n l = line.strip().split(' ')\n # Important file parameters\n if comp(line, \"NAME : \"):\n self.graph.name = l[2]\n origGraph.name = l[2]\n self.mainWin.app.setTitle(l[2])\n elif comp(line, \"EDGE_WEIGHT_TYPE : EUC_2D\"):\n origGraph.isEuclidean = True\n # Vertices and edges\n elif comp(line, \"NODE_COORD_SECTION\"): state = 1\n elif comp(line, \"EDGE_SECTION\"): state = 2\n elif comp(line, \"BAG_COORD_SECTION\"): state = 3\n elif comp(line, \"BAG_EDGE_SECTION\"): state = 4\n elif comp(line, \"DEMAND_SECTION\"): state = 5\n elif comp(line, \"DEPOT_SECTION\"): state = 6\n # Add vertices, edges, bags or bag edges\n elif state == 1:\n origGraph.addVertex(Vertex(origGraph, int(l[0]) - vidStart, Pos(int(l[1]), int(l[2]))))\n elif state == 2:\n origGraph.addEdge(int(l[0]) - vidStart, int(l[1]) - vidStart, int(l[2]))\n elif state == 3:\n bag = Bag(self.graph, int(l[0]) - vidStart, Pos(int(l[1]), int(l[2])))\n for v in l[3:]:\n bag.addVertex(origGraph.vertices[int(v) - vidStart])\n self.graph.addVertex(bag)\n elif state == 4:\n self.graph.addEdge(int(l[0]) - vidStart, int(l[1]) - vidStart, 1)\n\n # Change some settings for large graphs\n if len(origGraph.vertices) > 30:\n self.mainWin.settings.drawtext = False\n self.mainWin.settings.drawsize = 0\n for _ in range(5):\n self.zoomOut()\n self.redraw()\n\n def keymapToStr(self):\n \"\"\"Returns a string with all the keys and their explanation (docstring).\"\"\"\n result = \"\"\n for key, command in sorted(self.keymap.items()):\n result += key + \": \" + command.__doc__ + \"\\n\"\n return result\n\n","repo_name":"Mattias1/graph-tools","sub_path":"src/graph_interaction.py","file_name":"graph_interaction.py","file_ext":"py","file_size_in_byte":32967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73112262865","text":"import psycopg2\nimport matplotlib.pyplot as plt\n\nusername = 'nedzelsky'\npassword = '3030'\ndatabase = 'db_lab3_nedzelsky'\n\nquery_1 = '''\nselect \n\tsup_name, \n\tcount(del_date) as total_number_purchases\nfrom \n\tsupermarkets \n\tleft join deliveries using(sup_id)\n\tleft join vegetables using(prod_id)\ngroup by \n\tsupermarkets.sup_id\norder by \n\ttotal_number_purchases;\n'''\n\nquery_2 = '''\nselect \n\tsup_name, \n\tsum(del_quantity_kg * prod_price_kg) as total_amount_purchases\nfrom \n\tsupermarkets \n\tleft join deliveries using(sup_id)\n\tleft join vegetables using(prod_id)\ngroup by \n\tsupermarkets.sup_id\norder by \n\ttotal_amount_purchases;\n'''\n\nquery_3 = \"\"\"\nselect \n\trtrim(storages.stor_id) as stor_id, \n\tcoalesce(sum(stor_prod_quantity_kg), 0) as store_prod_quantity_kg\nfrom \n\tstorages \n\tleft join storage_vegetables using(stor_id)\n\tleft join vegetables using(prod_id)\nwhere \n\tprod_name = 'potato'\ngroup by \n\tstorages.stor_id\nhaving \n\tcoalesce(sum(stor_prod_quantity_kg), 0) >= 100;\n\"\"\"\n\nconn = psycopg2.connect(user=username, password=password, dbname=database)\nprint(type(conn))\n\nwith conn:\n \n cur = conn.cursor()\n\n cur.execute(query_1)\n supermarkets_1 = []\n total_number_purchases = []\n\n for row in cur:\n replaced_row_0 = row[0].replace(' ', '\\n')\n supermarkets_1.append(replaced_row_0)\n total_number_purchases.append(row[1])\n\n figure, (bar_ax, pie_ax, pie2_ax) = plt.subplots(1, 3)\n bar = bar_ax.bar(supermarkets_1, total_number_purchases, label='Total')\n bar_ax.bar_label(bar, label_type='center')\n bar_ax.set_xlabel('Супермаркети')\n bar_ax.set_ylabel('Кількість замовлень')\n bar_ax.set_title('Кількість замовлень здійснених кожним магазином')\n\n\n cur.execute(query_2)\n supermarkets_2 = []\n total_amount_purchases = []\n\n for row in cur:\n supermarkets_2.append(row[0])\n total_amount_purchases.append(row[1])\n\n pie_ax.pie(total_amount_purchases, labels=supermarkets_2, autopct='%1.2f%%')\n pie_ax.set_title('Частка суми замовлень кожного супермаркету')\n\n\n cur.execute(query_3)\n storages = []\n store_prod_quantity_kg = []\n\n for row in cur:\n storages.append(row[0])\n store_prod_quantity_kg.append(row[1])\n\n pie2_ax.pie(store_prod_quantity_kg, labels=storages, autopct='%1.2f%%')\n pie2_ax.set_title('Частка наявеості картоплі на складах \\nпри умові що їх там більше ста')\n\n\nmng = plt.get_current_fig_manager()\nmng.resize(1400, 600)\n\nplt.show()","repo_name":"Nedzelskij/db_lab4_Nedzelsky","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4505164745","text":"import os\nfrom gns3.vm import VM\nfrom gns3.node import Node\nfrom gns3.ports.ethernet_port import EthernetPort\nfrom gns3.utils.normalize_filename import normalize_filename\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass VPCSDevice(VM):\n\n \"\"\"\n VPCS device.\n\n :param module: parent module for this node\n :param server: GNS3 server instance\n :param project: Project instance\n \"\"\"\n URL_PREFIX = \"vpcs\"\n\n def __init__(self, module, server, project):\n super().__init__(module, server, project)\n\n log.info(\"VPCS instance is being created\")\n self._vm_id = None\n self._settings = {\"name\": \"\",\n \"startup_script\": None,\n \"startup_script_path\": None,\n \"console\": None}\n\n port_name = EthernetPort.longNameType() + str(0)\n short_name = EthernetPort.shortNameType() + str(0)\n\n # VPCS devices have only one fixed Ethernet port\n port = EthernetPort(port_name)\n port.setShortName(short_name)\n port.setAdapterNumber(0)\n port.setPortNumber(0)\n port.setHotPluggable(False)\n self._ports.append(port)\n log.debug(\"port {} has been added\".format(port_name))\n\n def setup(self, name=None, vm_id=None, additional_settings={}, default_name_format=\"PC{0}\"):\n \"\"\"\n Setups this VPCS device.\n\n :param name: optional name\n :param vm_id: VM identifier\n :param additional_settings: additional settings for this device\n \"\"\"\n\n # let's create a unique name if none has been chosen\n if not name:\n name = self.allocateName(default_name_format)\n\n if not name:\n self.error_signal.emit(self.id(), \"could not allocate a name for this VPCS device\")\n return\n\n self._settings[\"name\"] = name\n params = {\"name\": name}\n\n if vm_id:\n params[\"vm_id\"] = vm_id\n\n if \"script_file\" in additional_settings:\n if os.path.isfile(additional_settings[\"script_file\"]):\n base_config_content = self._readBaseConfig(additional_settings[\"script_file\"])\n if base_config_content is not None:\n additional_settings[\"startup_script\"] = base_config_content\n del additional_settings[\"script_file\"]\n\n if \"startup_script_path\" in additional_settings:\n del additional_settings[\"startup_script_path\"]\n\n # If we have an vm id that mean the VM already exits and we should not send startup_script\n if \"startup_script\" in additional_settings and vm_id is not None:\n del additional_settings[\"startup_script\"]\n\n params.update(additional_settings)\n self.httpPost(\"/vpcs/vms\", self._setupCallback, body=params)\n\n def _setupCallback(self, result, error=False, **kwargs):\n \"\"\"\n Callback for setup.\n\n :param result: server response (dict)\n :param error: indicates an error (boolean)\n \"\"\"\n\n if not super()._setupCallback(result, error=error, **kwargs):\n return\n\n if self._loading:\n self.loaded_signal.emit()\n else:\n self.setInitialized(True)\n log.info(\"VPCS instance {} has been created\".format(self.name()))\n self.created_signal.emit(self.id())\n self._module.addNode(self)\n\n def update(self, new_settings):\n \"\"\"\n Updates the settings for this VPCS device.\n\n :param new_settings: settings dictionary\n \"\"\"\n\n if \"name\" in new_settings and new_settings[\"name\"] != self.name() and self.hasAllocatedName(new_settings[\"name\"]):\n self.error_signal.emit(self.id(), 'Name \"{}\" is already used by another node'.format(new_settings[\"name\"]))\n return\n\n if \"script_file\" in new_settings:\n if os.path.isfile(new_settings[\"script_file\"]):\n base_config_content = self._readBaseConfig(new_settings[\"script_file\"])\n if base_config_content is not None:\n new_settings[\"startup_script\"] = base_config_content\n del new_settings[\"script_file\"]\n\n if \"startup_script_path\" in new_settings:\n del new_settings[\"startup_script_path\"]\n\n params = {}\n for name, value in new_settings.items():\n if name in self._settings and self._settings[name] != value:\n params[name] = value\n\n log.debug(\"{} is updating settings: {}\".format(self.name(), params))\n self.httpPut(\"/vpcs/vms/{vm_id}\".format(project_id=self._project.id(), vm_id=self._vm_id), self._updateCallback, body=params)\n\n def _updateCallback(self, result, error=False, **kwargs):\n \"\"\"\n Callback for update.\n\n :param result: server response (dict)\n :param error: indicates an error (boolean)\n \"\"\"\n\n if not super()._updateCallback(result, error=error, **kwargs):\n return False\n\n updated = False\n for name, value in result.items():\n if name in self._settings and self._settings[name] != value:\n log.info(\"{}: updating {} from '{}' to '{}'\".format(self.name(), name, self._settings[name], value))\n updated = True\n if name == \"name\":\n # update the node name\n self.updateAllocatedName(value)\n self._settings[name] = value\n\n if updated:\n log.info(\"VPCS device {} has been updated\".format(self.name()))\n self.updated_signal.emit()\n\n def info(self):\n \"\"\"\n Returns information about this VPCS device.\n\n :returns: formated string\n \"\"\"\n\n if self.status() == Node.started:\n state = \"started\"\n else:\n state = \"stopped\"\n\n info = \"\"\"Device {name} is {state}\n Local node ID is {id}\n Server's VPCS device ID is {vm_id}\n VPCS's server runs on {host}:{port}, console is on port {console}\n\"\"\".format(name=self.name(),\n id=self.id(),\n vm_id=self._vm_id,\n state=state,\n host=self._server.host(),\n port=self._server.port(),\n console=self._settings[\"console\"])\n\n port_info = \"\"\n for port in self._ports:\n if port.isFree():\n port_info += \" {port_name} is empty\\n\".format(port_name=port.name())\n else:\n port_info += \" {port_name} {port_description}\\n\".format(port_name=port.name(),\n port_description=port.description())\n\n return info + port_info\n\n def dump(self):\n \"\"\"\n Returns a representation of this VPCS device.\n (to be saved in a topology file).\n\n :returns: representation of the node (dictionary)\n \"\"\"\n\n vpcs_device = super().dump()\n vpcs_device[\"vm_id\"] = self._vm_id\n\n # add the properties\n for name, value in self._settings.items():\n if value is not None and value != \"\":\n if name != \"startup_script\":\n if name == \"startup_script_path\":\n value = os.path.basename(value)\n vpcs_device[\"properties\"][name] = value\n\n return vpcs_device\n\n def load(self, node_info):\n \"\"\"\n Loads a VPCS device representation\n (from a topology file).\n\n :param node_info: representation of the node (dictionary)\n \"\"\"\n\n super().load(node_info)\n\n # for backward compatibility\n vm_id = node_info.get(\"vpcs_id\")\n if not vm_id:\n vm_id = node_info.get(\"vm_id\")\n\n # prepare the VM settings\n vm_settings = {}\n for name, value in node_info[\"properties\"].items():\n if name in self._settings:\n vm_settings[name] = value\n name = vm_settings.pop(\"name\")\n\n log.info(\"VPCS device {} is loading\".format(name))\n self.setName(name)\n self.setup(name, vm_id, vm_settings)\n\n def exportConfig(self, config_export_path):\n \"\"\"\n Exports the script file.\n\n :param config_export_path: export path for the script file\n \"\"\"\n\n self.httpGet(\"/vpcs/vms/{vm_id}\".format(vm_id=self._vm_id),\n self._exportConfigCallback,\n context={\"path\": config_export_path})\n\n def _exportConfigCallback(self, result, error=False, context={}, **kwargs):\n \"\"\"\n Callback for exportConfig.\n\n :param result: server response\n :param error: indicates an error (boolean)\n \"\"\"\n\n if error:\n log.error(\"error while exporting {} configs: {}\".format(self.name(), result[\"message\"]))\n self.server_error_signal.emit(self.id(), result[\"message\"])\n elif \"startup_script\" in result:\n path = context[\"path\"]\n try:\n with open(path, \"wb\") as f:\n log.info(\"saving {} script file to {}\".format(self.name(), path))\n if result[\"startup_script\"]:\n f.write(result[\"startup_script\"].encode(\"utf-8\"))\n except OSError as e:\n self.error_signal.emit(self.id(), \"could not export the script file to {}: {}\".format(path, e))\n\n def exportConfigToDirectory(self, directory):\n \"\"\"\n Exports the script-file to a directory.\n\n :param directory: destination directory path\n \"\"\"\n\n self.httpGet(\"/vpcs/vms/{vm_id}\".format(vm_id=self._vm_id),\n self._exportConfigToDirectoryCallback,\n context={\"directory\": directory})\n\n def _exportConfigToDirectoryCallback(self, result, error=False, context={}, **kwargs):\n \"\"\"\n Callback for exportConfigToDirectory.\n\n :param result: server response\n :param error: indicates an error (boolean)\n \"\"\"\n\n if error:\n log.error(\"error while exporting {} configs: {}\".format(self.name(), result[\"message\"]))\n self.server_error_signal.emit(self.id(), result[\"message\"])\n elif \"startup_script\" in result:\n export_directory = context[\"directory\"]\n config_path = os.path.join(export_directory, normalize_filename(self.name())) + \"_startup.vpc\"\n try:\n with open(config_path, \"wb\") as f:\n log.info(\"saving {} script file to {}\".format(self.name(), config_path))\n if result[\"startup_script\"]:\n f.write(result[\"startup_script\"].encode(\"utf-8\"))\n except OSError as e:\n self.error_signal.emit(self.id(), \"could not export the script file to {}: {}\".format(config_path, e))\n\n def importConfig(self, path):\n \"\"\"\n Imports a script-file.\n\n :param path: path to the script file\n \"\"\"\n\n new_settings = {\"script_file\": path}\n self.update(new_settings)\n\n def importConfigFromDirectory(self, directory):\n \"\"\"\n Imports an initial-config from a directory.\n\n :param directory: source directory path\n \"\"\"\n\n try:\n contents = os.listdir(directory)\n except OSError as e:\n self.warning_signal.emit(self.id(), \"Can't list file in {}: {}\".format(directory, str(e)))\n return\n script_file = normalize_filename(self.name()) + \"_startup.vpc\"\n new_settings = {}\n if script_file in contents:\n new_settings[\"script_file\"] = os.path.join(directory, script_file)\n else:\n self.warning_signal.emit(self.id(), \"no script file could be found, expected file name: {}\".format(script_file))\n return\n self.update(new_settings)\n\n def name(self):\n \"\"\"\n Returns the name of this VPCS device.\n\n :returns: name (string)\n \"\"\"\n\n return self._settings[\"name\"]\n\n def settings(self):\n \"\"\"\n Returns all this VPCS device settings.\n\n :returns: settings dictionary\n \"\"\"\n\n return self._settings\n\n def ports(self):\n \"\"\"\n Returns all the ports for this VPCS device.\n\n :returns: list of Port instances\n \"\"\"\n\n return self._ports\n\n def console(self):\n \"\"\"\n Returns the console port for this VPCS device.\n\n :returns: port (integer)\n \"\"\"\n\n return self._settings[\"console\"]\n\n def configPage(self):\n \"\"\"\n Returns the configuration page widget to be used by the node properties dialog.\n\n :returns: QWidget object\n \"\"\"\n\n from .pages.vpcs_device_configuration_page import VPCSDeviceConfigurationPage\n return VPCSDeviceConfigurationPage\n\n @staticmethod\n def defaultSymbol():\n \"\"\"\n Returns the default symbol path for this node.\n\n :returns: symbol path (or resource).\n \"\"\"\n\n return \":/symbols/computer.svg\"\n\n @staticmethod\n def symbolName():\n\n return \"VPCS\"\n\n @staticmethod\n def categories():\n \"\"\"\n Returns the node categories the node is part of (used by the device panel).\n\n :returns: list of node category (integer)\n \"\"\"\n\n return [Node.end_devices]\n\n def __str__(self):\n\n return \"VPCS device\"\n","repo_name":"mpplab/MNSS","sub_path":"gns3/modules/vpcs/vpcs_device.py","file_name":"vpcs_device.py","file_ext":"py","file_size_in_byte":13318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"38506651700","text":"#Author-diomedea16\n#Description-Draw wing.\n\nimport adsk.core, traceback\nfrom os import path\nimport csv\nimport math\n\nresources_dir = path.join(path.dirname(__file__), 'resources')\n\ndef run(context):\n ui = None\n try:\n #おまじない\n app = adsk.core.Application.get()\n ui = app.userInterface\n product = app.activeProduct\n design = adsk.fusion.Design.cast(product)\n rootComp = design.rootComponent\n sketches = rootComp.sketches\n\n #ペラのデータをロード\n data = get_setting_csv('setting.csv')\n foilCache = {}\n\n #回転軸対称に2枚描画\n for lr in [-1, 1]:\n sketch = sketches.add(rootComp.xZConstructionPlane)\n tops = adsk.core.ObjectCollection.create()\n ends = adsk.core.ObjectCollection.create()\n paths = []\n\n #リブを1枚ずつ検証\n for r in data:\n #翼型データのロード\n if r[5] in foilCache:\n foil = foilCache[r[5]]\n else:\n foil = get_2d_csv(r[5] + '.csv')\n foilCache[r[5]] = foil\n\n #翼型をスケッチ\n points = adsk.core.ObjectCollection.create()\n for p, i in zip(foil[:-1], range(len(foil[:-1]))):\n x = (p[0] - r[3]/100) * r[2] * 0.1\n y = - (p[1] - r[4]/100) * r[2] * 0.1\n rx = x * math.cos(math.radians(r[1])) - y * math.sin(math.radians(r[1]))\n ry = x * math.sin(math.radians(r[1])) + y * math.cos(math.radians(r[1]))\n node = adsk.core.Point3D.create(lr * rx, ry, lr * r[0] * 0.1)\n points.add(node)\n if i == 0:\n ends.add(node)\n elif p[0] == 0:\n tops.add(node)\n spline = sketch.sketchCurves.sketchFittedSplines.add(points)\n spline.isClosed = True\n paths.append(rootComp.features.createPath(spline))\n\n #前縁・後縁の線を描画(ロフト時に型崩れしないようレールとして使う)\n topLine = sketch.sketchCurves.sketchFittedSplines.add(tops)\n endLine = sketch.sketchCurves.sketchFittedSplines.add(ends)\n\n #ロフト\n loftFeats = rootComp.features.loftFeatures\n loftInput = loftFeats.createInput(adsk.fusion.FeatureOperations.NewBodyFeatureOperation)\n for pa in paths:\n loftInput.loftSections.add(pa)\n loftInput.isSolid = True\n loftInput.centerLineOrRails.addRail(rootComp.features.createPath(topLine))\n loftInput.centerLineOrRails.addRail(rootComp.features.createPath(endLine))\n loftFeats.add(loftInput)\n\n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n#設定csvの読み取り\ndef get_setting_csv(filename):\n with open(path.join(resources_dir, filename)) as f:\n return list(\n map(lambda r: [float(r[0]),\n float(r[1]),\n float(r[2]),\n float(r[3]),\n float(r[4]),\n r[5]], csv.reader(f)))\n\n#二次元座標を格納したcsvの読み取り\ndef get_2d_csv(filename):\n\twith open(path.join(resources_dir, filename)) as f:\n\t\treturn list(\n map(lambda r: list(map(lambda c: float(c), r)), csv.reader(f)))\n","repo_name":"kamino410/FusionScripts","sub_path":"DrawProp/DrawProp.py","file_name":"DrawProp.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"639828504","text":"import asyncio\n\nfrom aiohttp import ClientSession\n\n# dictionary with asyncio tasks representing currently running bots\nbot_tasks = dict()\n\n\nasync def run_bot(user_id: str, bot_token: str):\n bot_task = asyncio.get_event_loop().create_task(_process_updates(bot_token))\n if not bot_tasks.get(user_id):\n bot_tasks[user_id] = dict()\n bot_tasks[user_id][bot_token] = bot_task\n\n\nasync def stop_bot(user_id: str, bot_token: str):\n bot_task = bot_tasks.get(user_id).get(bot_token)\n bot_task.cancel()\n\n\nasync def _process_updates(bot_token: str):\n bot_api_url = f\"https://api.telegram.org/bot{bot_token}/\"\n session = ClientSession()\n\n json_body = {\n \"limit\": 1\n }\n\n # start polling telegram bot API for updates\n while True:\n try:\n response = await session.post(bot_api_url + \"getUpdates\", json=json_body)\n json_response = await response.json()\n\n # check whether there is error in response\n if not json_response.get(\"ok\"):\n await asyncio.sleep(1)\n continue\n\n # check whether response has update\n if json_response.get(\"result\"):\n update_id = json_response.get(\"result\")[0].get(\"update_id\")\n from_id = json_response.get(\"result\")[0].get(\"message\").get(\"from\").get(\"id\")\n # if received message is text message, send it back\n text = json_response.get(\"result\")[0].get(\"message\").get(\"text\")\n if text:\n response = await session.post(bot_api_url + \"sendMessage\", json={\"chat_id\": from_id, \"text\": text})\n json_body[\"offset\"] = update_id + 1\n except asyncio.CancelledError:\n break\n await asyncio.sleep(0.1)\n\n await session.close()\n\n\n\n","repo_name":"maxim-pr/bot_management_service","sub_path":"services/bot_service.py","file_name":"bot_service.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14550878219","text":"import numpy as np\nimport xlrd\nimport os\n\nworkbook1 = xlrd.open_workbook('raw_datas/TPD_training_samples_lable V20181016.xlsx')\nbooksheet1 = workbook1.sheet_by_index(0)\nworkbook2 = xlrd.open_workbook('raw_datas/TPD_IPX0001444002.xlsx')\nbooksheet2 = workbook2.sheet_by_index(0) \n\n#用字典的形式保存excel表格中的 ID:样本名\ndics1 = {}\nrows = booksheet1.nrows\nfor i in range(1,rows):\n\tkey_ = booksheet1.cell_value(i,3)\n\tcell_ = booksheet1.cell_value(i,2)\n\tif cell_[:2] == \"Mo\":\n\t\tcontinue\n\tif cell_[:2] == \"po\":\n\t\tcontinue\n\tif cell_[:1] == \"N\":\n\t\tcontinue\n\tif cell_ == \"\":\n\t\tcontinue\n\tdics1[key_] = cell_ \n#对应的文件去重\ndics1 = {value:key for key,value in dics1.items()}\n\n#去除表格与文件无法对应的样本\nff1 = os.listdir(\"raw_datas/TPD1XICS\")\nkks = []\nfor i in dics1.values():\n\tfs = i+\"out.txt\"\n\tif fs not in ff1:\n\t\tkk = list(dics1.keys())[list(dics1.values()).index(i)]\n\t\tkks.append(kk)\nfor j in kks:\n\tdel dics1[j]\n######\ndics2 = {}\nrows = booksheet2.nrows\nfor i in range(1,rows):\n\tkey_ = booksheet2.cell_value(i,2)\n\tif key_ == \"\":\n\t\tcontinue\n\tcell_ = booksheet2.cell_value(i,1)\n\tdics2[key_] = cell_\ndics2 = {value:key for key,value in dics2.items()}\n\nff2 = os.listdir(\"raw_datas/TPD2XICS\")\nkks = []\nfor i in dics2.values():\n\tfs = i+\"out.txt\"\n\tif fs not in ff2:\n\t\tkk = list(dics2.keys())[list(dics2.values()).index(i)]\n\t\tkks.append(kk)\nfor j in kks:\n\tdel dics2[j]\n\n#以字典的形式保存 病例号:[该病例号的样本]\ndata1 = {}\nfor d in dics1.keys():\n\tif d[:-1] in data1.keys():\n\t\tdata1[d[:-1]].append(d)\n\telse:\n\t\tdata1[d[:-1]] = []\n\t\tdata1[d[:-1]].append(d) #357个病例\n\ndata2 = {}\nfor d in dics2.keys():\n\tif d[:-1] in data2.keys():\n\t\tdata2[d[:-1]].append(d)\n\telse:\n\t\tdata2[d[:-1]] = []\n\t\tdata2[d[:-1]].append(d) #180个病例\n\n'''\n#求两批样本中的样本数量\nnn = 0\nfor i in data1.keys():\n nn += len(data1[i])#905\n\nmm = 0\nfor i in data2.keys():\n mm += len(data2[i])#536\n'''\ndef class_key(data,key):\n\tdc = []\n\tfor i in data:\n\t\tif i[0] == key:\n\t\t\tdc.append(i)\n\treturn dc\n\ndef splits(datas,n):\n\tdas = []\n\tda2 = np.array(datas)\n\tfor i in range(9):\n\t\tindex_1=np.random.choice(da2.shape[0],n,replace=False)\n\t\tda1=da2[index_1].tolist()\n\t\tindex_2=np.arange(da2.shape[0])\n\t\tindex_2=np.delete(index_2,index_1)\n\t\tda2=da2[index_2]\n\t\tdas.append(da1)\n\tda2 = da2.tolist()\n\tif len(da2) <= n:\n\t\tdas.append(da2)\n\tif len(da2) > n:\n\t\tdas.append(da2[:n])\n\t\tdel da2[:n]\n\t\tfor i,j in enumerate(da2):\n\t\t\tdas[i].append(j)\n\treturn das\n\ndef adds(dd,ti):\n\tif ti == 1:\n\t\tdata = data1\n\t\tdics = dics1\n\tif ti == 2:\n\t\tdata = data2\n\t\tdics = dics2\n\tddd = []\n\tfor n in data.keys():\n\t\tif n in dd:\n\t\t\tfor m in data[n]:\n\t\t\t\tddd.append(m)\n\tf = []\n\tfor i in ddd:\n\t\tif dics[i] == \"\":\n\t\t\tcontinue\n\t\telse:\n\t\t\tf.append(dics[i]+\"out.txt\")\n\treturn f\n\ndef split_class(dm,ks,x):\n\tdas1 = splits(dm,int(len(dm)/10))\n\tdd1 = das1[ks]\n\tdds2 = das1[:ks]+das1[ks+1:]\n\tdd2 = []\t\n\tfor i in dds2:\n\t\tdd2 = dd2 + i\n\tf1 = adds(dd1,x)\n\tf2 = adds(dd2,x)\n\treturn f1,f2\n\ndef split_cross(ks):\n\tdm1 = class_key(data1,\"M\")\n\tdm2 = class_key(data2,\"M\")\n\tda1 = class_key(data1,\"A\")\n\tda2 = class_key(data2,\"A\")\n\tdc1 = class_key(data1,\"C\")\n\tdc2 = class_key(data2,\"C\")\n\tdp1 = class_key(data1,\"P\")\n\tdp2 = class_key(data2,\"P\")\n\n\tfm1,fm1s = split_class(dm1,ks,1)\n\tfm2,fm2s = split_class(dm2,ks,2) \n\tfa1,fa1s = split_class(da1,ks,1)\n\tfa2,fa2s = split_class(da2,ks,2)\n\tfc1,fc1s = split_class(dc1,ks,1)\n\tfc2,fc2s = split_class(dc2,ks,2)\n\tfp1,fp1s = split_class(dp1,ks,1)\n\tfp2,fp2s = split_class(dp2,ks,2)\n\n\tf2 = fm1s + fa1s + fc1s + fp1s #808\n\tf1 = fm1 + fa1 + fc1 + fp1 #97\n\tf4 = fm2s + fa2s + fc2s + fp2s #479\n\tf3 = fm2 + fa2 + fc2 + fp2 #57\n\n\t#少了几个数据\n\tnp.save(\"save_npy/file1.npy\",f1)\n\tnp.save(\"save_npy/file2.npy\",f2)\n\tnp.save(\"save_npy/file3.npy\",f3)\n\tnp.save(\"save_npy/file4.npy\",f4)\n\tprint(\"cross-\",ks,\"(样本划分):\",len(f1),len(f2),len(f3),len(f4))\n\n","repo_name":"MonkeyDong/xic--deep-tensorflow","sub_path":"cross_mean.py","file_name":"cross_mean.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37218835784","text":"from collections import defaultdict\n\nfrom scipy import spatial\nimport numpy as np\nimport torch\n\nfrom collections import defaultdict\nfrom src.modules.lcfcn import lcfcn_loss\nfrom scipy import spatial\nimport numpy as np\nimport torch\n\nfrom collections import defaultdict\n\nfrom scipy import spatial\nimport numpy as np\nimport torch\n\n\nclass SegMeter:\n def __init__(self, split):\n self.cf = None\n self.n_samples = 0\n self.split = split\n self.ae = 0\n self.game = 0\n\n def val_on_batch(self, model, batch):\n masks = batch[\"masks\"].squeeze()\n self.n_samples += batch['images'].shape[0]\n pred_mask = model.predict_on_batch(batch).squeeze()\n\n # counts\n blobs = lcfcn_loss.get_blobs(pred_mask)\n points = lcfcn_loss.blobs2points(blobs)\n pred_counts = float(points.sum())\n self.ae += np.abs(float((batch['points']==1).sum()) - pred_counts)\n\n gt_points = batch['points'].squeeze().clone()\n gt_points[gt_points!=1] = 0\n self.game += lcfcn_loss.compute_game(pred_points=points.squeeze(), \n gt_points=gt_points, L=3)\n # \n # print(masks.sum())\n ind = masks != 255\n masks = masks[ind]\n pred_mask = pred_mask[ind]\n\n \n\n labels = np.arange(model.n_classes)\n cf = confusion_multi_class(torch.as_tensor(pred_mask).float().cuda(), masks.cuda().float(),\n labels=labels)\n \n \n if self.cf is None:\n self.cf = cf \n else:\n self.cf += cf\n\n def get_avg_score(self):\n # return -1 \n Inter = np.diag(self.cf)\n G = self.cf.sum(axis=1)\n P = self.cf.sum(axis=0)\n union = G + P - Inter\n\n nz = union != 0\n iou = Inter / np.maximum(union, 1)\n mIoU = np.mean(iou[nz])\n iou[~nz] = np.nan\n val_dict = {'%s_score' % self.split: mIoU}\n for c in range(self.cf.shape[1]):\n val_dict['%s_class%d' % (self.split, c)] = iou[c]\n val_dict['%s_mae' % (self.split)] = self.ae / self.n_samples\n val_dict['%s_game' % (self.split)] = self.game / self.n_samples\n return val_dict\n\n\n\ndef confusion_multi_class(prediction, truth, labels):\n \"\"\"\n cf = confusion_matrix(y_true=prediction.cpu().numpy().ravel(),\n y_pred=truth.cpu().numpy().ravel(),\n labels=labels)\n \"\"\"\n nclasses = labels.max() + 1\n cf2 = torch.zeros(nclasses, nclasses, dtype=torch.float, device=prediction.device)\n prediction = prediction.view(-1).long()\n truth = truth.view(-1)\n to_one_hot = torch.eye(int(nclasses), dtype=cf2.dtype, device=prediction.device)\n for c in range(nclasses):\n true_mask = (truth == c)\n pred_one_hot = to_one_hot[prediction[true_mask]].sum(0)\n cf2[:, c] = pred_one_hot\n\n return cf2.cpu().numpy()\n\n\ndef confusion_binary_class(prediction, truth):\n confusion_vector = prediction / truth\n\n tp = torch.sum(confusion_vector == 1).item()\n fp = torch.sum(confusion_vector == float('inf')).item()\n tn = torch.sum(torch.isnan(confusion_vector)).item()\n fn = torch.sum(confusion_vector == 0).item()\n cm = np.array([[tn,fp],\n [fn,tp]])\n return cm\n\n\n\nclass SegMeterBinary:\n def __init__(self, split):\n self.cf = None\n self.struct_list = []\n self.split = split\n\n def val_on_batch(self, model, batch):\n masks_org = batch[\"masks\"]\n\n pred_mask_org = model.predict_on_batch(batch)\n ind = masks_org != 255\n masks = masks_org[ind]\n pred_mask = pred_mask_org[ind]\n self.n_classes = model.n_classes\n if model.n_classes == 1:\n cf = confusion_binary_class(torch.as_tensor(pred_mask).float().cuda(), masks.cuda().float())\n else:\n labels = np.arange(model.n_classes)\n cf = confusion_multi_class(torch.as_tensor(pred_mask).float().cuda(), masks.cuda().float(),\n labels=labels)\n\n if self.cf is None:\n self.cf = cf\n else:\n self.cf += cf\n\n # structure\n struct_score = float(struct_metric.compute_struct_metric(pred_mask_org, masks_org))\n self.struct_list += [struct_score]\n\n def get_avg_score(self):\n TP = np.diag(self.cf)\n TP_FP = self.cf.sum(axis=1)\n TP_FN = self.cf.sum(axis=0)\n TN = TP[::-1]\n \n\n FP = TP_FP - TP\n FN = TP_FN - TP\n\n iou = TP / (TP + FP + FN)\n dice = 2*TP / (FP + FN + 2*TP)\n\n iou[np.isnan(iou)] = -1\n dice[np.isnan(dice)] = -1\n\n mDice = np.mean(dice)\n mIoU = np.mean(iou)\n\n prec = TP / (TP + FP)\n recall = TP / (TP + FN)\n spec = TN/(TN+FP)\n fscore = (( 2.0 * prec * recall ) / (prec + recall))\n\n val_dict = {}\n if self.n_classes == 1:\n val_dict['%s_dice' % self.split] = dice[0]\n val_dict['%s_iou' % self.split] = iou[0]\n\n val_dict['%s_prec' % self.split] = prec[0]\n val_dict['%s_recall' % self.split] = recall[0]\n val_dict['%s_spec' % self.split] = spec[0]\n val_dict['%s_fscore' % self.split] = fscore[0]\n\n val_dict['%s_score' % self.split] = dice[0]\n val_dict['%s_struct' % self.split] = np.mean(self.struct_list)\n return val_dict\n\n# def confusion_multi_class(prediction, truth, labels):\n# \"\"\"\n# cf = confusion_matrix(y_true=prediction.cpu().numpy().ravel(),\n# y_pred=truth.cpu().numpy().ravel(),\n# labels=labels)\n# \"\"\"\n# nclasses = labels.max() + 1\n# cf2 = torch.zeros(nclasses, nclasses, dtype=torch.float,\n# device=prediction.device)\n# prediction = prediction.view(-1).long()\n# truth = truth.view(-1)\n# to_one_hot = torch.eye(int(nclasses), dtype=cf2.dtype,\n# device=prediction.device)\n# for c in range(nclasses):\n# true_mask = (truth == c)\n# pred_one_hot = to_one_hot[prediction[true_mask]].sum(0)\n# cf2[:, c] = pred_one_hot\n\n# return cf2.cpu().numpy()\n\n\n\ndef confusion_binary_class(pred_mask, gt_mask):\n intersect = pred_mask.bool() & gt_mask.bool()\n\n fp_tp = (pred_mask ==1).sum().item()\n fn_tp = gt_mask.sum().item()\n tn_fn = (pred_mask ==0).sum().item()\n\n tp = (intersect == 1).sum().item()\n fp = fp_tp - tp\n fn = fn_tp - tp\n tn = tn_fn - fn \n\n cm = np.array([[tp, fp],\n [fn, tn]])\n return cm","repo_name":"IssamLaradji/affinity_lcfcn","sub_path":"src/models/metrics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"}
+{"seq_id":"8950228274","text":"import datetime\nimport re\n\nimport dateutil.parser\nimport pytz\n\nmonth_names = [\"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jul\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\"]\nday_of_week_names = [\"sun\", \"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\"]\nmonth_names_re = re.compile(rf\"(? 3:\n parts[3] = re.sub(\n month_names_re,\n lambda m: str(month_names.index(m.group().lower()) + 1),\n parts[3]\n )\n if len(parts) > 4:\n parts[4] = re.sub(\n day_of_week_names_re,\n lambda m: str(day_of_week_names.index(m.group().lower())),\n parts[4]\n )\n return \" \".join(parts)","repo_name":"vcoder4c/cron-validator","sub_path":"cron_validator/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"}
+{"seq_id":"40071128862","text":"\"\"\"\nConfiguration for docs\n\"\"\"\n\nsource_link = \"https://github.com/YefriTavarez/fimax\"\ndocs_base_url = \"https://yefritavarez.github.io/fimax/\"\nheadline = \"An application for your finance management\"\nsub_heading = \"Let FiMax help you with your business\"\n\ndef get_context(context):\n\tcontext.brand_html = \"FIMAX\"\n","repo_name":"YefriTavarez/fimax","sub_path":"fimax/config/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
+{"seq_id":"7278126969","text":"import youtube_dl\nfrom dotenv import load_dotenv\nfrom google.cloud import speech\nfrom google.cloud import storage\nfrom sumy.nlp.stemmers import Stemmer\nfrom sumy.nlp.tokenizers import Tokenizer\nfrom sumy.parsers.plaintext import PlaintextParser\nfrom sumy.summarizers.lsa import LsaSummarizer as Summarizer\nfrom sumy.utils import get_stop_words\nimport os\n\nLANGUAGE = \"english\"\nSENTENCES_COUNT = 10\nbucket_name = 'hoohacks2021'\nprint(bucket_name)\ngs_uri_prefix = f\"gs://{bucket_name}\"\nprint(gs_uri_prefix)\n\nconfig = \"/Users/robertbao/Documents/GitHub/HooHacks2021/Backend/config.json\"\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = config\n\n\ndef download_video(video_id):\n ydl_opts = {\n 'outtmpl': '%(title)s?.',\n 'format':\n 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'flac',\n 'preferredquality': '192'\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n uri = 'http://www.youtube.com/watch?v={0}'.format(video_id)\n ydl.download([uri])\n meta = ydl.extract_info(uri, download=False)\n return meta['title']\n\n\ndef transcribe_file(input_language, mode, path, bucket_name):\n \"\"\"Asynchronously transcribes the audio file specified.\"\"\"\n\n if input_language == 'fr':\n language = 'fr-FR'\n else:\n language = 'en-US'\n\n client = speech.SpeechClient()\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.FLAC,\n language_code=\"en-US\",\n enable_word_time_offsets=True,\n model='video',\n audio_channel_count=2,\n enable_automatic_punctuation=True,\n )\n\n # file on GCS\n if mode == \"gcs\":\n audio = speech.RecognitionAudio(uri=f\"{gs_uri_prefix}/{path}\")\n # local file\n else:\n with open(path, \"rb\") as audio_file:\n content = audio_file.read()\n audio = speech.RecognitionAudio(content=content)\n\n operation = client.long_running_recognize(config=config, audio=audio)\n\n print(\"Waiting for operation to complete...\")\n response = operation.result(timeout=90)\n\n # Each result is for a consecutive portion of the audio. Iterate through\n # them to get the transcripts for the entire audio file.\n content = \"\"\n for result in response.results:\n # The first alternative is the most likely one for this portion.\n print(u\"Transcript: {}\".format(result.alternatives[0].transcript))\n print(\"Confidence: {}\".format(result.alternatives[0].confidence))\n content += result.alternatives[0].transcript\n content += \"\\n\"\n\n return content\n\n\ndef upload_to_bucket(blob_name):\n storage_client = storage.Client.from_service_account_json(config)\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.upload_from_filename(blob_name)\n\n return blob.public_url\n\n\ndef get_summary(video_id: str):\n summary = \"\"\n load_dotenv()\n filename = download_video(video_id)\n filename = filename\n bucket_audio = \"{0}.flac\".format(filename)\n upload_to_bucket(bucket_audio)\n script = transcribe_file(\"en\", \"gcs\", bucket_audio, gs_uri_prefix)\n\n parser = PlaintextParser.from_string(script, Tokenizer(LANGUAGE))\n stemmer = Stemmer(LANGUAGE)\n\n summarizer = Summarizer(stemmer)\n summarizer.stop_words = get_stop_words(LANGUAGE)\n\n for sentence in summarizer(parser.document, SENTENCES_COUNT):\n summary = summary + str(sentence) + \" \"\n\n return [filename, summary]\n\n\nif __name__ == \"__main__\":\n r = get_summary('Zv5Qa2kGL04')\n print(r[0])\n print(r[1])\n","repo_name":"Goutham888/HooHacks2021","sub_path":"Backend/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"21308771601","text":"from flask import Flask, render_template, request,redirect,session\r\nfrom flask import redirect, url_for, session\r\nfrom flask import session, request\r\nimport sqlite3\r\nfrom datetime import datetime\r\nfrom flask_session import Session\r\nfrom flask import *\r\n\r\napp = Flask(__name__)\r\napp = Flask(__name__, template_folder= \"templates\")\r\n\r\napp.config[\"SESSION_PERMANENT\"] = True\r\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\r\nSession(app)\r\n\r\n#Global Variables\r\n\r\nGlobal_hid=0\r\nGlobal_did=0\r\ndoctor_hid=0\r\n\r\n# Note: Sessions have been implemented thus allowing access to only the logged in hospital admins/doctors . \r\n# If you are not logged in, you will be redirected to the homepage automatically and displayed an error message.\r\n\r\n#-------Home Page-----------\r\n@app.route('/')\r\ndef greeting():\r\n return render_template(\"Homepage.html\")\r\n\r\n#-------Registering an Hospital-------\r\n@app.route('/register_hospital', methods=['GET','POST'])\r\ndef register_hospital():\r\n if request.method == 'POST':\r\n hid = request.form['hid']\r\n name = request.form['name']\r\n pswd= request.form['pswd']\r\n\r\n try:\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n \r\n print (\"Executing the DML\")\r\n cursor.execute(\"INSERT into hospital (Hospital_ID,Password,Name) values (?,?,?)\",(hid,pswd,name)) \r\n \r\n print (\"Commiting the changes\")\r\n connection.commit()\r\n\r\n print (\"Closing the datbase\")\r\n connection.close()\r\n\r\n return redirect(url_for('greeting'))\r\n\r\n except Exception as error:\r\n return_message = str(error)\r\n return(return_message)\r\n else:\r\n return render_template(\"register_hospital.html\")\r\n\r\n#-------Hospital Log in---------\r\n@app.route('/login',methods=['GET','POST'])\r\ndef login():\r\n if request.method == 'POST':\r\n global Global_hid\r\n Global_hid = request.form['hid']\r\n pswd= request.form['pswd']\r\n\r\n try:\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n \r\n print (\"Executing the DML\")\r\n cursor.execute(\"SELECT * from hospital where Hospital_ID=?\",(Global_hid,)) \r\n\r\n print (\"Get the Rows from cursor\")\r\n information = cursor.fetchall() \r\n print(information)\r\n\r\n print (\"Closing the database\")\r\n connection.close()\r\n\r\n if (pswd==information[0][1]) :\r\n session['hospital']= Global_hid\r\n return redirect(url_for('options'))\r\n else:\r\n return('Incorrect username/password! Please try again')\r\n\r\n except Exception as error:\r\n return_message = str(error)\r\n return(return_message)\r\n else:\r\n return render_template(\"login_hospital.html\")\r\n\r\n#-------Logging out Doctor-----\r\n@app.route('/logoutdoc')\r\ndef logout_doc():\r\n session['doctor']= None\r\n return redirect(url_for('greeting'))\r\n\r\n#-------Logging out Hospital-----\r\n@app.route('/logouthospital')\r\ndef logout_hospital():\r\n session['hospital']= None\r\n return redirect(url_for('greeting'))\r\n\r\n#--------Viewing the Options---------\r\n@app.route('/options',methods=['GET','POST'])\r\ndef options():\r\n if not session.get(\"hospital\"):\r\n flash('You are not logged in as the admin')\r\n return redirect(url_for('greeting'))\r\n\r\n return render_template('options.html')\r\n\r\n#--------Registering a new doctor-----\r\n@app.route('/register_doctor',methods=['GET','POST'])\r\ndef register_doctor():\r\n if not session.get(\"hospital\"):\r\n flash('You are not logged in as the admin')\r\n return redirect(url_for('greeting'))\r\n if request.method == 'POST':\r\n\r\n hid = request.form['hid']\r\n did = request.form['did']\r\n name = request.form['name']\r\n gender = request.form['gender']\r\n qual = request.form['qual']\r\n about = request.form['about']\r\n contact = request.form['contact']\r\n stime = request.form['stime']\r\n etime = request.form['etime']\r\n pswd= request.form['pswd']\r\n\r\n try:\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n \r\n print (\"Executing the DML\")\r\n cursor.execute(\"INSERT into doctor (Doc_ID,Name,Gender,Qualification,About,Contact,Start_time,End_time,Password,Hospital_ID) values (?,?,?,?,?,?,?,?,?,?)\",(did,name,gender,qual,about,contact,stime,etime,pswd,hid))\r\n \r\n print (\"Commiting the changes\")\r\n connection.commit()\r\n\r\n print (\"Closing the datbase\")\r\n connection.close()\r\n\r\n return redirect(url_for('options'))\r\n\r\n except Exception as error:\r\n return_message = str(error)\r\n return(return_message)\r\n else:\r\n return render_template(\"register_doctor.html\")\r\n\r\n#--------Doctor's Login------\r\n@app.route('/doctorlogin',methods=['GET','POST'])\r\ndef doctor_login():\r\n global doctor_hid\r\n global Global_did\r\n if request.method == 'POST':\r\n Global_did = request.form['did']\r\n doctor_hid = request.form['hid']\r\n pswd= request.form['pswd']\r\n\r\n try:\r\n\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n \r\n print (\"Executing the DML\")\r\n cursor.execute(\"SELECT Name,Password from doctor where Hospital_ID=? AND Doc_ID=?\",(doctor_hid,Global_did,)) \r\n\r\n print (\"Get the Rows from cursor\")\r\n information = cursor.fetchall() \r\n print(information)\r\n\r\n print (\"Closing the database\")\r\n connection.close()\r\n\r\n if (pswd==information[0][1]) :\r\n session['doctor']= Global_did\r\n return redirect(url_for('doctor_view'))\r\n\r\n else:\r\n return ('Incorrect information entered! Try again')\r\n\r\n except Exception as error:\r\n return_message = str(error)\r\n return(return_message)\r\n else:\r\n return render_template(\"doctor_login.html\")\r\n\r\n#--------Doctor's View--------\r\n@app.route('/doctorview',methods=['GET','POST'])\r\ndef doctor_view():\r\n if not session.get(\"doctor\"):\r\n flash('You are not logged in as the doctor')\r\n return redirect(url_for('greeting'))\r\n global doctor_hid\r\n global Global_did\r\n try:\r\n #Getting casual patients\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n \r\n print (\"Executing the DML\")\r\n cursor.execute(\"SELECT P_ID,Name,Gender,Age FROM patients WHERE Doctor_id=? AND Emergency=0 And Hospital_ID=?\",(Global_did,doctor_hid))\r\n\r\n\r\n print (\"Get the Rows from cursor\")\r\n casual_patients = cursor.fetchall() \r\n\r\n print (\"Closing the database\")\r\n connection.close()\r\n\r\n #Getting Emergency patients\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n\r\n print (\"Executing the DML\")\r\n cursor.execute(\"SELECT P_ID,Name,Gender,Age FROM patients WHERE Doctor_id=? AND Emergency=1 And Hospital_ID=?\",(Global_did,doctor_hid,))\r\n\r\n print (\"Get the Rows from cursor\")\r\n emergency_patients = cursor.fetchall() \r\n\r\n print (\"Closing the database\")\r\n connection.close()\r\n\r\n #Getting doctor's name\r\n print (\"making a connection\")\r\n connection = sqlite3.connect('hospital_db.db')\r\n\r\n print (\"Getting a Cursor\")\r\n cursor = connection.cursor()\r\n \r\n print (\"Executing the DML\")\r\n cursor.execute(\"SELECT Name FROM doctor WHERE Doc_ID=? AND Hospital_ID=?\",(Global_did,doctor_hid,))\r\n\r\n\r\n print (\"Get the Rows from cursor\")\r\n docname = cursor.fetchall() \r\n\r\n print (\"Closing the database\")\r\n connection.close()\r\n\r\n print(casual_patients)\r\n\r\n except Exception as error:\r\n return_message = str(error)\r\n return(return_message)\r\n\r\n return render_template('doctor_view.html',casual =casual_patients, emergency=emergency_patients,docname=docname)\r\n\r\n#--------Dismissing a patient----\r\n@app.route(\"/dismiss/